// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: datatransfer.proto
package org.apache.hadoop.hdfs.protocol.proto;
public final class DataTransferProtos {
private DataTransferProtos() {}
public static void registerAllExtensions(
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
}
public static void registerAllExtensions(
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
registerAllExtensions(
(org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
}
/**
*
* Status is a 4-bit enum
*
*
* Protobuf enum {@code hadoop.hdfs.Status}
*/
public enum Status
implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
/**
* SUCCESS = 0;
*/
SUCCESS(0),
/**
* ERROR = 1;
*/
ERROR(1),
/**
* ERROR_CHECKSUM = 2;
*/
ERROR_CHECKSUM(2),
/**
* ERROR_INVALID = 3;
*/
ERROR_INVALID(3),
/**
* ERROR_EXISTS = 4;
*/
ERROR_EXISTS(4),
/**
* ERROR_ACCESS_TOKEN = 5;
*/
ERROR_ACCESS_TOKEN(5),
/**
* CHECKSUM_OK = 6;
*/
CHECKSUM_OK(6),
/**
* ERROR_UNSUPPORTED = 7;
*/
ERROR_UNSUPPORTED(7),
/**
*
* Quick restart
*
*
* OOB_RESTART = 8;
*/
OOB_RESTART(8),
/**
*
* Reserved
*
*
* OOB_RESERVED1 = 9;
*/
OOB_RESERVED1(9),
/**
*
* Reserved
*
*
* OOB_RESERVED2 = 10;
*/
OOB_RESERVED2(10),
/**
*
* Reserved
*
*
* OOB_RESERVED3 = 11;
*/
OOB_RESERVED3(11),
/**
* IN_PROGRESS = 12;
*/
IN_PROGRESS(12),
/**
* ERROR_BLOCK_PINNED = 13;
*/
ERROR_BLOCK_PINNED(13),
;
/**
* SUCCESS = 0;
*/
public static final int SUCCESS_VALUE = 0;
/**
* ERROR = 1;
*/
public static final int ERROR_VALUE = 1;
/**
* ERROR_CHECKSUM = 2;
*/
public static final int ERROR_CHECKSUM_VALUE = 2;
/**
* ERROR_INVALID = 3;
*/
public static final int ERROR_INVALID_VALUE = 3;
/**
* ERROR_EXISTS = 4;
*/
public static final int ERROR_EXISTS_VALUE = 4;
/**
* ERROR_ACCESS_TOKEN = 5;
*/
public static final int ERROR_ACCESS_TOKEN_VALUE = 5;
/**
* CHECKSUM_OK = 6;
*/
public static final int CHECKSUM_OK_VALUE = 6;
/**
* ERROR_UNSUPPORTED = 7;
*/
public static final int ERROR_UNSUPPORTED_VALUE = 7;
/**
*
* Quick restart
*
*
* OOB_RESTART = 8;
*/
public static final int OOB_RESTART_VALUE = 8;
/**
*
* Reserved
*
*
* OOB_RESERVED1 = 9;
*/
public static final int OOB_RESERVED1_VALUE = 9;
/**
*
* Reserved
*
*
* OOB_RESERVED2 = 10;
*/
public static final int OOB_RESERVED2_VALUE = 10;
/**
*
* Reserved
*
*
* OOB_RESERVED3 = 11;
*/
public static final int OOB_RESERVED3_VALUE = 11;
/**
* IN_PROGRESS = 12;
*/
public static final int IN_PROGRESS_VALUE = 12;
/**
* ERROR_BLOCK_PINNED = 13;
*/
public static final int ERROR_BLOCK_PINNED_VALUE = 13;
public final int getNumber() {
return value;
}
/**
* @deprecated Use {@link #forNumber(int)} instead.
*/
@java.lang.Deprecated
public static Status valueOf(int value) {
return forNumber(value);
}
public static Status forNumber(int value) {
switch (value) {
case 0: return SUCCESS;
case 1: return ERROR;
case 2: return ERROR_CHECKSUM;
case 3: return ERROR_INVALID;
case 4: return ERROR_EXISTS;
case 5: return ERROR_ACCESS_TOKEN;
case 6: return CHECKSUM_OK;
case 7: return ERROR_UNSUPPORTED;
case 8: return OOB_RESTART;
case 9: return OOB_RESERVED1;
case 10: return OOB_RESERVED2;
case 11: return OOB_RESERVED3;
case 12: return IN_PROGRESS;
case 13: return ERROR_BLOCK_PINNED;
default: return null;
}
}
public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Status>
internalGetValueMap() {
return internalValueMap;
}
private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
Status> internalValueMap =
new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Status>() {
public Status findValueByNumber(int number) {
return Status.forNumber(number);
}
};
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(ordinal());
}
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.getDescriptor().getEnumTypes().get(0);
}
private static final Status[] VALUES = values();
public static Status valueOf(
org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int value;
private Status(int value) {
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.Status)
}
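// Illustrative sketch (not part of the generated code): Status converts between
// wire numbers and enum constants via forNumber(int) and getNumber(). A hypothetical
// round trip, using the *_VALUE constants declared above, looks like:
//
//   Status s = Status.forNumber(Status.ERROR_CHECKSUM_VALUE);
//   assert s == Status.ERROR_CHECKSUM;
//   assert s.getNumber() == 2;              // ERROR_CHECKSUM = 2 in datatransfer.proto
//   assert Status.forNumber(99) == null;    // unrecognized numbers map to null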
/**
* Protobuf enum {@code hadoop.hdfs.ShortCircuitFdResponse}
*/
public enum ShortCircuitFdResponse
implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
/**
* DO_NOT_USE_RECEIPT_VERIFICATION = 0;
*/
DO_NOT_USE_RECEIPT_VERIFICATION(0),
/**
* USE_RECEIPT_VERIFICATION = 1;
*/
USE_RECEIPT_VERIFICATION(1),
;
/**
* DO_NOT_USE_RECEIPT_VERIFICATION = 0;
*/
public static final int DO_NOT_USE_RECEIPT_VERIFICATION_VALUE = 0;
/**
* USE_RECEIPT_VERIFICATION = 1;
*/
public static final int USE_RECEIPT_VERIFICATION_VALUE = 1;
public final int getNumber() {
return value;
}
/**
* @deprecated Use {@link #forNumber(int)} instead.
*/
@java.lang.Deprecated
public static ShortCircuitFdResponse valueOf(int value) {
return forNumber(value);
}
public static ShortCircuitFdResponse forNumber(int value) {
switch (value) {
case 0: return DO_NOT_USE_RECEIPT_VERIFICATION;
case 1: return USE_RECEIPT_VERIFICATION;
default: return null;
}
}
public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<ShortCircuitFdResponse>
internalGetValueMap() {
return internalValueMap;
}
private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
ShortCircuitFdResponse> internalValueMap =
new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<ShortCircuitFdResponse>() {
public ShortCircuitFdResponse findValueByNumber(int number) {
return ShortCircuitFdResponse.forNumber(number);
}
};
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(ordinal());
}
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.getDescriptor().getEnumTypes().get(1);
}
private static final ShortCircuitFdResponse[] VALUES = values();
public static ShortCircuitFdResponse valueOf(
org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int value;
private ShortCircuitFdResponse(int value) {
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.ShortCircuitFdResponse)
}
public interface DataTransferEncryptorMessageProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.DataTransferEncryptorMessageProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus getStatus();
/**
* optional bytes payload = 2;
*/
boolean hasPayload();
/**
* optional bytes payload = 2;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString getPayload();
/**
* optional string message = 3;
*/
boolean hasMessage();
/**
* optional string message = 3;
*/
java.lang.String getMessage();
/**
* optional string message = 3;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getMessageBytes();
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto>
getCipherOptionList();
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getCipherOption(int index);
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
int getCipherOptionCount();
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder>
getCipherOptionOrBuilderList();
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder getCipherOptionOrBuilder(
int index);
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
boolean hasHandshakeSecret();
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getHandshakeSecret();
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder getHandshakeSecretOrBuilder();
/**
* optional bool accessTokenError = 6;
*/
boolean hasAccessTokenError();
/**
* optional bool accessTokenError = 6;
*/
boolean getAccessTokenError();
}
/**
* Protobuf type {@code hadoop.hdfs.DataTransferEncryptorMessageProto}
*/
public static final class DataTransferEncryptorMessageProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.DataTransferEncryptorMessageProto)
DataTransferEncryptorMessageProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use DataTransferEncryptorMessageProto.newBuilder() to construct.
private DataTransferEncryptorMessageProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DataTransferEncryptorMessageProto() {
status_ = 0;
payload_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
message_ = "";
cipherOption_ = java.util.Collections.emptyList();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
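// The constructor below parses the message from a CodedInputStream, dispatching on
// each field's wire tag (fieldNumber << 3 | wireType): e.g. tag 8 is field 1
// (status, varint) and tag 34 is field 4 (cipherOption, length-delimited).
// Fields it does not recognize are preserved in unknownFields rather than dropped.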
private DataTransferEncryptorMessageProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = rawValue;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
payload_ = input.readBytes();
break;
}
case 26: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000004;
message_ = bs;
break;
}
case 34: {
if (!((mutable_bitField0_ & 0x00000008) != 0)) {
cipherOption_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto>();
mutable_bitField0_ |= 0x00000008;
}
cipherOption_.add(
input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.PARSER, extensionRegistry));
break;
}
case 42: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) != 0)) {
subBuilder = handshakeSecret_.toBuilder();
}
handshakeSecret_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(handshakeSecret_);
handshakeSecret_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 48: {
bitField0_ |= 0x00000010;
accessTokenError_ = input.readBool();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000008) != 0)) {
cipherOption_ = java.util.Collections.unmodifiableList(cipherOption_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.Builder.class);
}
/**
* Protobuf enum {@code hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus}
*/
public enum DataTransferEncryptorStatus
implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
/**
* SUCCESS = 0;
*/
SUCCESS(0),
/**
* ERROR_UNKNOWN_KEY = 1;
*/
ERROR_UNKNOWN_KEY(1),
/**
* ERROR = 2;
*/
ERROR(2),
;
/**
* SUCCESS = 0;
*/
public static final int SUCCESS_VALUE = 0;
/**
* ERROR_UNKNOWN_KEY = 1;
*/
public static final int ERROR_UNKNOWN_KEY_VALUE = 1;
/**
* ERROR = 2;
*/
public static final int ERROR_VALUE = 2;
public final int getNumber() {
return value;
}
/**
* @deprecated Use {@link #forNumber(int)} instead.
*/
@java.lang.Deprecated
public static DataTransferEncryptorStatus valueOf(int value) {
return forNumber(value);
}
public static DataTransferEncryptorStatus forNumber(int value) {
switch (value) {
case 0: return SUCCESS;
case 1: return ERROR_UNKNOWN_KEY;
case 2: return ERROR;
default: return null;
}
}
public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<DataTransferEncryptorStatus>
internalGetValueMap() {
return internalValueMap;
}
private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
DataTransferEncryptorStatus> internalValueMap =
new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<DataTransferEncryptorStatus>() {
public DataTransferEncryptorStatus findValueByNumber(int number) {
return DataTransferEncryptorStatus.forNumber(number);
}
};
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(ordinal());
}
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.getDescriptor().getEnumTypes().get(0);
}
private static final DataTransferEncryptorStatus[] VALUES = values();
public static DataTransferEncryptorStatus valueOf(
org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int value;
private DataTransferEncryptorStatus(int value) {
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus)
}
private int bitField0_;
public static final int STATUS_FIELD_NUMBER = 1;
private int status_;
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.SUCCESS : result;
}
public static final int PAYLOAD_FIELD_NUMBER = 2;
private org.apache.hadoop.thirdparty.protobuf.ByteString payload_;
/**
* optional bytes payload = 2;
*/
public boolean hasPayload() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional bytes payload = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString getPayload() {
return payload_;
}
public static final int MESSAGE_FIELD_NUMBER = 3;
private volatile java.lang.Object message_;
/**
* optional string message = 3;
*/
public boolean hasMessage() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional string message = 3;
*/
public java.lang.String getMessage() {
java.lang.Object ref = message_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
message_ = s;
}
return s;
}
}
/**
* optional string message = 3;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getMessageBytes() {
java.lang.Object ref = message_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
message_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int CIPHEROPTION_FIELD_NUMBER = 4;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> cipherOption_;
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> getCipherOptionList() {
return cipherOption_;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder>
getCipherOptionOrBuilderList() {
return cipherOption_;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public int getCipherOptionCount() {
return cipherOption_.size();
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getCipherOption(int index) {
return cipherOption_.get(index);
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder getCipherOptionOrBuilder(
int index) {
return cipherOption_.get(index);
}
public static final int HANDSHAKESECRET_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto handshakeSecret_;
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
public boolean hasHandshakeSecret() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getHandshakeSecret() {
return handshakeSecret_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance() : handshakeSecret_;
}
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder getHandshakeSecretOrBuilder() {
return handshakeSecret_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance() : handshakeSecret_;
}
public static final int ACCESSTOKENERROR_FIELD_NUMBER = 6;
private boolean accessTokenError_;
/**
* optional bool accessTokenError = 6;
*/
public boolean hasAccessTokenError() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional bool accessTokenError = 6;
*/
public boolean getAccessTokenError() {
return accessTokenError_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getCipherOptionCount(); i++) {
if (!getCipherOption(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasHandshakeSecret()) {
if (!getHandshakeSecret().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeBytes(2, payload_);
}
if (((bitField0_ & 0x00000004) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, message_);
}
for (int i = 0; i < cipherOption_.size(); i++) {
output.writeMessage(4, cipherOption_.get(i));
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeMessage(5, getHandshakeSecret());
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeBool(6, accessTokenError_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBytesSize(2, payload_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, message_);
}
for (int i = 0; i < cipherOption_.size(); i++) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(4, cipherOption_.get(i));
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(5, getHandshakeSecret());
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(6, accessTokenError_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto) obj;
if (hasStatus() != other.hasStatus()) return false;
if (hasStatus()) {
if (status_ != other.status_) return false;
}
if (hasPayload() != other.hasPayload()) return false;
if (hasPayload()) {
if (!getPayload()
.equals(other.getPayload())) return false;
}
if (hasMessage() != other.hasMessage()) return false;
if (hasMessage()) {
if (!getMessage()
.equals(other.getMessage())) return false;
}
if (!getCipherOptionList()
.equals(other.getCipherOptionList())) return false;
if (hasHandshakeSecret() != other.hasHandshakeSecret()) return false;
if (hasHandshakeSecret()) {
if (!getHandshakeSecret()
.equals(other.getHandshakeSecret())) return false;
}
if (hasAccessTokenError() != other.hasAccessTokenError()) return false;
if (hasAccessTokenError()) {
if (getAccessTokenError()
!= other.getAccessTokenError()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + status_;
}
if (hasPayload()) {
hash = (37 * hash) + PAYLOAD_FIELD_NUMBER;
hash = (53 * hash) + getPayload().hashCode();
}
if (hasMessage()) {
hash = (37 * hash) + MESSAGE_FIELD_NUMBER;
hash = (53 * hash) + getMessage().hashCode();
}
if (getCipherOptionCount() > 0) {
hash = (37 * hash) + CIPHEROPTION_FIELD_NUMBER;
hash = (53 * hash) + getCipherOptionList().hashCode();
}
if (hasHandshakeSecret()) {
hash = (37 * hash) + HANDSHAKESECRET_FIELD_NUMBER;
hash = (53 * hash) + getHandshakeSecret().hashCode();
}
if (hasAccessTokenError()) {
hash = (37 * hash) + ACCESSTOKENERROR_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getAccessTokenError());
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DataTransferEncryptorMessageProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.DataTransferEncryptorMessageProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getCipherOptionFieldBuilder();
getHandshakeSecretFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
status_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
payload_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
message_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
if (cipherOptionBuilder_ == null) {
cipherOption_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
} else {
cipherOptionBuilder_.clear();
}
if (handshakeSecretBuilder_ == null) {
handshakeSecret_ = null;
} else {
handshakeSecretBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
accessTokenError_ = false;
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.payload_ = payload_;
if (((from_bitField0_ & 0x00000004) != 0)) {
to_bitField0_ |= 0x00000004;
}
result.message_ = message_;
if (cipherOptionBuilder_ == null) {
if (((bitField0_ & 0x00000008) != 0)) {
cipherOption_ = java.util.Collections.unmodifiableList(cipherOption_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.cipherOption_ = cipherOption_;
} else {
result.cipherOption_ = cipherOptionBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) != 0)) {
if (handshakeSecretBuilder_ == null) {
result.handshakeSecret_ = handshakeSecret_;
} else {
result.handshakeSecret_ = handshakeSecretBuilder_.build();
}
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000020) != 0)) {
result.accessTokenError_ = accessTokenError_;
to_bitField0_ |= 0x00000010;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasPayload()) {
setPayload(other.getPayload());
}
if (other.hasMessage()) {
bitField0_ |= 0x00000004;
message_ = other.message_;
onChanged();
}
if (cipherOptionBuilder_ == null) {
if (!other.cipherOption_.isEmpty()) {
if (cipherOption_.isEmpty()) {
cipherOption_ = other.cipherOption_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureCipherOptionIsMutable();
cipherOption_.addAll(other.cipherOption_);
}
onChanged();
}
} else {
if (!other.cipherOption_.isEmpty()) {
if (cipherOptionBuilder_.isEmpty()) {
cipherOptionBuilder_.dispose();
cipherOptionBuilder_ = null;
cipherOption_ = other.cipherOption_;
bitField0_ = (bitField0_ & ~0x00000008);
cipherOptionBuilder_ =
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getCipherOptionFieldBuilder() : null;
} else {
cipherOptionBuilder_.addAllMessages(other.cipherOption_);
}
}
}
if (other.hasHandshakeSecret()) {
mergeHandshakeSecret(other.getHandshakeSecret());
}
if (other.hasAccessTokenError()) {
setAccessTokenError(other.getAccessTokenError());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
for (int i = 0; i < getCipherOptionCount(); i++) {
if (!getCipherOption(i).isInitialized()) {
return false;
}
}
if (hasHandshakeSecret()) {
if (!getHandshakeSecret().isInitialized()) {
return false;
}
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private int status_ = 0;
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.SUCCESS : result;
}
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = 0;
onChanged();
return this;
}
private org.apache.hadoop.thirdparty.protobuf.ByteString payload_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
* optional bytes payload = 2;
*/
public boolean hasPayload() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional bytes payload = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString getPayload() {
return payload_;
}
/**
* optional bytes payload = 2;
*/
public Builder setPayload(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
payload_ = value;
onChanged();
return this;
}
/**
* optional bytes payload = 2;
*/
public Builder clearPayload() {
bitField0_ = (bitField0_ & ~0x00000002);
payload_ = getDefaultInstance().getPayload();
onChanged();
return this;
}
private java.lang.Object message_ = "";
/**
* optional string message = 3;
*/
public boolean hasMessage() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional string message = 3;
*/
public java.lang.String getMessage() {
java.lang.Object ref = message_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
message_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string message = 3;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getMessageBytes() {
java.lang.Object ref = message_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
message_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string message = 3;
*/
public Builder setMessage(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
message_ = value;
onChanged();
return this;
}
/**
* optional string message = 3;
*/
public Builder clearMessage() {
bitField0_ = (bitField0_ & ~0x00000004);
message_ = getDefaultInstance().getMessage();
onChanged();
return this;
}
/**
* optional string message = 3;
*/
public Builder setMessageBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
message_ = value;
onChanged();
return this;
}
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> cipherOption_ =
java.util.Collections.emptyList();
private void ensureCipherOptionIsMutable() {
if (!((bitField0_ & 0x00000008) != 0)) {
cipherOption_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto>(cipherOption_);
bitField0_ |= 0x00000008;
}
}
private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder> cipherOptionBuilder_;
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> getCipherOptionList() {
if (cipherOptionBuilder_ == null) {
return java.util.Collections.unmodifiableList(cipherOption_);
} else {
return cipherOptionBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public int getCipherOptionCount() {
if (cipherOptionBuilder_ == null) {
return cipherOption_.size();
} else {
return cipherOptionBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getCipherOption(int index) {
if (cipherOptionBuilder_ == null) {
return cipherOption_.get(index);
} else {
return cipherOptionBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder setCipherOption(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto value) {
if (cipherOptionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCipherOptionIsMutable();
cipherOption_.set(index, value);
onChanged();
} else {
cipherOptionBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder setCipherOption(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder builderForValue) {
if (cipherOptionBuilder_ == null) {
ensureCipherOptionIsMutable();
cipherOption_.set(index, builderForValue.build());
onChanged();
} else {
cipherOptionBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder addCipherOption(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto value) {
if (cipherOptionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCipherOptionIsMutable();
cipherOption_.add(value);
onChanged();
} else {
cipherOptionBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder addCipherOption(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto value) {
if (cipherOptionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCipherOptionIsMutable();
cipherOption_.add(index, value);
onChanged();
} else {
cipherOptionBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder addCipherOption(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder builderForValue) {
if (cipherOptionBuilder_ == null) {
ensureCipherOptionIsMutable();
cipherOption_.add(builderForValue.build());
onChanged();
} else {
cipherOptionBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder addCipherOption(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder builderForValue) {
if (cipherOptionBuilder_ == null) {
ensureCipherOptionIsMutable();
cipherOption_.add(index, builderForValue.build());
onChanged();
} else {
cipherOptionBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder addAllCipherOption(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> values) {
if (cipherOptionBuilder_ == null) {
ensureCipherOptionIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, cipherOption_);
onChanged();
} else {
cipherOptionBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder clearCipherOption() {
if (cipherOptionBuilder_ == null) {
cipherOption_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
} else {
cipherOptionBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder removeCipherOption(int index) {
if (cipherOptionBuilder_ == null) {
ensureCipherOptionIsMutable();
cipherOption_.remove(index);
onChanged();
} else {
cipherOptionBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder getCipherOptionBuilder(
int index) {
return getCipherOptionFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder getCipherOptionOrBuilder(
int index) {
if (cipherOptionBuilder_ == null) {
return cipherOption_.get(index); } else {
return cipherOptionBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder>
getCipherOptionOrBuilderList() {
if (cipherOptionBuilder_ != null) {
return cipherOptionBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(cipherOption_);
}
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder addCipherOptionBuilder() {
return getCipherOptionFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder addCipherOptionBuilder(
int index) {
return getCipherOptionFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder>
getCipherOptionBuilderList() {
return getCipherOptionFieldBuilder().getBuilderList();
}
private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder>
getCipherOptionFieldBuilder() {
if (cipherOptionBuilder_ == null) {
cipherOptionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder>(
cipherOption_,
((bitField0_ & 0x00000008) != 0),
getParentForChildren(),
isClean());
cipherOption_ = null;
}
return cipherOptionBuilder_;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto handshakeSecret_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder> handshakeSecretBuilder_;
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
public boolean hasHandshakeSecret() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getHandshakeSecret() {
if (handshakeSecretBuilder_ == null) {
return handshakeSecret_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance() : handshakeSecret_;
} else {
return handshakeSecretBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
public Builder setHandshakeSecret(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto value) {
if (handshakeSecretBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
handshakeSecret_ = value;
onChanged();
} else {
handshakeSecretBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
public Builder setHandshakeSecret(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder builderForValue) {
if (handshakeSecretBuilder_ == null) {
handshakeSecret_ = builderForValue.build();
onChanged();
} else {
handshakeSecretBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
public Builder mergeHandshakeSecret(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto value) {
if (handshakeSecretBuilder_ == null) {
if (((bitField0_ & 0x00000010) != 0) &&
handshakeSecret_ != null &&
handshakeSecret_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance()) {
handshakeSecret_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.newBuilder(handshakeSecret_).mergeFrom(value).buildPartial();
} else {
handshakeSecret_ = value;
}
onChanged();
} else {
handshakeSecretBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
public Builder clearHandshakeSecret() {
if (handshakeSecretBuilder_ == null) {
handshakeSecret_ = null;
onChanged();
} else {
handshakeSecretBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder getHandshakeSecretBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getHandshakeSecretFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder getHandshakeSecretOrBuilder() {
if (handshakeSecretBuilder_ != null) {
return handshakeSecretBuilder_.getMessageOrBuilder();
} else {
return handshakeSecret_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance() : handshakeSecret_;
}
}
/**
* optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder>
getHandshakeSecretFieldBuilder() {
if (handshakeSecretBuilder_ == null) {
handshakeSecretBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder>(
getHandshakeSecret(),
getParentForChildren(),
isClean());
handshakeSecret_ = null;
}
return handshakeSecretBuilder_;
}
private boolean accessTokenError_ ;
/**
* optional bool accessTokenError = 6;
*/
public boolean hasAccessTokenError() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
* optional bool accessTokenError = 6;
*/
public boolean getAccessTokenError() {
return accessTokenError_;
}
/**
* optional bool accessTokenError = 6;
*/
public Builder setAccessTokenError(boolean value) {
bitField0_ |= 0x00000020;
accessTokenError_ = value;
onChanged();
return this;
}
/**
* optional bool accessTokenError = 6;
*/
public Builder clearAccessTokenError() {
bitField0_ = (bitField0_ & ~0x00000020);
accessTokenError_ = false;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataTransferEncryptorMessageProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DataTransferEncryptorMessageProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferEncryptorMessageProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DataTransferEncryptorMessageProto>() {
@java.lang.Override
public DataTransferEncryptorMessageProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new DataTransferEncryptorMessageProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferEncryptorMessageProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferEncryptorMessageProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
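// Note (illustrative, not part of the generated file): the static PARSER field above is
// retained only for backward compatibility and is marked @Deprecated; callers should go
// through parser() or getParserForType(). A minimal sketch, assuming `payload` holds a
// serialized DataTransferEncryptorMessageProto received from the data-transfer stream:
//
//   byte[] payload = receivedBytes;  // hypothetical variable holding the wire bytes
//   DataTransferEncryptorMessageProto msg =
//       DataTransferEncryptorMessageProto.parser().parseFrom(payload);
//   if (msg.hasHandshakeSecret()) {
//     HandshakeSecretProto secret = msg.getHandshakeSecret();
//   }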
public interface HandshakeSecretProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.HandshakeSecretProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required bytes secret = 1;
*/
boolean hasSecret();
/**
* required bytes secret = 1;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString getSecret();
/**
* required string bpid = 2;
*/
boolean hasBpid();
/**
* required string bpid = 2;
*/
java.lang.String getBpid();
/**
* required string bpid = 2;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getBpidBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.HandshakeSecretProto}
*/
public static final class HandshakeSecretProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.HandshakeSecretProto)
HandshakeSecretProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use HandshakeSecretProto.newBuilder() to construct.
private HandshakeSecretProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private HandshakeSecretProto() {
secret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
bpid_ = "";
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private HandshakeSecretProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
bitField0_ |= 0x00000001;
secret_ = input.readBytes();
break;
}
case 18: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000002;
bpid_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_HandshakeSecretProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder.class);
}
private int bitField0_;
public static final int SECRET_FIELD_NUMBER = 1;
private org.apache.hadoop.thirdparty.protobuf.ByteString secret_;
/**
* required bytes secret = 1;
*/
public boolean hasSecret() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required bytes secret = 1;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString getSecret() {
return secret_;
}
public static final int BPID_FIELD_NUMBER = 2;
private volatile java.lang.Object bpid_;
/**
* required string bpid = 2;
*/
public boolean hasBpid() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required string bpid = 2;
*/
public java.lang.String getBpid() {
java.lang.Object ref = bpid_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
bpid_ = s;
}
return s;
}
}
/**
* required string bpid = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getBpidBytes() {
java.lang.Object ref = bpid_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
bpid_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasSecret()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBpid()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeBytes(1, secret_);
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, bpid_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBytesSize(1, secret_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, bpid_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto) obj;
if (hasSecret() != other.hasSecret()) return false;
if (hasSecret()) {
if (!getSecret()
.equals(other.getSecret())) return false;
}
if (hasBpid() != other.hasBpid()) return false;
if (hasBpid()) {
if (!getBpid()
.equals(other.getBpid())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasSecret()) {
hash = (37 * hash) + SECRET_FIELD_NUMBER;
hash = (53 * hash) + getSecret().hashCode();
}
if (hasBpid()) {
hash = (37 * hash) + BPID_FIELD_NUMBER;
hash = (53 * hash) + getBpid().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.HandshakeSecretProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.HandshakeSecretProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_HandshakeSecretProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
secret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
bpid_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.secret_ = secret_;
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.bpid_ = bpid_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance()) return this;
if (other.hasSecret()) {
setSecret(other.getSecret());
}
if (other.hasBpid()) {
bitField0_ |= 0x00000002;
bpid_ = other.bpid_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasSecret()) {
return false;
}
if (!hasBpid()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.thirdparty.protobuf.ByteString secret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
* required bytes secret = 1;
*/
public boolean hasSecret() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required bytes secret = 1;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString getSecret() {
return secret_;
}
/**
* required bytes secret = 1;
*/
public Builder setSecret(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
secret_ = value;
onChanged();
return this;
}
/**
* required bytes secret = 1;
*/
public Builder clearSecret() {
bitField0_ = (bitField0_ & ~0x00000001);
secret_ = getDefaultInstance().getSecret();
onChanged();
return this;
}
private java.lang.Object bpid_ = "";
/**
* required string bpid = 2;
*/
public boolean hasBpid() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required string bpid = 2;
*/
public java.lang.String getBpid() {
java.lang.Object ref = bpid_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
bpid_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string bpid = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getBpidBytes() {
java.lang.Object ref = bpid_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
bpid_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string bpid = 2;
*/
public Builder setBpid(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
bpid_ = value;
onChanged();
return this;
}
/**
* required string bpid = 2;
*/
public Builder clearBpid() {
bitField0_ = (bitField0_ & ~0x00000002);
bpid_ = getDefaultInstance().getBpid();
onChanged();
return this;
}
/**
* required string bpid = 2;
*/
public Builder setBpidBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
bpid_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.HandshakeSecretProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.HandshakeSecretProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<HandshakeSecretProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<HandshakeSecretProto>() {
@java.lang.Override
public HandshakeSecretProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new HandshakeSecretProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<HandshakeSecretProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<HandshakeSecretProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
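// Illustrative usage sketch (not part of the generated file): HandshakeSecretProto carries a
// required secret (bytes) and a required bpid (block pool id, string); build() throws via
// newUninitializedMessageException if either is missing. The literal values below are made up.
//
//   HandshakeSecretProto secret = HandshakeSecretProto.newBuilder()
//       .setSecret(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("shared-secret"))
//       .setBpid("BP-12345-127.0.0.1-1000000000000")
//       .build();                                  // fails if a required field is unset
//   byte[] wire = secret.toByteArray();            // serialize
//   HandshakeSecretProto parsed = HandshakeSecretProto.parseFrom(wire);  // round-trip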
public interface BaseHeaderProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.BaseHeaderProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
/**
* optional .hadoop.common.TokenProto token = 2;
*/
boolean hasToken();
/**
* optional .hadoop.common.TokenProto token = 2;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken();
/**
* optional .hadoop.common.TokenProto token = 2;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
boolean hasTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.BaseHeaderProto}
*/
public static final class BaseHeaderProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.BaseHeaderProto)
BaseHeaderProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use BaseHeaderProto.newBuilder() to construct.
private BaseHeaderProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private BaseHeaderProto() {
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BaseHeaderProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) != 0)) {
subBuilder = token_.toBuilder();
}
token_ = input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(token_);
token_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) != 0)) {
subBuilder = traceInfo_.toBuilder();
}
traceInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(traceInfo_);
traceInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder.class);
}
private int bitField0_;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
public static final int TOKEN_FIELD_NUMBER = 2;
private org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_;
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public boolean hasToken() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
return token_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
return token_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
}
public static final int TRACEINFO_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (hasToken()) {
if (!getToken().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getBlock());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(2, getToken());
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeMessage(3, getTraceInfo());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getBlock());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(2, getToken());
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(3, getTraceInfo());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) obj;
if (hasBlock() != other.hasBlock()) return false;
if (hasBlock()) {
if (!getBlock()
.equals(other.getBlock())) return false;
}
if (hasToken() != other.hasToken()) return false;
if (hasToken()) {
if (!getToken()
.equals(other.getToken())) return false;
}
if (hasTraceInfo() != other.hasTraceInfo()) return false;
if (hasTraceInfo()) {
if (!getTraceInfo()
.equals(other.getTraceInfo())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasToken()) {
hash = (37 * hash) + TOKEN_FIELD_NUMBER;
hash = (53 * hash) + getToken().hashCode();
}
if (hasTraceInfo()) {
hash = (37 * hash) + TRACEINFO_FIELD_NUMBER;
hash = (53 * hash) + getTraceInfo().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.BaseHeaderProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.BaseHeaderProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
getTokenFieldBuilder();
getTraceInfoFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = null;
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (tokenBuilder_ == null) {
token_ = null;
} else {
tokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
if (traceInfoBuilder_ == null) {
traceInfo_ = null;
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
if (tokenBuilder_ == null) {
result.token_ = token_;
} else {
result.token_ = tokenBuilder_.build();
}
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
if (traceInfoBuilder_ == null) {
result.traceInfo_ = traceInfo_;
} else {
result.traceInfo_ = traceInfoBuilder_.build();
}
to_bitField0_ |= 0x00000004;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasToken()) {
mergeToken(other.getToken());
}
if (other.hasTraceInfo()) {
mergeTraceInfo(other.getTraceInfo());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
if (hasToken()) {
if (!getToken().isInitialized()) {
return false;
}
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
block_ != null &&
block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = null;
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
getBlock(),
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
private org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> tokenBuilder_;
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public boolean hasToken() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
if (tokenBuilder_ == null) {
return token_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
} else {
return tokenBuilder_.getMessage();
}
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public Builder setToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokenBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
token_ = value;
onChanged();
} else {
tokenBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public Builder setToken(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (tokenBuilder_ == null) {
token_ = builderForValue.build();
onChanged();
} else {
tokenBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public Builder mergeToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokenBuilder_ == null) {
if (((bitField0_ & 0x00000002) != 0) &&
token_ != null &&
token_ != org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) {
token_ =
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder(token_).mergeFrom(value).buildPartial();
} else {
token_ = value;
}
onChanged();
} else {
tokenBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public Builder clearToken() {
if (tokenBuilder_ == null) {
token_ = null;
onChanged();
} else {
tokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getTokenBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTokenFieldBuilder().getBuilder();
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
if (tokenBuilder_ != null) {
return tokenBuilder_.getMessageOrBuilder();
} else {
return token_ == null ?
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
}
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getTokenFieldBuilder() {
if (tokenBuilder_ == null) {
tokenBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
getToken(),
getParentForChildren(),
isClean());
token_ = null;
}
return tokenBuilder_;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> traceInfoBuilder_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
if (traceInfoBuilder_ == null) {
return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
} else {
return traceInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public Builder setTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
traceInfo_ = value;
onChanged();
} else {
traceInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public Builder setTraceInfo(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder builderForValue) {
if (traceInfoBuilder_ == null) {
traceInfo_ = builderForValue.build();
onChanged();
} else {
traceInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public Builder mergeTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0) &&
traceInfo_ != null &&
traceInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) {
traceInfo_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.newBuilder(traceInfo_).mergeFrom(value).buildPartial();
} else {
traceInfo_ = value;
}
onChanged();
} else {
traceInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public Builder clearTraceInfo() {
if (traceInfoBuilder_ == null) {
traceInfo_ = null;
onChanged();
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder getTraceInfoBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getTraceInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
if (traceInfoBuilder_ != null) {
return traceInfoBuilder_.getMessageOrBuilder();
} else {
return traceInfo_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>
getTraceInfoFieldBuilder() {
if (traceInfoBuilder_ == null) {
traceInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>(
getTraceInfo(),
getParentForChildren(),
isClean());
traceInfo_ = null;
}
return traceInfoBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.BaseHeaderProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.BaseHeaderProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BaseHeaderProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BaseHeaderProto>() {
@java.lang.Override
public BaseHeaderProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new BaseHeaderProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<BaseHeaderProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<BaseHeaderProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
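// --- Editor-added usage sketch, not emitted by protoc ---
// Illustrates how the traceInfo sub-message handled by the builder methods above is
// attached to a BaseHeaderProto. getTraceInfoBuilder() lazily creates the nested
// builder, and mergeTraceInfo() folds an existing message into whatever is already
// set. buildPartial() is used instead of build() because the header's other fields,
// declared earlier in this file, may include required ones this sketch does not
// populate; the trace identifier is a placeholder value, not a real HDFS trace id.
private static BaseHeaderProto exampleHeaderWithTrace(
    DataTransferTraceInfoProto existingTrace) {
  BaseHeaderProto.Builder header = BaseHeaderProto.newBuilder();
  header.getTraceInfoBuilder().setTraceId(42L);  // start a nested traceInfo builder
  header.mergeTraceInfo(existingTrace);          // merge fields from an existing trace message
  return header.buildPartial();
}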
public interface DataTransferTraceInfoProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.DataTransferTraceInfoProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* optional uint64 traceId = 1;
*/
boolean hasTraceId();
/**
* optional uint64 traceId = 1;
*/
long getTraceId();
/**
* optional uint64 parentId = 2;
*/
boolean hasParentId();
/**
* optional uint64 parentId = 2;
*/
long getParentId();
/**
* optional bytes spanContext = 3;
*/
boolean hasSpanContext();
/**
* optional bytes spanContext = 3;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString getSpanContext();
}
/**
* Protobuf type {@code hadoop.hdfs.DataTransferTraceInfoProto}
*/
public static final class DataTransferTraceInfoProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.DataTransferTraceInfoProto)
DataTransferTraceInfoProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use DataTransferTraceInfoProto.newBuilder() to construct.
private DataTransferTraceInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DataTransferTraceInfoProto() {
spanContext_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DataTransferTraceInfoProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
traceId_ = input.readUInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
parentId_ = input.readUInt64();
break;
}
case 26: {
bitField0_ |= 0x00000004;
spanContext_ = input.readBytes();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder.class);
}
private int bitField0_;
public static final int TRACEID_FIELD_NUMBER = 1;
private long traceId_;
/**
* optional uint64 traceId = 1;
*/
public boolean hasTraceId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* optional uint64 traceId = 1;
*/
public long getTraceId() {
return traceId_;
}
public static final int PARENTID_FIELD_NUMBER = 2;
private long parentId_;
/**
* optional uint64 parentId = 2;
*/
public boolean hasParentId() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional uint64 parentId = 2;
*/
public long getParentId() {
return parentId_;
}
public static final int SPANCONTEXT_FIELD_NUMBER = 3;
private org.apache.hadoop.thirdparty.protobuf.ByteString spanContext_;
/**
* optional bytes spanContext = 3;
*/
public boolean hasSpanContext() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional bytes spanContext = 3;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString getSpanContext() {
return spanContext_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeUInt64(1, traceId_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt64(2, parentId_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeBytes(3, spanContext_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(1, traceId_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(2, parentId_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBytesSize(3, spanContext_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto) obj;
if (hasTraceId() != other.hasTraceId()) return false;
if (hasTraceId()) {
if (getTraceId()
!= other.getTraceId()) return false;
}
if (hasParentId() != other.hasParentId()) return false;
if (hasParentId()) {
if (getParentId()
!= other.getParentId()) return false;
}
if (hasSpanContext() != other.hasSpanContext()) return false;
if (hasSpanContext()) {
if (!getSpanContext()
.equals(other.getSpanContext())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasTraceId()) {
hash = (37 * hash) + TRACEID_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getTraceId());
}
if (hasParentId()) {
hash = (37 * hash) + PARENTID_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getParentId());
}
if (hasSpanContext()) {
hash = (37 * hash) + SPANCONTEXT_FIELD_NUMBER;
hash = (53 * hash) + getSpanContext().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DataTransferTraceInfoProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.DataTransferTraceInfoProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
traceId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
parentId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
spanContext_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.traceId_ = traceId_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.parentId_ = parentId_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
to_bitField0_ |= 0x00000004;
}
result.spanContext_ = spanContext_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) return this;
if (other.hasTraceId()) {
setTraceId(other.getTraceId());
}
if (other.hasParentId()) {
setParentId(other.getParentId());
}
if (other.hasSpanContext()) {
setSpanContext(other.getSpanContext());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private long traceId_ ;
/**
* optional uint64 traceId = 1;
*/
public boolean hasTraceId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* optional uint64 traceId = 1;
*/
public long getTraceId() {
return traceId_;
}
/**
* optional uint64 traceId = 1;
*/
public Builder setTraceId(long value) {
bitField0_ |= 0x00000001;
traceId_ = value;
onChanged();
return this;
}
/**
* optional uint64 traceId = 1;
*/
public Builder clearTraceId() {
bitField0_ = (bitField0_ & ~0x00000001);
traceId_ = 0L;
onChanged();
return this;
}
private long parentId_ ;
/**
* optional uint64 parentId = 2;
*/
public boolean hasParentId() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional uint64 parentId = 2;
*/
public long getParentId() {
return parentId_;
}
/**
* optional uint64 parentId = 2;
*/
public Builder setParentId(long value) {
bitField0_ |= 0x00000002;
parentId_ = value;
onChanged();
return this;
}
/**
* optional uint64 parentId = 2;
*/
public Builder clearParentId() {
bitField0_ = (bitField0_ & ~0x00000002);
parentId_ = 0L;
onChanged();
return this;
}
private org.apache.hadoop.thirdparty.protobuf.ByteString spanContext_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
* optional bytes spanContext = 3;
*/
public boolean hasSpanContext() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional bytes spanContext = 3;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString getSpanContext() {
return spanContext_;
}
/**
* optional bytes spanContext = 3;
*/
public Builder setSpanContext(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
spanContext_ = value;
onChanged();
return this;
}
/**
* optional bytes spanContext = 3;
*/
public Builder clearSpanContext() {
bitField0_ = (bitField0_ & ~0x00000004);
spanContext_ = getDefaultInstance().getSpanContext();
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataTransferTraceInfoProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DataTransferTraceInfoProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferTraceInfoProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DataTransferTraceInfoProto>() {
@java.lang.Override
public DataTransferTraceInfoProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new DataTransferTraceInfoProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferTraceInfoProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferTraceInfoProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
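// --- Editor-added usage sketch, not emitted by protoc ---
// A minimal example of round-tripping the DataTransferTraceInfoProto message defined
// above: populate the optional traceId/parentId fields through the generated builder,
// serialize, and parse the bytes back with the parseFrom(byte[]) overload declared in
// this class. The literal values are placeholders, not real HDFS trace identifiers.
private static DataTransferTraceInfoProto exampleTraceInfoRoundTrip()
    throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
  DataTransferTraceInfoProto trace = DataTransferTraceInfoProto.newBuilder()
      .setTraceId(1L)    // optional uint64 traceId = 1
      .setParentId(2L)   // optional uint64 parentId = 2
      .build();          // the message declares no required fields, so build() succeeds
  byte[] wire = trace.toByteArray();                   // standard protobuf Message serialization
  return DataTransferTraceInfoProto.parseFrom(wire);   // parses the wire bytes back
}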
public interface ClientOperationHeaderProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ClientOperationHeaderProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
boolean hasBaseHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder();
/**
* required string clientName = 2;
*/
boolean hasClientName();
/**
* required string clientName = 2;
*/
java.lang.String getClientName();
/**
* required string clientName = 2;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getClientNameBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.ClientOperationHeaderProto}
*/
public static final class ClientOperationHeaderProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ClientOperationHeaderProto)
ClientOperationHeaderProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ClientOperationHeaderProto.newBuilder() to construct.
private ClientOperationHeaderProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ClientOperationHeaderProto() {
clientName_ = "";
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ClientOperationHeaderProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = baseHeader_.toBuilder();
}
baseHeader_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(baseHeader_);
baseHeader_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000002;
clientName_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder.class);
}
private int bitField0_;
public static final int BASEHEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_;
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public boolean hasBaseHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() {
return baseHeader_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : baseHeader_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() {
return baseHeader_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : baseHeader_;
}
public static final int CLIENTNAME_FIELD_NUMBER = 2;
private volatile java.lang.Object clientName_;
/**
* required string clientName = 2;
*/
public boolean hasClientName() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required string clientName = 2;
*/
public java.lang.String getClientName() {
java.lang.Object ref = clientName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
clientName_ = s;
}
return s;
}
}
/**
* required string clientName = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getClientNameBytes() {
java.lang.Object ref = clientName_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clientName_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasBaseHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasClientName()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBaseHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getBaseHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, clientName_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getBaseHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, clientName_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) obj;
if (hasBaseHeader() != other.hasBaseHeader()) return false;
if (hasBaseHeader()) {
if (!getBaseHeader()
.equals(other.getBaseHeader())) return false;
}
if (hasClientName() != other.hasClientName()) return false;
if (hasClientName()) {
if (!getClientName()
.equals(other.getClientName())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasBaseHeader()) {
hash = (37 * hash) + BASEHEADER_FIELD_NUMBER;
hash = (53 * hash) + getBaseHeader().hashCode();
}
if (hasClientName()) {
hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER;
hash = (53 * hash) + getClientName().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ClientOperationHeaderProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ClientOperationHeaderProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getBaseHeaderFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (baseHeaderBuilder_ == null) {
baseHeader_ = null;
} else {
baseHeaderBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
clientName_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (baseHeaderBuilder_ == null) {
result.baseHeader_ = baseHeader_;
} else {
result.baseHeader_ = baseHeaderBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.clientName_ = clientName_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) return this;
if (other.hasBaseHeader()) {
mergeBaseHeader(other.getBaseHeader());
}
if (other.hasClientName()) {
bitField0_ |= 0x00000002;
clientName_ = other.clientName_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasBaseHeader()) {
return false;
}
if (!hasClientName()) {
return false;
}
if (!getBaseHeader().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> baseHeaderBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public boolean hasBaseHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() {
if (baseHeaderBuilder_ == null) {
return baseHeader_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : baseHeader_;
} else {
return baseHeaderBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public Builder setBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (baseHeaderBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
baseHeader_ = value;
onChanged();
} else {
baseHeaderBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public Builder setBaseHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (baseHeaderBuilder_ == null) {
baseHeader_ = builderForValue.build();
onChanged();
} else {
baseHeaderBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public Builder mergeBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (baseHeaderBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
baseHeader_ != null &&
baseHeader_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
baseHeader_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(baseHeader_).mergeFrom(value).buildPartial();
} else {
baseHeader_ = value;
}
onChanged();
} else {
baseHeaderBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public Builder clearBaseHeader() {
if (baseHeaderBuilder_ == null) {
baseHeader_ = null;
onChanged();
} else {
baseHeaderBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getBaseHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBaseHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() {
if (baseHeaderBuilder_ != null) {
return baseHeaderBuilder_.getMessageOrBuilder();
} else {
return baseHeader_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : baseHeader_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getBaseHeaderFieldBuilder() {
if (baseHeaderBuilder_ == null) {
baseHeaderBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
getBaseHeader(),
getParentForChildren(),
isClean());
baseHeader_ = null;
}
return baseHeaderBuilder_;
}
private java.lang.Object clientName_ = "";
/**
* required string clientName = 2;
*/
public boolean hasClientName() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required string clientName = 2;
*/
public java.lang.String getClientName() {
java.lang.Object ref = clientName_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
clientName_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string clientName = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getClientNameBytes() {
java.lang.Object ref = clientName_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clientName_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string clientName = 2;
*/
public Builder setClientName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
clientName_ = value;
onChanged();
return this;
}
/**
* required string clientName = 2;
*/
public Builder clearClientName() {
bitField0_ = (bitField0_ & ~0x00000002);
clientName_ = getDefaultInstance().getClientName();
onChanged();
return this;
}
/**
* required string clientName = 2;
*/
public Builder setClientNameBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
clientName_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ClientOperationHeaderProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ClientOperationHeaderProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ClientOperationHeaderProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ClientOperationHeaderProto>() {
@java.lang.Override
public ClientOperationHeaderProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ClientOperationHeaderProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ClientOperationHeaderProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ClientOperationHeaderProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
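// --- Editor-added usage sketch, not emitted by protoc ---
// Shows how a client-side operation header is assembled from the accessors above:
// the required clientName string plus the required BaseHeaderProto sub-message.
// The client name is an illustrative placeholder. buildPartial() is used because
// isInitialized() also requires the nested baseHeader to be fully initialized,
// which this sketch does not guarantee for an arbitrary caller-supplied header.
private static ClientOperationHeaderProto exampleClientHeader(
    BaseHeaderProto baseHeader) {
  return ClientOperationHeaderProto.newBuilder()
      .setClientName("DFSClient_example")  // required string clientName = 2
      .setBaseHeader(baseHeader)           // required .hadoop.hdfs.BaseHeaderProto baseHeader = 1
      .buildPartial();
}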
public interface CachingStrategyProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.CachingStrategyProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* optional bool dropBehind = 1;
*/
boolean hasDropBehind();
/**
* optional bool dropBehind = 1;
*/
boolean getDropBehind();
/**
* optional int64 readahead = 2;
*/
boolean hasReadahead();
/**
* optional int64 readahead = 2;
*/
long getReadahead();
}
/**
* Protobuf type {@code hadoop.hdfs.CachingStrategyProto}
*/
public static final class CachingStrategyProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.CachingStrategyProto)
CachingStrategyProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use CachingStrategyProto.newBuilder() to construct.
private CachingStrategyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private CachingStrategyProto() {
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CachingStrategyProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
dropBehind_ = input.readBool();
break;
}
case 16: {
bitField0_ |= 0x00000002;
readahead_ = input.readInt64();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder.class);
}
private int bitField0_;
public static final int DROPBEHIND_FIELD_NUMBER = 1;
private boolean dropBehind_;
/**
* optional bool dropBehind = 1;
*/
public boolean hasDropBehind() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* optional bool dropBehind = 1;
*/
public boolean getDropBehind() {
return dropBehind_;
}
public static final int READAHEAD_FIELD_NUMBER = 2;
private long readahead_;
/**
* optional int64 readahead = 2;
*/
public boolean hasReadahead() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional int64 readahead = 2;
*/
public long getReadahead() {
return readahead_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeBool(1, dropBehind_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeInt64(2, readahead_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(1, dropBehind_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeInt64Size(2, readahead_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto) obj;
if (hasDropBehind() != other.hasDropBehind()) return false;
if (hasDropBehind()) {
if (getDropBehind()
!= other.getDropBehind()) return false;
}
if (hasReadahead() != other.hasReadahead()) return false;
if (hasReadahead()) {
if (getReadahead()
!= other.getReadahead()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasDropBehind()) {
hash = (37 * hash) + DROPBEHIND_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getDropBehind());
}
if (hasReadahead()) {
hash = (37 * hash) + READAHEAD_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getReadahead());
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.CachingStrategyProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.CachingStrategyProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
dropBehind_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
readahead_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.dropBehind_ = dropBehind_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.readahead_ = readahead_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance()) return this;
if (other.hasDropBehind()) {
setDropBehind(other.getDropBehind());
}
if (other.hasReadahead()) {
setReadahead(other.getReadahead());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private boolean dropBehind_ ;
/**
* optional bool dropBehind = 1;
*/
public boolean hasDropBehind() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* optional bool dropBehind = 1;
*/
public boolean getDropBehind() {
return dropBehind_;
}
/**
* optional bool dropBehind = 1;
*/
public Builder setDropBehind(boolean value) {
bitField0_ |= 0x00000001;
dropBehind_ = value;
onChanged();
return this;
}
/**
* optional bool dropBehind = 1;
*/
public Builder clearDropBehind() {
bitField0_ = (bitField0_ & ~0x00000001);
dropBehind_ = false;
onChanged();
return this;
}
private long readahead_ ;
/**
* optional int64 readahead = 2;
*/
public boolean hasReadahead() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional int64 readahead = 2;
*/
public long getReadahead() {
return readahead_;
}
/**
* optional int64 readahead = 2;
*/
public Builder setReadahead(long value) {
bitField0_ |= 0x00000002;
readahead_ = value;
onChanged();
return this;
}
/**
* optional int64 readahead = 2;
*/
public Builder clearReadahead() {
bitField0_ = (bitField0_ & ~0x00000002);
readahead_ = 0L;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CachingStrategyProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CachingStrategyProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CachingStrategyProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CachingStrategyProto>() {
@java.lang.Override
public CachingStrategyProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new CachingStrategyProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<CachingStrategyProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<CachingStrategyProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
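// Usage sketch (not part of the generated code): constructing, serializing, and re-parsing a
// CachingStrategyProto through the builder API defined above. The readahead value is an
// arbitrary example (4 MiB), not a value mandated by HDFS.
//
//   CachingStrategyProto strategy = CachingStrategyProto.newBuilder()
//       .setDropBehind(true)              // sets bit 0x01, so hasDropBehind() returns true
//       .setReadahead(4L * 1024 * 1024)   // optional int64 readahead = 2
//       .build();                         // both fields are optional, so build() cannot fail
//   byte[] wire = strategy.toByteArray();
//   CachingStrategyProto roundTripped = CachingStrategyProto.parseFrom(wire);
//   assert roundTripped.equals(strategy); // uses the field-wise equals() defined above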
public interface OpReadBlockProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpReadBlockProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
/**
* required uint64 offset = 2;
*/
boolean hasOffset();
/**
* required uint64 offset = 2;
*/
long getOffset();
/**
* required uint64 len = 3;
*/
boolean hasLen();
/**
* required uint64 len = 3;
*/
long getLen();
/**
* optional bool sendChecksums = 4 [default = true];
*/
boolean hasSendChecksums();
/**
* optional bool sendChecksums = 4 [default = true];
*/
boolean getSendChecksums();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
boolean hasCachingStrategy();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.OpReadBlockProto}
*/
public static final class OpReadBlockProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpReadBlockProto)
OpReadBlockProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpReadBlockProto.newBuilder() to construct.
private OpReadBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpReadBlockProto() {
sendChecksums_ = true;
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpReadBlockProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
offset_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
len_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
sendChecksums_ = input.readBool();
break;
}
case 42: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) != 0)) {
subBuilder = cachingStrategy_.toBuilder();
}
cachingStrategy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(cachingStrategy_);
cachingStrategy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.Builder.class);
}
private int bitField0_;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
}
public static final int OFFSET_FIELD_NUMBER = 2;
private long offset_;
/**
* required uint64 offset = 2;
*/
public boolean hasOffset() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required uint64 offset = 2;
*/
public long getOffset() {
return offset_;
}
public static final int LEN_FIELD_NUMBER = 3;
private long len_;
/**
* required uint64 len = 3;
*/
public boolean hasLen() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required uint64 len = 3;
*/
public long getLen() {
return len_;
}
public static final int SENDCHECKSUMS_FIELD_NUMBER = 4;
private boolean sendChecksums_;
/**
* optional bool sendChecksums = 4 [default = true];
*/
public boolean hasSendChecksums() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional bool sendChecksums = 4 [default = true];
*/
public boolean getSendChecksums() {
return sendChecksums_;
}
public static final int CACHINGSTRATEGY_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public boolean hasCachingStrategy() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasOffset()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLen()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt64(2, offset_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeUInt64(3, len_);
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeBool(4, sendChecksums_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeMessage(5, getCachingStrategy());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(2, offset_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(3, len_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(4, sendChecksums_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(5, getCachingStrategy());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) obj;
if (hasHeader() != other.hasHeader()) return false;
if (hasHeader()) {
if (!getHeader()
.equals(other.getHeader())) return false;
}
if (hasOffset() != other.hasOffset()) return false;
if (hasOffset()) {
if (getOffset()
!= other.getOffset()) return false;
}
if (hasLen() != other.hasLen()) return false;
if (hasLen()) {
if (getLen()
!= other.getLen()) return false;
}
if (hasSendChecksums() != other.hasSendChecksums()) return false;
if (hasSendChecksums()) {
if (getSendChecksums()
!= other.getSendChecksums()) return false;
}
if (hasCachingStrategy() != other.hasCachingStrategy()) return false;
if (hasCachingStrategy()) {
if (!getCachingStrategy()
.equals(other.getCachingStrategy())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (hasOffset()) {
hash = (37 * hash) + OFFSET_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getOffset());
}
if (hasLen()) {
hash = (37 * hash) + LEN_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getLen());
}
if (hasSendChecksums()) {
hash = (37 * hash) + SENDCHECKSUMS_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getSendChecksums());
}
if (hasCachingStrategy()) {
hash = (37 * hash) + CACHINGSTRATEGY_FIELD_NUMBER;
hash = (53 * hash) + getCachingStrategy().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpReadBlockProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpReadBlockProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getCachingStrategyFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = null;
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
offset_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
len_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
sendChecksums_ = true;
bitField0_ = (bitField0_ & ~0x00000008);
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = null;
} else {
cachingStrategyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.offset_ = offset_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.len_ = len_;
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
to_bitField0_ |= 0x00000008;
}
result.sendChecksums_ = sendChecksums_;
if (((from_bitField0_ & 0x00000010) != 0)) {
if (cachingStrategyBuilder_ == null) {
result.cachingStrategy_ = cachingStrategy_;
} else {
result.cachingStrategy_ = cachingStrategyBuilder_.build();
}
to_bitField0_ |= 0x00000010;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (other.hasOffset()) {
setOffset(other.getOffset());
}
if (other.hasLen()) {
setLen(other.getLen());
}
if (other.hasSendChecksums()) {
setSendChecksums(other.getSendChecksums());
}
if (other.hasCachingStrategy()) {
mergeCachingStrategy(other.getCachingStrategy());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasOffset()) {
return false;
}
if (!hasLen()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
header_ != null &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = null;
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
getHeader(),
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
private long offset_ ;
/**
* required uint64 offset = 2;
*/
public boolean hasOffset() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required uint64 offset = 2;
*/
public long getOffset() {
return offset_;
}
/**
* required uint64 offset = 2;
*/
public Builder setOffset(long value) {
bitField0_ |= 0x00000002;
offset_ = value;
onChanged();
return this;
}
/**
* required uint64 offset = 2;
*/
public Builder clearOffset() {
bitField0_ = (bitField0_ & ~0x00000002);
offset_ = 0L;
onChanged();
return this;
}
private long len_ ;
/**
* required uint64 len = 3;
*/
public boolean hasLen() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required uint64 len = 3;
*/
public long getLen() {
return len_;
}
/**
* required uint64 len = 3;
*/
public Builder setLen(long value) {
bitField0_ |= 0x00000004;
len_ = value;
onChanged();
return this;
}
/**
* required uint64 len = 3;
*/
public Builder clearLen() {
bitField0_ = (bitField0_ & ~0x00000004);
len_ = 0L;
onChanged();
return this;
}
private boolean sendChecksums_ = true;
/**
* optional bool sendChecksums = 4 [default = true];
*/
public boolean hasSendChecksums() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional bool sendChecksums = 4 [default = true];
*/
public boolean getSendChecksums() {
return sendChecksums_;
}
/**
* optional bool sendChecksums = 4 [default = true];
*/
public Builder setSendChecksums(boolean value) {
bitField0_ |= 0x00000008;
sendChecksums_ = value;
onChanged();
return this;
}
/**
* optional bool sendChecksums = 4 [default = true];
*/
public Builder clearSendChecksums() {
bitField0_ = (bitField0_ & ~0x00000008);
sendChecksums_ = true;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder> cachingStrategyBuilder_;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public boolean hasCachingStrategy() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
if (cachingStrategyBuilder_ == null) {
return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
} else {
return cachingStrategyBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public Builder setCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
if (cachingStrategyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
cachingStrategy_ = value;
onChanged();
} else {
cachingStrategyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public Builder setCachingStrategy(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder builderForValue) {
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = builderForValue.build();
onChanged();
} else {
cachingStrategyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public Builder mergeCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
if (cachingStrategyBuilder_ == null) {
if (((bitField0_ & 0x00000010) != 0) &&
cachingStrategy_ != null &&
cachingStrategy_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance()) {
cachingStrategy_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.newBuilder(cachingStrategy_).mergeFrom(value).buildPartial();
} else {
cachingStrategy_ = value;
}
onChanged();
} else {
cachingStrategyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public Builder clearCachingStrategy() {
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = null;
onChanged();
} else {
cachingStrategyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder getCachingStrategyBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getCachingStrategyFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
if (cachingStrategyBuilder_ != null) {
return cachingStrategyBuilder_.getMessageOrBuilder();
} else {
return cachingStrategy_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
}
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>
getCachingStrategyFieldBuilder() {
if (cachingStrategyBuilder_ == null) {
cachingStrategyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>(
getCachingStrategy(),
getParentForChildren(),
isClean());
cachingStrategy_ = null;
}
return cachingStrategyBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpReadBlockProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpReadBlockProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpReadBlockProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpReadBlockProto>() {
@java.lang.Override
public OpReadBlockProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new OpReadBlockProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpReadBlockProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpReadBlockProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
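// Usage sketch (not part of the generated code): assembling an OpReadBlockProto read request with
// the builders above and writing it with length-delimited framing. The header variable is a
// placeholder; ClientOperationHeaderProto has required fields of its own, so it must be fully
// populated before build() will succeed. The offset/length values and the `out` stream are
// arbitrary example inputs.
//
//   ClientOperationHeaderProto header = ...;  // fully populated elsewhere (required field 1)
//   OpReadBlockProto op = OpReadBlockProto.newBuilder()
//       .setHeader(header)
//       .setOffset(0L)                        // required uint64 offset = 2
//       .setLen(64L * 1024)                   // required uint64 len = 3
//       .setSendChecksums(true)               // optional bool, default = true
//       .setCachingStrategy(CachingStrategyProto.newBuilder()
//           .setReadahead(4L * 1024 * 1024))  // optional nested message, field 5
//       .build();
//   op.writeDelimitedTo(out);                 // counterpart of parseDelimitedFrom(InputStream) above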
public interface ChecksumProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ChecksumProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
boolean hasType();
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getType();
/**
* required uint32 bytesPerChecksum = 2;
*/
boolean hasBytesPerChecksum();
/**
* required uint32 bytesPerChecksum = 2;
*/
int getBytesPerChecksum();
}
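// Usage sketch (not part of the generated code): reading a ChecksumProto off the wire using only
// the accessors declared in this interface and the parseFrom(byte[]) method generated below. The
// `data` array is assumed to hold a serialized ChecksumProto; parseFrom declares
// InvalidProtocolBufferException.
//
//   ChecksumProto checksum = ChecksumProto.parseFrom(data);
//   if (checksum.hasType() && checksum.hasBytesPerChecksum()) {
//     // e.g. CHECKSUM_CRC32C over 512-byte chunks in a typical HDFS deployment
//     System.out.println(checksum.getType() + " every "
//         + checksum.getBytesPerChecksum() + " bytes");
//   }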
/**
* Protobuf type {@code hadoop.hdfs.ChecksumProto}
*/
public static final class ChecksumProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ChecksumProto)
ChecksumProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ChecksumProto.newBuilder() to construct.
private ChecksumProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ChecksumProto() {
type_ = 0;
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ChecksumProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
type_ = rawValue;
}
break;
}
case 16: {
bitField0_ |= 0x00000002;
bytesPerChecksum_ = input.readUInt32();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder.class);
}
private int bitField0_;
public static final int TYPE_FIELD_NUMBER = 1;
private int type_;
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public boolean hasType() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getType() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(type_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL : result;
}
public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2;
private int bytesPerChecksum_;
/**
* required uint32 bytesPerChecksum = 2;
*/
public boolean hasBytesPerChecksum() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public int getBytesPerChecksum() {
return bytesPerChecksum_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasType()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBytesPerChecksum()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, type_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt32(2, bytesPerChecksum_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, type_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(2, bytesPerChecksum_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) obj;
if (hasType() != other.hasType()) return false;
if (hasType()) {
if (type_ != other.type_) return false;
}
if (hasBytesPerChecksum() != other.hasBytesPerChecksum()) return false;
if (hasBytesPerChecksum()) {
if (getBytesPerChecksum()
!= other.getBytesPerChecksum()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasType()) {
hash = (37 * hash) + TYPE_FIELD_NUMBER;
hash = (53 * hash) + type_;
}
if (hasBytesPerChecksum()) {
hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getBytesPerChecksum();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ChecksumProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ChecksumProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
type_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
bytesPerChecksum_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.type_ = type_;
if (((from_bitField0_ & 0x00000002) != 0)) {
result.bytesPerChecksum_ = bytesPerChecksum_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) return this;
if (other.hasType()) {
setType(other.getType());
}
if (other.hasBytesPerChecksum()) {
setBytesPerChecksum(other.getBytesPerChecksum());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasType()) {
return false;
}
if (!hasBytesPerChecksum()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private int type_ = 0;
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public boolean hasType() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getType() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(type_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL : result;
}
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
type_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public Builder clearType() {
bitField0_ = (bitField0_ & ~0x00000001);
type_ = 0;
onChanged();
return this;
}
private int bytesPerChecksum_ ;
/**
* required uint32 bytesPerChecksum = 2;
*/
public boolean hasBytesPerChecksum() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public int getBytesPerChecksum() {
return bytesPerChecksum_;
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public Builder setBytesPerChecksum(int value) {
bitField0_ |= 0x00000002;
bytesPerChecksum_ = value;
onChanged();
return this;
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public Builder clearBytesPerChecksum() {
bitField0_ = (bitField0_ & ~0x00000002);
bytesPerChecksum_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ChecksumProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ChecksumProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ChecksumProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ChecksumProto>() {
@java.lang.Override
public ChecksumProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ChecksumProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ChecksumProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ChecksumProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
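  /*
   * Usage sketch (illustrative only, not part of the generated file): ChecksumProto
   * carries the checksum algorithm plus the chunk size. Assuming the generated
   * builder and parse methods shown above, a client could build and round-trip
   * one roughly like this (the CHECKSUM_CRC32C constant is an assumed enum value
   * from HdfsProtos.ChecksumTypeProto):
   *
   *   DataTransferProtos.ChecksumProto checksum =
   *       DataTransferProtos.ChecksumProto.newBuilder()
   *           .setType(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C) // required field 1
   *           .setBytesPerChecksum(512)                              // required field 2
   *           .build();
   *   byte[] wire = checksum.toByteArray();
   *   DataTransferProtos.ChecksumProto parsed =
   *       DataTransferProtos.ChecksumProto.parseFrom(wire);
   *
   * Both required fields must be set before build(); otherwise the
   * isInitialized() check above causes build() to throw.
   */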
public interface OpWriteBlockProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpWriteBlockProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
getTargetsList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index);
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
int getTargetsCount();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index);
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
boolean hasSource();
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource();
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder();
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
boolean hasStage();
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage();
/**
* required uint32 pipelineSize = 5;
*/
boolean hasPipelineSize();
/**
* required uint32 pipelineSize = 5;
*/
int getPipelineSize();
/**
* required uint64 minBytesRcvd = 6;
*/
boolean hasMinBytesRcvd();
/**
* required uint64 minBytesRcvd = 6;
*/
long getMinBytesRcvd();
/**
* required uint64 maxBytesRcvd = 7;
*/
boolean hasMaxBytesRcvd();
/**
* required uint64 maxBytesRcvd = 7;
*/
long getMaxBytesRcvd();
/**
* required uint64 latestGenerationStamp = 8;
*/
boolean hasLatestGenerationStamp();
/**
* required uint64 latestGenerationStamp = 8;
*/
long getLatestGenerationStamp();
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
boolean hasRequestedChecksum();
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum();
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
boolean hasCachingStrategy();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder();
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
boolean hasStorageType();
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
int getTargetStorageTypesCount();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index);
/**
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*
* optional bool allowLazyPersist = 13 [default = false];
*/
boolean hasAllowLazyPersist();
/**
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*
* optional bool allowLazyPersist = 13 [default = false];
*/
boolean getAllowLazyPersist();
/**
*
*whether to pin the block, so Balancer won't move it.
*
*
* optional bool pinning = 14 [default = false];
*/
boolean hasPinning();
/**
*
*whether to pin the block, so Balancer won't move it.
*
*
* optional bool pinning = 14 [default = false];
*/
boolean getPinning();
/**
* repeated bool targetPinnings = 15;
*/
java.util.List<java.lang.Boolean> getTargetPinningsList();
/**
* repeated bool targetPinnings = 15;
*/
int getTargetPinningsCount();
/**
* repeated bool targetPinnings = 15;
*/
boolean getTargetPinnings(int index);
/**
* optional string storageId = 16;
*/
boolean hasStorageId();
/**
* optional string storageId = 16;
*/
java.lang.String getStorageId();
/**
* optional string storageId = 16;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getStorageIdBytes();
/**
* repeated string targetStorageIds = 17;
*/
java.util.List<java.lang.String>
getTargetStorageIdsList();
/**
* repeated string targetStorageIds = 17;
*/
int getTargetStorageIdsCount();
/**
* repeated string targetStorageIds = 17;
*/
java.lang.String getTargetStorageIds(int index);
/**
* repeated string targetStorageIds = 17;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getTargetStorageIdsBytes(int index);
}
/**
* Protobuf type {@code hadoop.hdfs.OpWriteBlockProto}
*/
public static final class OpWriteBlockProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpWriteBlockProto)
OpWriteBlockProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpWriteBlockProto.newBuilder() to construct.
private OpWriteBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpWriteBlockProto() {
targets_ = java.util.Collections.emptyList();
stage_ = 0;
storageType_ = 1;
targetStorageTypes_ = java.util.Collections.emptyList();
targetPinnings_ = emptyBooleanList();
storageId_ = "";
targetStorageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY;
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpWriteBlockProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) != 0)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>();
mutable_bitField0_ |= 0x00000002;
}
targets_.add(
input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry));
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) != 0)) {
subBuilder = source_.toBuilder();
}
source_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(source_);
source_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 32: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000004;
stage_ = rawValue;
}
break;
}
case 40: {
bitField0_ |= 0x00000008;
pipelineSize_ = input.readUInt32();
break;
}
case 48: {
bitField0_ |= 0x00000010;
minBytesRcvd_ = input.readUInt64();
break;
}
case 56: {
bitField0_ |= 0x00000020;
maxBytesRcvd_ = input.readUInt64();
break;
}
case 64: {
bitField0_ |= 0x00000040;
latestGenerationStamp_ = input.readUInt64();
break;
}
case 74: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000080) != 0)) {
subBuilder = requestedChecksum_.toBuilder();
}
requestedChecksum_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(requestedChecksum_);
requestedChecksum_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000080;
break;
}
case 82: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000100) != 0)) {
subBuilder = cachingStrategy_.toBuilder();
}
cachingStrategy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(cachingStrategy_);
cachingStrategy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000100;
break;
}
case 88: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(11, rawValue);
} else {
bitField0_ |= 0x00000200;
storageType_ = rawValue;
}
break;
}
case 96: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(12, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000800) != 0)) {
targetStorageTypes_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000800;
}
targetStorageTypes_.add(rawValue);
}
break;
}
case 98: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(12, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000800) != 0)) {
targetStorageTypes_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000800;
}
targetStorageTypes_.add(rawValue);
}
}
input.popLimit(oldLimit);
break;
}
case 104: {
bitField0_ |= 0x00000400;
allowLazyPersist_ = input.readBool();
break;
}
case 112: {
bitField0_ |= 0x00000800;
pinning_ = input.readBool();
break;
}
case 120: {
if (!((mutable_bitField0_ & 0x00004000) != 0)) {
targetPinnings_ = newBooleanList();
mutable_bitField0_ |= 0x00004000;
}
targetPinnings_.addBoolean(input.readBool());
break;
}
case 122: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00004000) != 0) && input.getBytesUntilLimit() > 0) {
targetPinnings_ = newBooleanList();
mutable_bitField0_ |= 0x00004000;
}
while (input.getBytesUntilLimit() > 0) {
targetPinnings_.addBoolean(input.readBool());
}
input.popLimit(limit);
break;
}
case 130: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00001000;
storageId_ = bs;
break;
}
case 138: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
if (!((mutable_bitField0_ & 0x00010000) != 0)) {
targetStorageIds_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00010000;
}
targetStorageIds_.add(bs);
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) != 0)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
}
if (((mutable_bitField0_ & 0x00000800) != 0)) {
targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
}
if (((mutable_bitField0_ & 0x00004000) != 0)) {
targetPinnings_.makeImmutable(); // C
}
if (((mutable_bitField0_ & 0x00010000) != 0)) {
targetStorageIds_ = targetStorageIds_.getUnmodifiableView();
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
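    // Editorial note (hedged): in the wire-parsing constructor above, each tag is
    // (field_number << 3) | wire_type. For example tag 10 is field 1 (header,
    // length-delimited), tag 32 is field 4 (stage, varint), and tag 98 is field 12
    // (targetStorageTypes encoded as packed varints inside a length-delimited
    // block). Tag 0 marks end of input; unrecognized tags are preserved in
    // unknownFields rather than dropped.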
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder.class);
}
/**
* Protobuf enum {@code hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage}
*/
public enum BlockConstructionStage
implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
/**
* PIPELINE_SETUP_APPEND = 0;
*/
PIPELINE_SETUP_APPEND(0),
/**
*
* pipeline set up for failed PIPELINE_SETUP_APPEND recovery
*
*
* PIPELINE_SETUP_APPEND_RECOVERY = 1;
*/
PIPELINE_SETUP_APPEND_RECOVERY(1),
/**
*
* data streaming
*
*
* DATA_STREAMING = 2;
*/
DATA_STREAMING(2),
/**
*
* pipeline setup for failed data streaming recovery
*
*
* PIPELINE_SETUP_STREAMING_RECOVERY = 3;
*/
PIPELINE_SETUP_STREAMING_RECOVERY(3),
/**
*
* close the block and pipeline
*
*
* PIPELINE_CLOSE = 4;
*/
PIPELINE_CLOSE(4),
/**
*
* Recover a failed PIPELINE_CLOSE
*
*
* PIPELINE_CLOSE_RECOVERY = 5;
*/
PIPELINE_CLOSE_RECOVERY(5),
/**
*
* pipeline set up for block creation
*
*
* PIPELINE_SETUP_CREATE = 6;
*/
PIPELINE_SETUP_CREATE(6),
/**
*
* transfer RBW for adding datanodes
*
*
* TRANSFER_RBW = 7;
*/
TRANSFER_RBW(7),
/**
*
* transfer Finalized for adding datanodes
*
*
* TRANSFER_FINALIZED = 8;
*/
TRANSFER_FINALIZED(8),
;
/**
* PIPELINE_SETUP_APPEND = 0;
*/
public static final int PIPELINE_SETUP_APPEND_VALUE = 0;
/**
*
* pipeline set up for failed PIPELINE_SETUP_APPEND recovery
*
*
* PIPELINE_SETUP_APPEND_RECOVERY = 1;
*/
public static final int PIPELINE_SETUP_APPEND_RECOVERY_VALUE = 1;
/**
*
* data streaming
*
*
* DATA_STREAMING = 2;
*/
public static final int DATA_STREAMING_VALUE = 2;
/**
*
* pipeline setup for failed data streaming recovery
*
*
* PIPELINE_SETUP_STREAMING_RECOVERY = 3;
*/
public static final int PIPELINE_SETUP_STREAMING_RECOVERY_VALUE = 3;
/**
*
* close the block and pipeline
*
*
* PIPELINE_CLOSE = 4;
*/
public static final int PIPELINE_CLOSE_VALUE = 4;
/**
*
* Recover a failed PIPELINE_CLOSE
*
*
* PIPELINE_CLOSE_RECOVERY = 5;
*/
public static final int PIPELINE_CLOSE_RECOVERY_VALUE = 5;
/**
*
* pipeline set up for block creation
*
*
* PIPELINE_SETUP_CREATE = 6;
*/
public static final int PIPELINE_SETUP_CREATE_VALUE = 6;
/**
*
* transfer RBW for adding datanodes
*
*
* TRANSFER_RBW = 7;
*/
public static final int TRANSFER_RBW_VALUE = 7;
/**
*
* transfer Finalized for adding datanodes
*
*
* TRANSFER_FINALIZED = 8;
*/
public static final int TRANSFER_FINALIZED_VALUE = 8;
public final int getNumber() {
return value;
}
/**
* @deprecated Use {@link #forNumber(int)} instead.
*/
@java.lang.Deprecated
public static BlockConstructionStage valueOf(int value) {
return forNumber(value);
}
public static BlockConstructionStage forNumber(int value) {
switch (value) {
case 0: return PIPELINE_SETUP_APPEND;
case 1: return PIPELINE_SETUP_APPEND_RECOVERY;
case 2: return DATA_STREAMING;
case 3: return PIPELINE_SETUP_STREAMING_RECOVERY;
case 4: return PIPELINE_CLOSE;
case 5: return PIPELINE_CLOSE_RECOVERY;
case 6: return PIPELINE_SETUP_CREATE;
case 7: return TRANSFER_RBW;
case 8: return TRANSFER_FINALIZED;
default: return null;
}
}
public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
internalGetValueMap() {
return internalValueMap;
}
private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
BlockConstructionStage> internalValueMap =
new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<BlockConstructionStage>() {
public BlockConstructionStage findValueByNumber(int number) {
return BlockConstructionStage.forNumber(number);
}
};
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(ordinal());
}
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor().getEnumTypes().get(0);
}
private static final BlockConstructionStage[] VALUES = values();
public static BlockConstructionStage valueOf(
org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int value;
private BlockConstructionStage(int value) {
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage)
}
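    // Editorial note (hedged): BlockConstructionStage is decoded from the wire via
    // forNumber(int); numbers outside 0..8 yield null and are routed into
    // unknownFields by the parsing constructor. For example:
    //
    //   OpWriteBlockProto.BlockConstructionStage stage =
    //       OpWriteBlockProto.BlockConstructionStage.forNumber(2);
    //   // stage == DATA_STREAMING; forNumber(99) returns null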
private int bitField0_;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
}
public static final int TARGETS_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
return targets_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList() {
return targets_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public int getTargetsCount() {
return targets_.size();
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
return targets_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index) {
return targets_.get(index);
}
public static final int SOURCE_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public boolean hasSource() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
}
public static final int STAGE_FIELD_NUMBER = 4;
private int stage_;
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public boolean hasStage() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.valueOf(stage_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND : result;
}
public static final int PIPELINESIZE_FIELD_NUMBER = 5;
private int pipelineSize_;
/**
* required uint32 pipelineSize = 5;
*/
public boolean hasPipelineSize() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* required uint32 pipelineSize = 5;
*/
public int getPipelineSize() {
return pipelineSize_;
}
public static final int MINBYTESRCVD_FIELD_NUMBER = 6;
private long minBytesRcvd_;
/**
* required uint64 minBytesRcvd = 6;
*/
public boolean hasMinBytesRcvd() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* required uint64 minBytesRcvd = 6;
*/
public long getMinBytesRcvd() {
return minBytesRcvd_;
}
public static final int MAXBYTESRCVD_FIELD_NUMBER = 7;
private long maxBytesRcvd_;
/**
* required uint64 maxBytesRcvd = 7;
*/
public boolean hasMaxBytesRcvd() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
* required uint64 maxBytesRcvd = 7;
*/
public long getMaxBytesRcvd() {
return maxBytesRcvd_;
}
public static final int LATESTGENERATIONSTAMP_FIELD_NUMBER = 8;
private long latestGenerationStamp_;
/**
* required uint64 latestGenerationStamp = 8;
*/
public boolean hasLatestGenerationStamp() {
return ((bitField0_ & 0x00000040) != 0);
}
/**
* required uint64 latestGenerationStamp = 8;
*/
public long getLatestGenerationStamp() {
return latestGenerationStamp_;
}
public static final int REQUESTEDCHECKSUM_FIELD_NUMBER = 9;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_;
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
public boolean hasRequestedChecksum() {
return ((bitField0_ & 0x00000080) != 0);
}
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum() {
return requestedChecksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : requestedChecksum_;
}
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() {
return requestedChecksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : requestedChecksum_;
}
public static final int CACHINGSTRATEGY_FIELD_NUMBER = 10;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public boolean hasCachingStrategy() {
return ((bitField0_ & 0x00000100) != 0);
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
}
public static final int STORAGETYPE_FIELD_NUMBER = 11;
private int storageType_;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000200) != 0);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(storageType_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
}
public static final int TARGETSTORAGETYPES_FIELD_NUMBER = 12;
private java.util.List<java.lang.Integer> targetStorageTypes_;
private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> targetStorageTypes_converter_ =
new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() {
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(from);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
}
};
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_, targetStorageTypes_converter_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public int getTargetStorageTypesCount() {
return targetStorageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
return targetStorageTypes_converter_.convert(targetStorageTypes_.get(index));
}
public static final int ALLOWLAZYPERSIST_FIELD_NUMBER = 13;
private boolean allowLazyPersist_;
/**
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*
* optional bool allowLazyPersist = 13 [default = false];
*/
public boolean hasAllowLazyPersist() {
return ((bitField0_ & 0x00000400) != 0);
}
/**
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*
* optional bool allowLazyPersist = 13 [default = false];
*/
public boolean getAllowLazyPersist() {
return allowLazyPersist_;
}
public static final int PINNING_FIELD_NUMBER = 14;
private boolean pinning_;
/**
*
*whether to pin the block, so Balancer won't move it.
*
*
* optional bool pinning = 14 [default = false];
*/
public boolean hasPinning() {
return ((bitField0_ & 0x00000800) != 0);
}
/**
*
*whether to pin the block, so Balancer won't move it.
*
*
* optional bool pinning = 14 [default = false];
*/
public boolean getPinning() {
return pinning_;
}
public static final int TARGETPINNINGS_FIELD_NUMBER = 15;
private org.apache.hadoop.thirdparty.protobuf.Internal.BooleanList targetPinnings_;
/**
* repeated bool targetPinnings = 15;
*/
public java.util.List<java.lang.Boolean>
getTargetPinningsList() {
return targetPinnings_;
}
/**
* repeated bool targetPinnings = 15;
*/
public int getTargetPinningsCount() {
return targetPinnings_.size();
}
/**
* repeated bool targetPinnings = 15;
*/
public boolean getTargetPinnings(int index) {
return targetPinnings_.getBoolean(index);
}
public static final int STORAGEID_FIELD_NUMBER = 16;
private volatile java.lang.Object storageId_;
/**
* optional string storageId = 16;
*/
public boolean hasStorageId() {
return ((bitField0_ & 0x00001000) != 0);
}
/**
* optional string storageId = 16;
*/
public java.lang.String getStorageId() {
java.lang.Object ref = storageId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageId_ = s;
}
return s;
}
}
/**
* optional string storageId = 16;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getStorageIdBytes() {
java.lang.Object ref = storageId_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageId_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int TARGETSTORAGEIDS_FIELD_NUMBER = 17;
private org.apache.hadoop.thirdparty.protobuf.LazyStringList targetStorageIds_;
/**
* repeated string targetStorageIds = 17;
*/
public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
getTargetStorageIdsList() {
return targetStorageIds_;
}
/**
* repeated string targetStorageIds = 17;
*/
public int getTargetStorageIdsCount() {
return targetStorageIds_.size();
}
/**
* repeated string targetStorageIds = 17;
*/
public java.lang.String getTargetStorageIds(int index) {
return targetStorageIds_.get(index);
}
/**
* repeated string targetStorageIds = 17;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getTargetStorageIdsBytes(int index) {
return targetStorageIds_.getByteString(index);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStage()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPipelineSize()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMinBytesRcvd()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMaxBytesRcvd()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLatestGenerationStamp()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasRequestedChecksum()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasSource()) {
if (!getSource().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (!getRequestedChecksum().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getHeader());
}
for (int i = 0; i < targets_.size(); i++) {
output.writeMessage(2, targets_.get(i));
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(3, getSource());
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeEnum(4, stage_);
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeUInt32(5, pipelineSize_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeUInt64(6, minBytesRcvd_);
}
if (((bitField0_ & 0x00000020) != 0)) {
output.writeUInt64(7, maxBytesRcvd_);
}
if (((bitField0_ & 0x00000040) != 0)) {
output.writeUInt64(8, latestGenerationStamp_);
}
if (((bitField0_ & 0x00000080) != 0)) {
output.writeMessage(9, getRequestedChecksum());
}
if (((bitField0_ & 0x00000100) != 0)) {
output.writeMessage(10, getCachingStrategy());
}
if (((bitField0_ & 0x00000200) != 0)) {
output.writeEnum(11, storageType_);
}
for (int i = 0; i < targetStorageTypes_.size(); i++) {
output.writeEnum(12, targetStorageTypes_.get(i));
}
if (((bitField0_ & 0x00000400) != 0)) {
output.writeBool(13, allowLazyPersist_);
}
if (((bitField0_ & 0x00000800) != 0)) {
output.writeBool(14, pinning_);
}
for (int i = 0; i < targetPinnings_.size(); i++) {
output.writeBool(15, targetPinnings_.getBoolean(i));
}
if (((bitField0_ & 0x00001000) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 16, storageId_);
}
for (int i = 0; i < targetStorageIds_.size(); i++) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 17, targetStorageIds_.getRaw(i));
}
unknownFields.writeTo(output);
}
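    // Editorial note (hedged): writeTo above emits fields in ascending field-number
    // order (1..17). The repeated enum field 12 and repeated bool field 15 are
    // written one tag per element here (unpacked); the parsing constructor accepts
    // both this form and the packed form (cases 98 and 122), so either encoding
    // reads back correctly.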
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getHeader());
}
for (int i = 0; i < targets_.size(); i++) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(2, targets_.get(i));
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(3, getSource());
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(4, stage_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(5, pipelineSize_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(6, minBytesRcvd_);
}
if (((bitField0_ & 0x00000020) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(7, maxBytesRcvd_);
}
if (((bitField0_ & 0x00000040) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(8, latestGenerationStamp_);
}
if (((bitField0_ & 0x00000080) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(9, getRequestedChecksum());
}
if (((bitField0_ & 0x00000100) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(10, getCachingStrategy());
}
if (((bitField0_ & 0x00000200) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(11, storageType_);
}
{
int dataSize = 0;
for (int i = 0; i < targetStorageTypes_.size(); i++) {
dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSizeNoTag(targetStorageTypes_.get(i));
}
size += dataSize;
size += 1 * targetStorageTypes_.size();
}
if (((bitField0_ & 0x00000400) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(13, allowLazyPersist_);
}
if (((bitField0_ & 0x00000800) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(14, pinning_);
}
{
int dataSize = 0;
dataSize = 1 * getTargetPinningsList().size();
size += dataSize;
size += 1 * getTargetPinningsList().size();
}
if (((bitField0_ & 0x00001000) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(16, storageId_);
}
{
int dataSize = 0;
for (int i = 0; i < targetStorageIds_.size(); i++) {
dataSize += computeStringSizeNoTag(targetStorageIds_.getRaw(i));
}
size += dataSize;
size += 2 * getTargetStorageIdsList().size();
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) obj;
if (hasHeader() != other.hasHeader()) return false;
if (hasHeader()) {
if (!getHeader()
.equals(other.getHeader())) return false;
}
if (!getTargetsList()
.equals(other.getTargetsList())) return false;
if (hasSource() != other.hasSource()) return false;
if (hasSource()) {
if (!getSource()
.equals(other.getSource())) return false;
}
if (hasStage() != other.hasStage()) return false;
if (hasStage()) {
if (stage_ != other.stage_) return false;
}
if (hasPipelineSize() != other.hasPipelineSize()) return false;
if (hasPipelineSize()) {
if (getPipelineSize()
!= other.getPipelineSize()) return false;
}
if (hasMinBytesRcvd() != other.hasMinBytesRcvd()) return false;
if (hasMinBytesRcvd()) {
if (getMinBytesRcvd()
!= other.getMinBytesRcvd()) return false;
}
if (hasMaxBytesRcvd() != other.hasMaxBytesRcvd()) return false;
if (hasMaxBytesRcvd()) {
if (getMaxBytesRcvd()
!= other.getMaxBytesRcvd()) return false;
}
if (hasLatestGenerationStamp() != other.hasLatestGenerationStamp()) return false;
if (hasLatestGenerationStamp()) {
if (getLatestGenerationStamp()
!= other.getLatestGenerationStamp()) return false;
}
if (hasRequestedChecksum() != other.hasRequestedChecksum()) return false;
if (hasRequestedChecksum()) {
if (!getRequestedChecksum()
.equals(other.getRequestedChecksum())) return false;
}
if (hasCachingStrategy() != other.hasCachingStrategy()) return false;
if (hasCachingStrategy()) {
if (!getCachingStrategy()
.equals(other.getCachingStrategy())) return false;
}
if (hasStorageType() != other.hasStorageType()) return false;
if (hasStorageType()) {
if (storageType_ != other.storageType_) return false;
}
if (!targetStorageTypes_.equals(other.targetStorageTypes_)) return false;
if (hasAllowLazyPersist() != other.hasAllowLazyPersist()) return false;
if (hasAllowLazyPersist()) {
if (getAllowLazyPersist()
!= other.getAllowLazyPersist()) return false;
}
if (hasPinning() != other.hasPinning()) return false;
if (hasPinning()) {
if (getPinning()
!= other.getPinning()) return false;
}
if (!getTargetPinningsList()
.equals(other.getTargetPinningsList())) return false;
if (hasStorageId() != other.hasStorageId()) return false;
if (hasStorageId()) {
if (!getStorageId()
.equals(other.getStorageId())) return false;
}
if (!getTargetStorageIdsList()
.equals(other.getTargetStorageIdsList())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (getTargetsCount() > 0) {
hash = (37 * hash) + TARGETS_FIELD_NUMBER;
hash = (53 * hash) + getTargetsList().hashCode();
}
if (hasSource()) {
hash = (37 * hash) + SOURCE_FIELD_NUMBER;
hash = (53 * hash) + getSource().hashCode();
}
if (hasStage()) {
hash = (37 * hash) + STAGE_FIELD_NUMBER;
hash = (53 * hash) + stage_;
}
if (hasPipelineSize()) {
hash = (37 * hash) + PIPELINESIZE_FIELD_NUMBER;
hash = (53 * hash) + getPipelineSize();
}
if (hasMinBytesRcvd()) {
hash = (37 * hash) + MINBYTESRCVD_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getMinBytesRcvd());
}
if (hasMaxBytesRcvd()) {
hash = (37 * hash) + MAXBYTESRCVD_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getMaxBytesRcvd());
}
if (hasLatestGenerationStamp()) {
hash = (37 * hash) + LATESTGENERATIONSTAMP_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getLatestGenerationStamp());
}
if (hasRequestedChecksum()) {
hash = (37 * hash) + REQUESTEDCHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getRequestedChecksum().hashCode();
}
if (hasCachingStrategy()) {
hash = (37 * hash) + CACHINGSTRATEGY_FIELD_NUMBER;
hash = (53 * hash) + getCachingStrategy().hashCode();
}
if (hasStorageType()) {
hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
hash = (53 * hash) + storageType_;
}
if (getTargetStorageTypesCount() > 0) {
hash = (37 * hash) + TARGETSTORAGETYPES_FIELD_NUMBER;
hash = (53 * hash) + targetStorageTypes_.hashCode();
}
if (hasAllowLazyPersist()) {
hash = (37 * hash) + ALLOWLAZYPERSIST_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getAllowLazyPersist());
}
if (hasPinning()) {
hash = (37 * hash) + PINNING_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getPinning());
}
if (getTargetPinningsCount() > 0) {
hash = (37 * hash) + TARGETPINNINGS_FIELD_NUMBER;
hash = (53 * hash) + getTargetPinningsList().hashCode();
}
if (hasStorageId()) {
hash = (37 * hash) + STORAGEID_FIELD_NUMBER;
hash = (53 * hash) + getStorageId().hashCode();
}
if (getTargetStorageIdsCount() > 0) {
hash = (37 * hash) + TARGETSTORAGEIDS_FIELD_NUMBER;
hash = (53 * hash) + getTargetStorageIdsList().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
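// Usage sketch (editor's note, not generated code): the overloads above follow the
// standard protobuf parsing pattern. Assuming "in" is a hypothetical java.io.InputStream
// carrying a message previously written with writeDelimitedTo():
//
//   OpWriteBlockProto op = OpWriteBlockProto.parseDelimitedFrom(in);
//
// The parseDelimitedFrom() variants expect a varint length prefix, while the plain
// parseFrom() overloads expect exactly the raw message bytes.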
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpWriteBlockProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpWriteBlockProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getTargetsFieldBuilder();
getSourceFieldBuilder();
getRequestedChecksumFieldBuilder();
getCachingStrategyFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = null;
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
targetsBuilder_.clear();
}
if (sourceBuilder_ == null) {
source_ = null;
} else {
sourceBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
stage_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
pipelineSize_ = 0;
bitField0_ = (bitField0_ & ~0x00000010);
minBytesRcvd_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
maxBytesRcvd_ = 0L;
bitField0_ = (bitField0_ & ~0x00000040);
latestGenerationStamp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000080);
if (requestedChecksumBuilder_ == null) {
requestedChecksum_ = null;
} else {
requestedChecksumBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000100);
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = null;
} else {
cachingStrategyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000200);
storageType_ = 1;
bitField0_ = (bitField0_ & ~0x00000400);
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000800);
allowLazyPersist_ = false;
bitField0_ = (bitField0_ & ~0x00001000);
pinning_ = false;
bitField0_ = (bitField0_ & ~0x00002000);
targetPinnings_ = emptyBooleanList();
bitField0_ = (bitField0_ & ~0x00004000);
storageId_ = "";
bitField0_ = (bitField0_ & ~0x00008000);
targetStorageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00010000);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (targetsBuilder_ == null) {
if (((bitField0_ & 0x00000002) != 0)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.targets_ = targets_;
} else {
result.targets_ = targetsBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) != 0)) {
if (sourceBuilder_ == null) {
result.source_ = source_;
} else {
result.source_ = sourceBuilder_.build();
}
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
to_bitField0_ |= 0x00000004;
}
result.stage_ = stage_;
if (((from_bitField0_ & 0x00000010) != 0)) {
result.pipelineSize_ = pipelineSize_;
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000020) != 0)) {
result.minBytesRcvd_ = minBytesRcvd_;
to_bitField0_ |= 0x00000010;
}
if (((from_bitField0_ & 0x00000040) != 0)) {
result.maxBytesRcvd_ = maxBytesRcvd_;
to_bitField0_ |= 0x00000020;
}
if (((from_bitField0_ & 0x00000080) != 0)) {
result.latestGenerationStamp_ = latestGenerationStamp_;
to_bitField0_ |= 0x00000040;
}
if (((from_bitField0_ & 0x00000100) != 0)) {
if (requestedChecksumBuilder_ == null) {
result.requestedChecksum_ = requestedChecksum_;
} else {
result.requestedChecksum_ = requestedChecksumBuilder_.build();
}
to_bitField0_ |= 0x00000080;
}
if (((from_bitField0_ & 0x00000200) != 0)) {
if (cachingStrategyBuilder_ == null) {
result.cachingStrategy_ = cachingStrategy_;
} else {
result.cachingStrategy_ = cachingStrategyBuilder_.build();
}
to_bitField0_ |= 0x00000100;
}
if (((from_bitField0_ & 0x00000400) != 0)) {
to_bitField0_ |= 0x00000200;
}
result.storageType_ = storageType_;
if (((bitField0_ & 0x00000800) != 0)) {
targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
bitField0_ = (bitField0_ & ~0x00000800);
}
result.targetStorageTypes_ = targetStorageTypes_;
if (((from_bitField0_ & 0x00001000) != 0)) {
result.allowLazyPersist_ = allowLazyPersist_;
to_bitField0_ |= 0x00000400;
}
if (((from_bitField0_ & 0x00002000) != 0)) {
result.pinning_ = pinning_;
to_bitField0_ |= 0x00000800;
}
if (((bitField0_ & 0x00004000) != 0)) {
targetPinnings_.makeImmutable();
bitField0_ = (bitField0_ & ~0x00004000);
}
result.targetPinnings_ = targetPinnings_;
if (((from_bitField0_ & 0x00008000) != 0)) {
to_bitField0_ |= 0x00001000;
}
result.storageId_ = storageId_;
if (((bitField0_ & 0x00010000) != 0)) {
targetStorageIds_ = targetStorageIds_.getUnmodifiableView();
bitField0_ = (bitField0_ & ~0x00010000);
}
result.targetStorageIds_ = targetStorageIds_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (targetsBuilder_ == null) {
if (!other.targets_.isEmpty()) {
if (targets_.isEmpty()) {
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureTargetsIsMutable();
targets_.addAll(other.targets_);
}
onChanged();
}
} else {
if (!other.targets_.isEmpty()) {
if (targetsBuilder_.isEmpty()) {
targetsBuilder_.dispose();
targetsBuilder_ = null;
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000002);
targetsBuilder_ =
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getTargetsFieldBuilder() : null;
} else {
targetsBuilder_.addAllMessages(other.targets_);
}
}
}
if (other.hasSource()) {
mergeSource(other.getSource());
}
if (other.hasStage()) {
setStage(other.getStage());
}
if (other.hasPipelineSize()) {
setPipelineSize(other.getPipelineSize());
}
if (other.hasMinBytesRcvd()) {
setMinBytesRcvd(other.getMinBytesRcvd());
}
if (other.hasMaxBytesRcvd()) {
setMaxBytesRcvd(other.getMaxBytesRcvd());
}
if (other.hasLatestGenerationStamp()) {
setLatestGenerationStamp(other.getLatestGenerationStamp());
}
if (other.hasRequestedChecksum()) {
mergeRequestedChecksum(other.getRequestedChecksum());
}
if (other.hasCachingStrategy()) {
mergeCachingStrategy(other.getCachingStrategy());
}
if (other.hasStorageType()) {
setStorageType(other.getStorageType());
}
if (!other.targetStorageTypes_.isEmpty()) {
if (targetStorageTypes_.isEmpty()) {
targetStorageTypes_ = other.targetStorageTypes_;
bitField0_ = (bitField0_ & ~0x00000800);
} else {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.addAll(other.targetStorageTypes_);
}
onChanged();
}
if (other.hasAllowLazyPersist()) {
setAllowLazyPersist(other.getAllowLazyPersist());
}
if (other.hasPinning()) {
setPinning(other.getPinning());
}
if (!other.targetPinnings_.isEmpty()) {
if (targetPinnings_.isEmpty()) {
targetPinnings_ = other.targetPinnings_;
bitField0_ = (bitField0_ & ~0x00004000);
} else {
ensureTargetPinningsIsMutable();
targetPinnings_.addAll(other.targetPinnings_);
}
onChanged();
}
if (other.hasStorageId()) {
bitField0_ |= 0x00008000;
storageId_ = other.storageId_;
onChanged();
}
if (!other.targetStorageIds_.isEmpty()) {
if (targetStorageIds_.isEmpty()) {
targetStorageIds_ = other.targetStorageIds_;
bitField0_ = (bitField0_ & ~0x00010000);
} else {
ensureTargetStorageIdsIsMutable();
targetStorageIds_.addAll(other.targetStorageIds_);
}
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasStage()) {
return false;
}
if (!hasPipelineSize()) {
return false;
}
if (!hasMinBytesRcvd()) {
return false;
}
if (!hasMaxBytesRcvd()) {
return false;
}
if (!hasLatestGenerationStamp()) {
return false;
}
if (!hasRequestedChecksum()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
return false;
}
}
if (hasSource()) {
if (!getSource().isInitialized()) {
return false;
}
}
if (!getRequestedChecksum().isInitialized()) {
return false;
}
return true;
}
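// Usage sketch (editor's note, not generated code): isInitialized() mirrors the required
// fields of OpWriteBlockProto, so build() rejects a builder unless header, stage,
// pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp and requestedChecksum
// are all set. A minimal sketch, with default sub-messages as placeholders and
// illustrative numeric values (package prefixes elided for brevity):
//
//   OpWriteBlockProto.Builder b = OpWriteBlockProto.newBuilder()
//       .setHeader(ClientOperationHeaderProto.getDefaultInstance())
//       .setStage(OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND)
//       .setPipelineSize(3)
//       .setMinBytesRcvd(0L)
//       .setMaxBytesRcvd(0L)
//       .setLatestGenerationStamp(1001L)
//       .setRequestedChecksum(ChecksumProto.getDefaultInstance());
//   OpWriteBlockProto op = b.buildPartial();
//
// buildPartial() skips the check above; build() additionally requires the nested header
// and requestedChecksum messages to satisfy their own required fields, so real callers
// populate those sub-messages as well.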
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
header_ != null &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = null;
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
getHeader(),
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
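// Usage sketch (editor's note, not generated code): the required header can be supplied
// either as a finished message via setHeader(...), or edited in place through the
// parented sub-builder returned by getHeaderBuilder(); changes to that sub-builder are
// picked up when the enclosing builder is built.
//
//   OpWriteBlockProto.Builder b = OpWriteBlockProto.newBuilder();
//   ClientOperationHeaderProto.Builder hb = b.getHeaderBuilder();
//   // ... populate hb ...
//   // b.hasHeader() is now true; b.build()/buildPartial() embeds hb's result.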
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_ =
java.util.Collections.emptyList();
private void ensureTargetsIsMutable() {
if (!((bitField0_ & 0x00000002) != 0)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(targets_);
bitField0_ |= 0x00000002;
}
}
private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> targetsBuilder_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
if (targetsBuilder_ == null) {
return java.util.Collections.unmodifiableList(targets_);
} else {
return targetsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public int getTargetsCount() {
if (targetsBuilder_ == null) {
return targets_.size();
} else {
return targetsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
if (targetsBuilder_ == null) {
return targets_.get(index);
} else {
return targetsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.set(index, value);
onChanged();
} else {
targetsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.set(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(value);
onChanged();
} else {
targetsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(index, value);
onChanged();
} else {
targetsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addAllTargets(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, targets_);
onChanged();
} else {
targetsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder clearTargets() {
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
targetsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder removeTargets(int index) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.remove(index);
onChanged();
} else {
targetsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getTargetsBuilder(
int index) {
return getTargetsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index) {
if (targetsBuilder_ == null) {
return targets_.get(index); } else {
return targetsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList() {
if (targetsBuilder_ != null) {
return targetsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(targets_);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder() {
return getTargetsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder(
int index) {
return getTargetsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder>
getTargetsBuilderList() {
return getTargetsFieldBuilder().getBuilderList();
}
private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsFieldBuilder() {
if (targetsBuilder_ == null) {
targetsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
targets_,
((bitField0_ & 0x00000002) != 0),
getParentForChildren(),
isClean());
targets_ = null;
}
return targetsBuilder_;
}
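// Usage sketch (editor's note, not generated code): targets is a plain repeated message
// field, so the generated add/set/remove accessors above apply. Here "builder" denotes a
// hypothetical OpWriteBlockProto.Builder, and the default instance stands in for a real
// datanode descriptor:
//
//   builder.addTargets(HdfsProtos.DatanodeInfoProto.getDefaultInstance());
//   int n = builder.getTargetsCount();   // 1 after the add above
//
// Insertion order is preserved; the per-target fields (targetStorageTypes,
// targetPinnings, targetStorageIds) are typically kept index-aligned with this list.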
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> sourceBuilder_;
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public boolean hasSource() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
if (sourceBuilder_ == null) {
return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
} else {
return sourceBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder setSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (sourceBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
source_ = value;
onChanged();
} else {
sourceBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder setSource(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (sourceBuilder_ == null) {
source_ = builderForValue.build();
onChanged();
} else {
sourceBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder mergeSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (sourceBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0) &&
source_ != null &&
source_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) {
source_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(source_).mergeFrom(value).buildPartial();
} else {
source_ = value;
}
onChanged();
} else {
sourceBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder clearSource() {
if (sourceBuilder_ == null) {
source_ = null;
onChanged();
} else {
sourceBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getSourceBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getSourceFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
if (sourceBuilder_ != null) {
return sourceBuilder_.getMessageOrBuilder();
} else {
return source_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
}
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getSourceFieldBuilder() {
if (sourceBuilder_ == null) {
sourceBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
getSource(),
getParentForChildren(),
isClean());
source_ = null;
}
return sourceBuilder_;
}
private int stage_ = 0;
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public boolean hasStage() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.valueOf(stage_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND : result;
}
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public Builder setStage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
stage_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public Builder clearStage() {
bitField0_ = (bitField0_ & ~0x00000008);
stage_ = 0;
onChanged();
return this;
}
private int pipelineSize_ ;
/**
* required uint32 pipelineSize = 5;
*/
public boolean hasPipelineSize() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* required uint32 pipelineSize = 5;
*/
public int getPipelineSize() {
return pipelineSize_;
}
/**
* required uint32 pipelineSize = 5;
*/
public Builder setPipelineSize(int value) {
bitField0_ |= 0x00000010;
pipelineSize_ = value;
onChanged();
return this;
}
/**
* required uint32 pipelineSize = 5;
*/
public Builder clearPipelineSize() {
bitField0_ = (bitField0_ & ~0x00000010);
pipelineSize_ = 0;
onChanged();
return this;
}
private long minBytesRcvd_ ;
/**
* required uint64 minBytesRcvd = 6;
*/
public boolean hasMinBytesRcvd() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
* required uint64 minBytesRcvd = 6;
*/
public long getMinBytesRcvd() {
return minBytesRcvd_;
}
/**
* required uint64 minBytesRcvd = 6;
*/
public Builder setMinBytesRcvd(long value) {
bitField0_ |= 0x00000020;
minBytesRcvd_ = value;
onChanged();
return this;
}
/**
* required uint64 minBytesRcvd = 6;
*/
public Builder clearMinBytesRcvd() {
bitField0_ = (bitField0_ & ~0x00000020);
minBytesRcvd_ = 0L;
onChanged();
return this;
}
private long maxBytesRcvd_ ;
/**
* required uint64 maxBytesRcvd = 7;
*/
public boolean hasMaxBytesRcvd() {
return ((bitField0_ & 0x00000040) != 0);
}
/**
* required uint64 maxBytesRcvd = 7;
*/
public long getMaxBytesRcvd() {
return maxBytesRcvd_;
}
/**
* required uint64 maxBytesRcvd = 7;
*/
public Builder setMaxBytesRcvd(long value) {
bitField0_ |= 0x00000040;
maxBytesRcvd_ = value;
onChanged();
return this;
}
/**
* required uint64 maxBytesRcvd = 7;
*/
public Builder clearMaxBytesRcvd() {
bitField0_ = (bitField0_ & ~0x00000040);
maxBytesRcvd_ = 0L;
onChanged();
return this;
}
private long latestGenerationStamp_ ;
/**
* required uint64 latestGenerationStamp = 8;
*/
public boolean hasLatestGenerationStamp() {
return ((bitField0_ & 0x00000080) != 0);
}
/**
* required uint64 latestGenerationStamp = 8;
*/
public long getLatestGenerationStamp() {
return latestGenerationStamp_;
}
/**
* required uint64 latestGenerationStamp = 8;
*/
public Builder setLatestGenerationStamp(long value) {
bitField0_ |= 0x00000080;
latestGenerationStamp_ = value;
onChanged();
return this;
}
/**
* required uint64 latestGenerationStamp = 8;
*/
public Builder clearLatestGenerationStamp() {
bitField0_ = (bitField0_ & ~0x00000080);
latestGenerationStamp_ = 0L;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> requestedChecksumBuilder_;
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
public boolean hasRequestedChecksum() {
return ((bitField0_ & 0x00000100) != 0);
}
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum() {
if (requestedChecksumBuilder_ == null) {
return requestedChecksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : requestedChecksum_;
} else {
return requestedChecksumBuilder_.getMessage();
}
}
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
public Builder setRequestedChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
if (requestedChecksumBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
requestedChecksum_ = value;
onChanged();
} else {
requestedChecksumBuilder_.setMessage(value);
}
bitField0_ |= 0x00000100;
return this;
}
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
public Builder setRequestedChecksum(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder builderForValue) {
if (requestedChecksumBuilder_ == null) {
requestedChecksum_ = builderForValue.build();
onChanged();
} else {
requestedChecksumBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000100;
return this;
}
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
public Builder mergeRequestedChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
if (requestedChecksumBuilder_ == null) {
if (((bitField0_ & 0x00000100) != 0) &&
requestedChecksum_ != null &&
requestedChecksum_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) {
requestedChecksum_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder(requestedChecksum_).mergeFrom(value).buildPartial();
} else {
requestedChecksum_ = value;
}
onChanged();
} else {
requestedChecksumBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000100;
return this;
}
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
public Builder clearRequestedChecksum() {
if (requestedChecksumBuilder_ == null) {
requestedChecksum_ = null;
onChanged();
} else {
requestedChecksumBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000100);
return this;
}
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder getRequestedChecksumBuilder() {
bitField0_ |= 0x00000100;
onChanged();
return getRequestedChecksumFieldBuilder().getBuilder();
}
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() {
if (requestedChecksumBuilder_ != null) {
return requestedChecksumBuilder_.getMessageOrBuilder();
} else {
return requestedChecksum_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : requestedChecksum_;
}
}
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>
getRequestedChecksumFieldBuilder() {
if (requestedChecksumBuilder_ == null) {
requestedChecksumBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>(
getRequestedChecksum(),
getParentForChildren(),
isClean());
requestedChecksum_ = null;
}
return requestedChecksumBuilder_;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder> cachingStrategyBuilder_;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public boolean hasCachingStrategy() {
return ((bitField0_ & 0x00000200) != 0);
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
if (cachingStrategyBuilder_ == null) {
return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
} else {
return cachingStrategyBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder setCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
if (cachingStrategyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
cachingStrategy_ = value;
onChanged();
} else {
cachingStrategyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000200;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder setCachingStrategy(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder builderForValue) {
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = builderForValue.build();
onChanged();
} else {
cachingStrategyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000200;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder mergeCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
if (cachingStrategyBuilder_ == null) {
if (((bitField0_ & 0x00000200) != 0) &&
cachingStrategy_ != null &&
cachingStrategy_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance()) {
cachingStrategy_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.newBuilder(cachingStrategy_).mergeFrom(value).buildPartial();
} else {
cachingStrategy_ = value;
}
onChanged();
} else {
cachingStrategyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000200;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder clearCachingStrategy() {
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = null;
onChanged();
} else {
cachingStrategyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000200);
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder getCachingStrategyBuilder() {
bitField0_ |= 0x00000200;
onChanged();
return getCachingStrategyFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
if (cachingStrategyBuilder_ != null) {
return cachingStrategyBuilder_.getMessageOrBuilder();
} else {
return cachingStrategy_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
}
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>
getCachingStrategyFieldBuilder() {
if (cachingStrategyBuilder_ == null) {
cachingStrategyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>(
getCachingStrategy(),
getParentForChildren(),
isClean());
cachingStrategy_ = null;
}
return cachingStrategyBuilder_;
}
private int storageType_ = 1;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000400) != 0);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(storageType_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000400;
storageType_ = value.getNumber();
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public Builder clearStorageType() {
bitField0_ = (bitField0_ & ~0x00000400);
storageType_ = 1;
onChanged();
return this;
}
private java.util.List<java.lang.Integer> targetStorageTypes_ =
java.util.Collections.emptyList();
private void ensureTargetStorageTypesIsMutable() {
if (!((bitField0_ & 0x00000800) != 0)) {
targetStorageTypes_ = new java.util.ArrayList<java.lang.Integer>(targetStorageTypes_);
bitField0_ |= 0x00000800;
}
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_, targetStorageTypes_converter_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public int getTargetStorageTypesCount() {
return targetStorageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
return targetStorageTypes_converter_.convert(targetStorageTypes_.get(index));
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public Builder setTargetStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.set(index, value.getNumber());
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public Builder addTargetStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.add(value.getNumber());
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public Builder addAllTargetStorageTypes(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
ensureTargetStorageTypesIsMutable();
for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) {
targetStorageTypes_.add(value.getNumber());
}
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public Builder clearTargetStorageTypes() {
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000800);
onChanged();
return this;
}
private boolean allowLazyPersist_ ;
/**
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*
* optional bool allowLazyPersist = 13 [default = false];
*/
public boolean hasAllowLazyPersist() {
return ((bitField0_ & 0x00001000) != 0);
}
/**
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*
* optional bool allowLazyPersist = 13 [default = false];
*/
public boolean getAllowLazyPersist() {
return allowLazyPersist_;
}
/**
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*
* optional bool allowLazyPersist = 13 [default = false];
*/
public Builder setAllowLazyPersist(boolean value) {
bitField0_ |= 0x00001000;
allowLazyPersist_ = value;
onChanged();
return this;
}
/**
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*
* optional bool allowLazyPersist = 13 [default = false];
*/
public Builder clearAllowLazyPersist() {
bitField0_ = (bitField0_ & ~0x00001000);
allowLazyPersist_ = false;
onChanged();
return this;
}
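// Usage sketch (editor's note, not generated code): per the field comment above,
// allowLazyPersist is only a hint that the DataNode may ignore. With "builder" a
// hypothetical OpWriteBlockProto.Builder:
//
//   builder.setAllowLazyPersist(true);   // request RAM-first, lazily persisted write
//   // builder.hasAllowLazyPersist() and builder.getAllowLazyPersist() are now true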
private boolean pinning_ ;
/**
*
*whether to pin the block, so Balancer won't move it.
*
*
* optional bool pinning = 14 [default = false];
*/
public boolean hasPinning() {
return ((bitField0_ & 0x00002000) != 0);
}
/**
*
*whether to pin the block, so Balancer won't move it.
*
*
* optional bool pinning = 14 [default = false];
*/
public boolean getPinning() {
return pinning_;
}
/**
*
*whether to pin the block, so Balancer won't move it.
*
*
* optional bool pinning = 14 [default = false];
*/
public Builder setPinning(boolean value) {
bitField0_ |= 0x00002000;
pinning_ = value;
onChanged();
return this;
}
/**
*
*whether to pin the block, so Balancer won't move it.
*
*
* optional bool pinning = 14 [default = false];
*/
public Builder clearPinning() {
bitField0_ = (bitField0_ & ~0x00002000);
pinning_ = false;
onChanged();
return this;
}
private org.apache.hadoop.thirdparty.protobuf.Internal.BooleanList targetPinnings_ = emptyBooleanList();
private void ensureTargetPinningsIsMutable() {
if (!((bitField0_ & 0x00004000) != 0)) {
targetPinnings_ = mutableCopy(targetPinnings_);
bitField0_ |= 0x00004000;
}
}
/**
* repeated bool targetPinnings = 15;
*/
public java.util.List<java.lang.Boolean>
getTargetPinningsList() {
return ((bitField0_ & 0x00004000) != 0) ?
java.util.Collections.unmodifiableList(targetPinnings_) : targetPinnings_;
}
/**
* repeated bool targetPinnings = 15;
*/
public int getTargetPinningsCount() {
return targetPinnings_.size();
}
/**
* repeated bool targetPinnings = 15;
*/
public boolean getTargetPinnings(int index) {
return targetPinnings_.getBoolean(index);
}
/**
* repeated bool targetPinnings = 15;
*/
public Builder setTargetPinnings(
int index, boolean value) {
ensureTargetPinningsIsMutable();
targetPinnings_.setBoolean(index, value);
onChanged();
return this;
}
/**
* repeated bool targetPinnings = 15;
*/
public Builder addTargetPinnings(boolean value) {
ensureTargetPinningsIsMutable();
targetPinnings_.addBoolean(value);
onChanged();
return this;
}
/**
* repeated bool targetPinnings = 15;
*/
public Builder addAllTargetPinnings(
java.lang.Iterable<? extends java.lang.Boolean> values) {
ensureTargetPinningsIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, targetPinnings_);
onChanged();
return this;
}
/**
* repeated bool targetPinnings = 15;
*/
public Builder clearTargetPinnings() {
targetPinnings_ = emptyBooleanList();
bitField0_ = (bitField0_ & ~0x00004000);
onChanged();
return this;
}
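// Usage sketch (editor's note, not generated code): pinning marks the block so the
// Balancer will not move it (see the field comment above), while targetPinnings carries
// one boolean per downstream target. A sketch for a hypothetical two-node downstream
// pipeline, with "builder" a hypothetical OpWriteBlockProto.Builder:
//
//   builder.setPinning(true)
//          .addTargetPinnings(true)
//          .addTargetPinnings(false);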
private java.lang.Object storageId_ = "";
/**
* optional string storageId = 16;
*/
public boolean hasStorageId() {
return ((bitField0_ & 0x00008000) != 0);
}
/**
* optional string storageId = 16;
*/
public java.lang.String getStorageId() {
java.lang.Object ref = storageId_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageId_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string storageId = 16;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getStorageIdBytes() {
java.lang.Object ref = storageId_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageId_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string storageId = 16;
*/
public Builder setStorageId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00008000;
storageId_ = value;
onChanged();
return this;
}
/**
* optional string storageId = 16;
*/
public Builder clearStorageId() {
bitField0_ = (bitField0_ & ~0x00008000);
storageId_ = getDefaultInstance().getStorageId();
onChanged();
return this;
}
/**
* optional string storageId = 16;
*/
public Builder setStorageIdBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00008000;
storageId_ = value;
onChanged();
return this;
}
private org.apache.hadoop.thirdparty.protobuf.LazyStringList targetStorageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY;
private void ensureTargetStorageIdsIsMutable() {
if (!((bitField0_ & 0x00010000) != 0)) {
targetStorageIds_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(targetStorageIds_);
bitField0_ |= 0x00010000;
}
}
/**
* repeated string targetStorageIds = 17;
*/
public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
getTargetStorageIdsList() {
return targetStorageIds_.getUnmodifiableView();
}
/**
* repeated string targetStorageIds = 17;
*/
public int getTargetStorageIdsCount() {
return targetStorageIds_.size();
}
/**
* repeated string targetStorageIds = 17;
*/
public java.lang.String getTargetStorageIds(int index) {
return targetStorageIds_.get(index);
}
/**
* repeated string targetStorageIds = 17;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getTargetStorageIdsBytes(int index) {
return targetStorageIds_.getByteString(index);
}
/**
* repeated string targetStorageIds = 17;
*/
public Builder setTargetStorageIds(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.set(index, value);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 17;
*/
public Builder addTargetStorageIds(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.add(value);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 17;
*/
public Builder addAllTargetStorageIds(
java.lang.Iterable<java.lang.String> values) {
ensureTargetStorageIdsIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, targetStorageIds_);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 17;
*/
public Builder clearTargetStorageIds() {
targetStorageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00010000);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 17;
*/
public Builder addTargetStorageIdsBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.add(value);
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpWriteBlockProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpWriteBlockProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpWriteBlockProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpWriteBlockProto>() {
@java.lang.Override
public OpWriteBlockProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new OpWriteBlockProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpWriteBlockProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpWriteBlockProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
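// ---------------------------------------------------------------------------
// Usage sketch (editor's note, not generated code): an end-to-end round trip of
// OpWriteBlockProto - build, serialize, parse back. All values are illustrative
// placeholders; "fullyPopulatedHeader" and "fullyPopulatedChecksum" are hypothetical
// variables standing for sub-messages whose own required fields have been filled in,
// since both build() and parseFrom() verify required fields, including nested ones.
//
//   OpWriteBlockProto original = OpWriteBlockProto.newBuilder()
//       .setHeader(fullyPopulatedHeader)
//       .setStage(OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND)
//       .setPipelineSize(3)
//       .setMinBytesRcvd(0L)
//       .setMaxBytesRcvd(134217728L)
//       .setLatestGenerationStamp(1001L)
//       .setRequestedChecksum(fullyPopulatedChecksum)
//       .setStorageType(HdfsProtos.StorageTypeProto.DISK)
//       .setStorageId("DS-hypothetical-id")
//       .build();
//
//   byte[] wire = original.toByteArray();
//   OpWriteBlockProto parsed = OpWriteBlockProto.parseFrom(wire);
//   // parsed.getPipelineSize() == 3, parsed.getStorageId().equals("DS-hypothetical-id")
// ---------------------------------------------------------------------------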
public interface OpTransferBlockProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpTransferBlockProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
getTargetsList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index);
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
int getTargetsCount();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index);
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
int getTargetStorageTypesCount();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index);
/**
* repeated string targetStorageIds = 4;
*/
java.util.List<java.lang.String>
getTargetStorageIdsList();
/**
* repeated string targetStorageIds = 4;
*/
int getTargetStorageIdsCount();
/**
* repeated string targetStorageIds = 4;
*/
java.lang.String getTargetStorageIds(int index);
/**
* repeated string targetStorageIds = 4;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getTargetStorageIdsBytes(int index);
}
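// For reference, the message this interface is generated from is declared in
// datatransfer.proto roughly as:
//
//   message OpTransferBlockProto {
//     required ClientOperationHeaderProto header = 1;
//     repeated DatanodeInfoProto targets = 2;
//     repeated StorageTypeProto targetStorageTypes = 3;
//     repeated string targetStorageIds = 4;
//   }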
/**
* Protobuf type {@code hadoop.hdfs.OpTransferBlockProto}
*/
public static final class OpTransferBlockProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpTransferBlockProto)
OpTransferBlockProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpTransferBlockProto.newBuilder() to construct.
private OpTransferBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpTransferBlockProto() {
targets_ = java.util.Collections.emptyList();
targetStorageTypes_ = java.util.Collections.emptyList();
targetStorageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY;
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpTransferBlockProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) != 0)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>();
mutable_bitField0_ |= 0x00000002;
}
targets_.add(
input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry));
break;
}
case 24: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(3, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000004) != 0)) {
targetStorageTypes_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000004;
}
targetStorageTypes_.add(rawValue);
}
break;
}
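// Tag 24 above is field 3 (targetStorageTypes) encoded as individual varints;
// tag 26 below is the same repeated enum field in packed, length-delimited form,
// so both wire encodings are accepted here.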
case 26: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(3, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000004) != 0)) {
targetStorageTypes_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000004;
}
targetStorageTypes_.add(rawValue);
}
}
input.popLimit(oldLimit);
break;
}
case 34: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
if (!((mutable_bitField0_ & 0x00000008) != 0)) {
targetStorageIds_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000008;
}
targetStorageIds_.add(bs);
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) != 0)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
}
if (((mutable_bitField0_ & 0x00000004) != 0)) {
targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
}
if (((mutable_bitField0_ & 0x00000008) != 0)) {
targetStorageIds_ = targetStorageIds_.getUnmodifiableView();
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.Builder.class);
}
private int bitField0_;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
}
public static final int TARGETS_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
return targets_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList() {
return targets_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public int getTargetsCount() {
return targets_.size();
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
return targets_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index) {
return targets_.get(index);
}
public static final int TARGETSTORAGETYPES_FIELD_NUMBER = 3;
private java.util.List<java.lang.Integer> targetStorageTypes_;
private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> targetStorageTypes_converter_ =
new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() {
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(from);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
}
};
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_, targetStorageTypes_converter_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public int getTargetStorageTypesCount() {
return targetStorageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
return targetStorageTypes_converter_.convert(targetStorageTypes_.get(index));
}
public static final int TARGETSTORAGEIDS_FIELD_NUMBER = 4;
private org.apache.hadoop.thirdparty.protobuf.LazyStringList targetStorageIds_;
/**
* repeated string targetStorageIds = 4;
*/
public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
getTargetStorageIdsList() {
return targetStorageIds_;
}
/**
* repeated string targetStorageIds = 4;
*/
public int getTargetStorageIdsCount() {
return targetStorageIds_.size();
}
/**
* repeated string targetStorageIds = 4;
*/
public java.lang.String getTargetStorageIds(int index) {
return targetStorageIds_.get(index);
}
/**
* repeated string targetStorageIds = 4;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getTargetStorageIdsBytes(int index) {
return targetStorageIds_.getByteString(index);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getHeader());
}
for (int i = 0; i < targets_.size(); i++) {
output.writeMessage(2, targets_.get(i));
}
for (int i = 0; i < targetStorageTypes_.size(); i++) {
output.writeEnum(3, targetStorageTypes_.get(i));
}
for (int i = 0; i < targetStorageIds_.size(); i++) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, targetStorageIds_.getRaw(i));
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getHeader());
}
for (int i = 0; i < targets_.size(); i++) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(2, targets_.get(i));
}
{
int dataSize = 0;
for (int i = 0; i < targetStorageTypes_.size(); i++) {
dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSizeNoTag(targetStorageTypes_.get(i));
}
size += dataSize;
size += 1 * targetStorageTypes_.size();
}
{
int dataSize = 0;
for (int i = 0; i < targetStorageIds_.size(); i++) {
dataSize += computeStringSizeNoTag(targetStorageIds_.getRaw(i));
}
size += dataSize;
size += 1 * getTargetStorageIdsList().size();
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) obj;
if (hasHeader() != other.hasHeader()) return false;
if (hasHeader()) {
if (!getHeader()
.equals(other.getHeader())) return false;
}
if (!getTargetsList()
.equals(other.getTargetsList())) return false;
if (!targetStorageTypes_.equals(other.targetStorageTypes_)) return false;
if (!getTargetStorageIdsList()
.equals(other.getTargetStorageIdsList())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (getTargetsCount() > 0) {
hash = (37 * hash) + TARGETS_FIELD_NUMBER;
hash = (53 * hash) + getTargetsList().hashCode();
}
if (getTargetStorageTypesCount() > 0) {
hash = (37 * hash) + TARGETSTORAGETYPES_FIELD_NUMBER;
hash = (53 * hash) + targetStorageTypes_.hashCode();
}
if (getTargetStorageIdsCount() > 0) {
hash = (37 * hash) + TARGETSTORAGEIDS_FIELD_NUMBER;
hash = (53 * hash) + getTargetStorageIdsList().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
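// Illustrative round-trip sketch (not part of the generated code): `op` is assumed to be
// a fully initialized OpTransferBlockProto instance.
//
//   byte[] wire = op.toByteArray();
//   OpTransferBlockProto decoded = OpTransferBlockProto.parseFrom(wire);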
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpTransferBlockProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpTransferBlockProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getTargetsFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = null;
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
targetsBuilder_.clear();
}
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
targetStorageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (targetsBuilder_ == null) {
if (((bitField0_ & 0x00000002) != 0)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.targets_ = targets_;
} else {
result.targets_ = targetsBuilder_.build();
}
if (((bitField0_ & 0x00000004) != 0)) {
targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.targetStorageTypes_ = targetStorageTypes_;
if (((bitField0_ & 0x00000008) != 0)) {
targetStorageIds_ = targetStorageIds_.getUnmodifiableView();
bitField0_ = (bitField0_ & ~0x00000008);
}
result.targetStorageIds_ = targetStorageIds_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (targetsBuilder_ == null) {
if (!other.targets_.isEmpty()) {
if (targets_.isEmpty()) {
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureTargetsIsMutable();
targets_.addAll(other.targets_);
}
onChanged();
}
} else {
if (!other.targets_.isEmpty()) {
if (targetsBuilder_.isEmpty()) {
targetsBuilder_.dispose();
targetsBuilder_ = null;
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000002);
targetsBuilder_ =
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getTargetsFieldBuilder() : null;
} else {
targetsBuilder_.addAllMessages(other.targets_);
}
}
}
if (!other.targetStorageTypes_.isEmpty()) {
if (targetStorageTypes_.isEmpty()) {
targetStorageTypes_ = other.targetStorageTypes_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.addAll(other.targetStorageTypes_);
}
onChanged();
}
if (!other.targetStorageIds_.isEmpty()) {
if (targetStorageIds_.isEmpty()) {
targetStorageIds_ = other.targetStorageIds_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureTargetStorageIdsIsMutable();
targetStorageIds_.addAll(other.targetStorageIds_);
}
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
return false;
}
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
header_ != null &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = null;
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
getHeader(),
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_ =
java.util.Collections.emptyList();
private void ensureTargetsIsMutable() {
if (!((bitField0_ & 0x00000002) != 0)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(targets_);
bitField0_ |= 0x00000002;
}
}
private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> targetsBuilder_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
if (targetsBuilder_ == null) {
return java.util.Collections.unmodifiableList(targets_);
} else {
return targetsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public int getTargetsCount() {
if (targetsBuilder_ == null) {
return targets_.size();
} else {
return targetsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
if (targetsBuilder_ == null) {
return targets_.get(index);
} else {
return targetsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.set(index, value);
onChanged();
} else {
targetsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.set(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(value);
onChanged();
} else {
targetsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(index, value);
onChanged();
} else {
targetsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addAllTargets(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, targets_);
onChanged();
} else {
targetsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder clearTargets() {
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
targetsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder removeTargets(int index) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.remove(index);
onChanged();
} else {
targetsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getTargetsBuilder(
int index) {
return getTargetsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index) {
if (targetsBuilder_ == null) {
return targets_.get(index); } else {
return targetsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList() {
if (targetsBuilder_ != null) {
return targetsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(targets_);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder() {
return getTargetsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder(
int index) {
return getTargetsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder>
getTargetsBuilderList() {
return getTargetsFieldBuilder().getBuilderList();
}
private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsFieldBuilder() {
if (targetsBuilder_ == null) {
targetsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
targets_,
((bitField0_ & 0x00000002) != 0),
getParentForChildren(),
isClean());
targets_ = null;
}
return targetsBuilder_;
}
private java.util.List<java.lang.Integer> targetStorageTypes_ =
java.util.Collections.emptyList();
private void ensureTargetStorageTypesIsMutable() {
if (!((bitField0_ & 0x00000004) != 0)) {
targetStorageTypes_ = new java.util.ArrayList<java.lang.Integer>(targetStorageTypes_);
bitField0_ |= 0x00000004;
}
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_, targetStorageTypes_converter_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public int getTargetStorageTypesCount() {
return targetStorageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
return targetStorageTypes_converter_.convert(targetStorageTypes_.get(index));
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public Builder setTargetStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.set(index, value.getNumber());
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public Builder addTargetStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.add(value.getNumber());
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public Builder addAllTargetStorageTypes(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
ensureTargetStorageTypesIsMutable();
for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) {
targetStorageTypes_.add(value.getNumber());
}
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public Builder clearTargetStorageTypes() {
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
private org.apache.hadoop.thirdparty.protobuf.LazyStringList targetStorageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY;
private void ensureTargetStorageIdsIsMutable() {
if (!((bitField0_ & 0x00000008) != 0)) {
targetStorageIds_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(targetStorageIds_);
bitField0_ |= 0x00000008;
}
}
/**
* repeated string targetStorageIds = 4;
*/
public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
getTargetStorageIdsList() {
return targetStorageIds_.getUnmodifiableView();
}
/**
* repeated string targetStorageIds = 4;
*/
public int getTargetStorageIdsCount() {
return targetStorageIds_.size();
}
/**
* repeated string targetStorageIds = 4;
*/
public java.lang.String getTargetStorageIds(int index) {
return targetStorageIds_.get(index);
}
/**
* repeated string targetStorageIds = 4;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getTargetStorageIdsBytes(int index) {
return targetStorageIds_.getByteString(index);
}
/**
* repeated string targetStorageIds = 4;
*/
public Builder setTargetStorageIds(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.set(index, value);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 4;
*/
public Builder addTargetStorageIds(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.add(value);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 4;
*/
public Builder addAllTargetStorageIds(
java.lang.Iterable<java.lang.String> values) {
ensureTargetStorageIdsIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, targetStorageIds_);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 4;
*/
public Builder clearTargetStorageIds() {
targetStorageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 4;
*/
public Builder addTargetStorageIdsBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.add(value);
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpTransferBlockProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpTransferBlockProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpTransferBlockProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpTransferBlockProto>() {
@java.lang.Override
public OpTransferBlockProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new OpTransferBlockProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpTransferBlockProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpTransferBlockProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
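// Illustrative usage sketch (not part of the generated code): building an
// OpTransferBlockProto with the builder defined above. The header and datanode values
// are hypothetical and must themselves be fully initialized, since build() throws if
// any required field is missing.
//
//   OpTransferBlockProto op = OpTransferBlockProto.newBuilder()
//       .setHeader(clientHeader)                                   // required field 1
//       .addTargets(targetDatanode)                                // repeated field 2
//       .addTargetStorageTypes(HdfsProtos.StorageTypeProto.DISK)   // repeated field 3
//       .addTargetStorageIds("storage-id")                         // repeated field 4
//       .build();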
public interface OpReplaceBlockProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpReplaceBlockProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
/**
* required string delHint = 2;
*/
boolean hasDelHint();
/**
* required string delHint = 2;
*/
java.lang.String getDelHint();
/**
* required string delHint = 2;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getDelHintBytes();
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
boolean hasSource();
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource();
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder();
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
boolean hasStorageType();
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();
/**
* optional string storageId = 5;
*/
boolean hasStorageId();
/**
* optional string storageId = 5;
*/
java.lang.String getStorageId();
/**
* optional string storageId = 5;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getStorageIdBytes();
}
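// For reference, the message this interface is generated from is declared in
// datatransfer.proto roughly as:
//
//   message OpReplaceBlockProto {
//     required BaseHeaderProto header = 1;
//     required string delHint = 2;
//     required DatanodeInfoProto source = 3;
//     optional StorageTypeProto storageType = 4 [default = DISK];
//     optional string storageId = 5;
//   }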
/**
* Protobuf type {@code hadoop.hdfs.OpReplaceBlockProto}
*/
public static final class OpReplaceBlockProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpReplaceBlockProto)
OpReplaceBlockProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpReplaceBlockProto.newBuilder() to construct.
private OpReplaceBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpReplaceBlockProto() {
delHint_ = "";
storageType_ = 1;
storageId_ = "";
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpReplaceBlockProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000002;
delHint_ = bs;
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) != 0)) {
subBuilder = source_.toBuilder();
}
source_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(source_);
source_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 32: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000008;
storageType_ = rawValue;
}
break;
}
case 42: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000010;
storageId_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.Builder.class);
}
private int bitField0_;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
public static final int DELHINT_FIELD_NUMBER = 2;
private volatile java.lang.Object delHint_;
/**
* required string delHint = 2;
*/
public boolean hasDelHint() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required string delHint = 2;
*/
public java.lang.String getDelHint() {
java.lang.Object ref = delHint_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
delHint_ = s;
}
return s;
}
}
/**
* required string delHint = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getDelHintBytes() {
java.lang.Object ref = delHint_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
delHint_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int SOURCE_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public boolean hasSource() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
}
public static final int STORAGETYPE_FIELD_NUMBER = 4;
private int storageType_;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(storageType_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
}
public static final int STORAGEID_FIELD_NUMBER = 5;
private volatile java.lang.Object storageId_;
/**
* optional string storageId = 5;
*/
public boolean hasStorageId() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional string storageId = 5;
*/
public java.lang.String getStorageId() {
java.lang.Object ref = storageId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageId_ = s;
}
return s;
}
}
/**
* optional string storageId = 5;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getStorageIdBytes() {
java.lang.Object ref = storageId_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageId_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDelHint()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSource()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getSource().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, delHint_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeMessage(3, getSource());
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeEnum(4, storageType_);
}
if (((bitField0_ & 0x00000010) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, storageId_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, delHint_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(3, getSource());
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(4, storageType_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, storageId_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) obj;
if (hasHeader() != other.hasHeader()) return false;
if (hasHeader()) {
if (!getHeader()
.equals(other.getHeader())) return false;
}
if (hasDelHint() != other.hasDelHint()) return false;
if (hasDelHint()) {
if (!getDelHint()
.equals(other.getDelHint())) return false;
}
if (hasSource() != other.hasSource()) return false;
if (hasSource()) {
if (!getSource()
.equals(other.getSource())) return false;
}
if (hasStorageType() != other.hasStorageType()) return false;
if (hasStorageType()) {
if (storageType_ != other.storageType_) return false;
}
if (hasStorageId() != other.hasStorageId()) return false;
if (hasStorageId()) {
if (!getStorageId()
.equals(other.getStorageId())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (hasDelHint()) {
hash = (37 * hash) + DELHINT_FIELD_NUMBER;
hash = (53 * hash) + getDelHint().hashCode();
}
if (hasSource()) {
hash = (37 * hash) + SOURCE_FIELD_NUMBER;
hash = (53 * hash) + getSource().hashCode();
}
if (hasStorageType()) {
hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
hash = (53 * hash) + storageType_;
}
if (hasStorageId()) {
hash = (37 * hash) + STORAGEID_FIELD_NUMBER;
hash = (53 * hash) + getStorageId().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
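// Note (illustrative, not part of the generated source): equals() and hashCode()
// above are value-based and memoized, so parsed messages can be used directly as
// keys in hash-based collections. A minimal sketch, assuming `a` and `b` are byte
// arrays that each hold a serialized OpReplaceBlockProto:
//
//   java.util.Set<OpReplaceBlockProto> seen = new java.util.HashSet<>();
//   seen.add(OpReplaceBlockProto.parseFrom(a));
//   boolean duplicate = !seen.add(OpReplaceBlockProto.parseFrom(b));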
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
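// Illustrative round-trip sketch (not part of the generated source): the static
// parseFrom()/parseDelimitedFrom() overloads above are the counterparts of the
// inherited toByteArray()/writeDelimitedTo() serializers. Assuming `op` is an
// initialized OpReplaceBlockProto:
//
//   byte[] wire = op.toByteArray();
//   OpReplaceBlockProto same = OpReplaceBlockProto.parseFrom(wire);
//   assert same.equals(op);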
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpReplaceBlockProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpReplaceBlockProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getSourceFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = null;
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
delHint_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (sourceBuilder_ == null) {
source_ = null;
} else {
sourceBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
storageType_ = 1;
bitField0_ = (bitField0_ & ~0x00000008);
storageId_ = "";
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.delHint_ = delHint_;
if (((from_bitField0_ & 0x00000004) != 0)) {
if (sourceBuilder_ == null) {
result.source_ = source_;
} else {
result.source_ = sourceBuilder_.build();
}
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
to_bitField0_ |= 0x00000008;
}
result.storageType_ = storageType_;
if (((from_bitField0_ & 0x00000010) != 0)) {
to_bitField0_ |= 0x00000010;
}
result.storageId_ = storageId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (other.hasDelHint()) {
bitField0_ |= 0x00000002;
delHint_ = other.delHint_;
onChanged();
}
if (other.hasSource()) {
mergeSource(other.getSource());
}
if (other.hasStorageType()) {
setStorageType(other.getStorageType());
}
if (other.hasStorageId()) {
bitField0_ |= 0x00000010;
storageId_ = other.storageId_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasDelHint()) {
return false;
}
if (!hasSource()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
if (!getSource().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
header_ != null &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = null;
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
getHeader(),
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
private java.lang.Object delHint_ = "";
/**
* required string delHint = 2;
*/
public boolean hasDelHint() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required string delHint = 2;
*/
public java.lang.String getDelHint() {
java.lang.Object ref = delHint_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
delHint_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string delHint = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getDelHintBytes() {
java.lang.Object ref = delHint_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
delHint_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string delHint = 2;
*/
public Builder setDelHint(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
delHint_ = value;
onChanged();
return this;
}
/**
* required string delHint = 2;
*/
public Builder clearDelHint() {
bitField0_ = (bitField0_ & ~0x00000002);
delHint_ = getDefaultInstance().getDelHint();
onChanged();
return this;
}
/**
* required string delHint = 2;
*/
public Builder setDelHintBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
delHint_ = value;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> sourceBuilder_;
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public boolean hasSource() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
if (sourceBuilder_ == null) {
return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
} else {
return sourceBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder setSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (sourceBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
source_ = value;
onChanged();
} else {
sourceBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder setSource(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (sourceBuilder_ == null) {
source_ = builderForValue.build();
onChanged();
} else {
sourceBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder mergeSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (sourceBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0) &&
source_ != null &&
source_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) {
source_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(source_).mergeFrom(value).buildPartial();
} else {
source_ = value;
}
onChanged();
} else {
sourceBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder clearSource() {
if (sourceBuilder_ == null) {
source_ = null;
onChanged();
} else {
sourceBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getSourceBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getSourceFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
if (sourceBuilder_ != null) {
return sourceBuilder_.getMessageOrBuilder();
} else {
return source_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
}
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getSourceFieldBuilder() {
if (sourceBuilder_ == null) {
sourceBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
getSource(),
getParentForChildren(),
isClean());
source_ = null;
}
return sourceBuilder_;
}
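// Note (illustrative, not part of the generated source): for this singular message
// field, mergeSource() folds the set fields of its argument into any value already
// present, while setSource() replaces the value outright. A minimal sketch,
// assuming `partialInfo` and `moreInfo` are DatanodeInfoProto values built elsewhere:
//
//   OpReplaceBlockProto.Builder b = OpReplaceBlockProto.newBuilder();
//   b.setSource(partialInfo);  // replaces the current source
//   b.mergeSource(moreInfo);   // merges moreInfo's set fields into partialInfo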
private int storageType_ = 1;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(storageType_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
storageType_ = value.getNumber();
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public Builder clearStorageType() {
bitField0_ = (bitField0_ & ~0x00000008);
storageType_ = 1;
onChanged();
return this;
}
private java.lang.Object storageId_ = "";
/**
* optional string storageId = 5;
*/
public boolean hasStorageId() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional string storageId = 5;
*/
public java.lang.String getStorageId() {
java.lang.Object ref = storageId_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageId_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string storageId = 5;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getStorageIdBytes() {
java.lang.Object ref = storageId_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageId_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string storageId = 5;
*/
public Builder setStorageId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
storageId_ = value;
onChanged();
return this;
}
/**
* optional string storageId = 5;
*/
public Builder clearStorageId() {
bitField0_ = (bitField0_ & ~0x00000010);
storageId_ = getDefaultInstance().getStorageId();
onChanged();
return this;
}
/**
* optional string storageId = 5;
*/
public Builder setStorageIdBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
storageId_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpReplaceBlockProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpReplaceBlockProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpReplaceBlockProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpReplaceBlockProto>() {
@java.lang.Override
public OpReplaceBlockProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new OpReplaceBlockProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpReplaceBlockProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpReplaceBlockProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
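// Illustrative usage sketch (not part of the generated source): building an
// OpReplaceBlockProto with the generated Builder. `baseHeader` and `sourceNode`
// are assumed to be fully populated BaseHeaderProto / DatanodeInfoProto values;
// header, delHint and source are required fields, so build() throws an
// UninitializedMessageException if any of them is missing.
//
//   OpReplaceBlockProto op = OpReplaceBlockProto.newBuilder()
//       .setHeader(baseHeader)
//       .setDelHint("uuid-of-replica-to-delete")
//       .setSource(sourceNode)
//       .setStorageType(HdfsProtos.StorageTypeProto.DISK) // optional; DISK is the default
//       .setStorageId("DS-example")                       // optional
//       .build();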
public interface OpCopyBlockProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpCopyBlockProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.OpCopyBlockProto}
*/
public static final class OpCopyBlockProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpCopyBlockProto)
OpCopyBlockProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpCopyBlockProto.newBuilder() to construct.
private OpCopyBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpCopyBlockProto() {
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpCopyBlockProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.Builder.class);
}
private int bitField0_;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getHeader());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getHeader());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) obj;
if (hasHeader() != other.hasHeader()) return false;
if (hasHeader()) {
if (!getHeader()
.equals(other.getHeader())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpCopyBlockProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpCopyBlockProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = null;
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
header_ != null &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = null;
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
getHeader(),
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpCopyBlockProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpCopyBlockProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpCopyBlockProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpCopyBlockProto>() {
@java.lang.Override
public OpCopyBlockProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new OpCopyBlockProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpCopyBlockProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpCopyBlockProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
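// Illustrative sketch (not part of the generated source): OpCopyBlockProto carries
// only the required header, so reading one from a stream reduces to a presence
// check. `in` is assumed to be an InputStream positioned at a length-delimited
// record, and process(...) is a hypothetical callback:
//
//   OpCopyBlockProto copyOp = OpCopyBlockProto.parseDelimitedFrom(in);
//   if (copyOp != null && copyOp.hasHeader()) {
//     process(copyOp.getHeader());
//   }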
public interface OpBlockChecksumProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpBlockChecksumProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
boolean hasBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder();
}
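// Illustrative sketch (not part of the generated source): blockChecksumOptions is
// optional, so callers should test presence through the OrBuilder view before
// relying on it; otherwise getBlockChecksumOptions() returns the default instance.
// Assuming `proto` implements OpBlockChecksumProtoOrBuilder and applyOptions(...)
// is a hypothetical helper:
//
//   if (proto.hasBlockChecksumOptions()) {
//     applyOptions(proto.getBlockChecksumOptions());
//   }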
/**
* Protobuf type {@code hadoop.hdfs.OpBlockChecksumProto}
*/
public static final class OpBlockChecksumProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpBlockChecksumProto)
OpBlockChecksumProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpBlockChecksumProto.newBuilder() to construct.
private OpBlockChecksumProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpBlockChecksumProto() {
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpBlockChecksumProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) != 0)) {
subBuilder = blockChecksumOptions_.toBuilder();
}
blockChecksumOptions_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(blockChecksumOptions_);
blockChecksumOptions_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.Builder.class);
}
private int bitField0_;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
public static final int BLOCKCHECKSUMOPTIONS_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(2, getBlockChecksumOptions());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(2, getBlockChecksumOptions());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) obj;
if (hasHeader() != other.hasHeader()) return false;
if (hasHeader()) {
if (!getHeader()
.equals(other.getHeader())) return false;
}
if (hasBlockChecksumOptions() != other.hasBlockChecksumOptions()) return false;
if (hasBlockChecksumOptions()) {
if (!getBlockChecksumOptions()
.equals(other.getBlockChecksumOptions())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (hasBlockChecksumOptions()) {
hash = (37 * hash) + BLOCKCHECKSUMOPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getBlockChecksumOptions().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpBlockChecksumProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpBlockChecksumProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getBlockChecksumOptionsFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = null;
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = null;
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
if (blockChecksumOptionsBuilder_ == null) {
result.blockChecksumOptions_ = blockChecksumOptions_;
} else {
result.blockChecksumOptions_ = blockChecksumOptionsBuilder_.build();
}
to_bitField0_ |= 0x00000002;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (other.hasBlockChecksumOptions()) {
mergeBlockChecksumOptions(other.getBlockChecksumOptions());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
header_ != null &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = null;
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
getHeader(),
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> blockChecksumOptionsBuilder_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
} else {
return blockChecksumOptionsBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public Builder setBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blockChecksumOptions_ = value;
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public Builder setBlockChecksumOptions(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder builderForValue) {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = builderForValue.build();
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public Builder mergeBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (((bitField0_ & 0x00000002) != 0) &&
blockChecksumOptions_ != null &&
blockChecksumOptions_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) {
blockChecksumOptions_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.newBuilder(blockChecksumOptions_).mergeFrom(value).buildPartial();
} else {
blockChecksumOptions_ = value;
}
onChanged();
} else {
blockChecksumOptionsBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public Builder clearBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = null;
onChanged();
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder getBlockChecksumOptionsBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getBlockChecksumOptionsFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
if (blockChecksumOptionsBuilder_ != null) {
return blockChecksumOptionsBuilder_.getMessageOrBuilder();
} else {
return blockChecksumOptions_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>
getBlockChecksumOptionsFieldBuilder() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>(
getBlockChecksumOptions(),
getParentForChildren(),
isClean());
blockChecksumOptions_ = null;
}
return blockChecksumOptionsBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpBlockChecksumProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpBlockChecksumProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
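// Editorial note (not generated): the public PARSER field below is deprecated in
// favor of the parser() and getParserForType() accessors that follow it.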
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpBlockChecksumProto>() {
@java.lang.Override
public OpBlockChecksumProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new OpBlockChecksumProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
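/*
 * Editorial sketch (not part of the generated code): a minimal example of how a
 * caller might build and round-trip an OpBlockChecksumProto. The variables
 * someHeader and someOptions are hypothetical placeholders for an already
 * populated BaseHeaderProto and BlockChecksumOptionsProto.
 *
 *   DataTransferProtos.OpBlockChecksumProto op =
 *       DataTransferProtos.OpBlockChecksumProto.newBuilder()
 *           .setHeader(someHeader)                    // required; build() throws if absent
 *           .setBlockChecksumOptions(someOptions)     // optional
 *           .build();
 *   byte[] bytes = op.toByteArray();
 *   DataTransferProtos.OpBlockChecksumProto parsed =
 *       DataTransferProtos.OpBlockChecksumProto.parseFrom(bytes);
 */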
public interface OpBlockGroupChecksumProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpBlockGroupChecksumProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
boolean hasDatanodes();
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDatanodes();
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getDatanodesOrBuilder();
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto>
getBlockTokensList();
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index);
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
int getBlockTokensCount();
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensOrBuilderList();
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
int index);
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
boolean hasEcPolicy();
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy();
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder();
/**
* repeated uint32 blockIndices = 5;
*/
java.util.List<java.lang.Integer> getBlockIndicesList();
/**
* repeated uint32 blockIndices = 5;
*/
int getBlockIndicesCount();
/**
* repeated uint32 blockIndices = 5;
*/
int getBlockIndices(int index);
/**
* required uint64 requestedNumBytes = 6;
*/
boolean hasRequestedNumBytes();
/**
* required uint64 requestedNumBytes = 6;
*/
long getRequestedNumBytes();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
boolean hasBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder();
}
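// Editorial note (not generated): OpBlockGroupChecksumProto appears to be the
// request for a checksum over an erasure-coded block group. It carries the base
// header, the participating datanodes, one block token per internal block, the
// erasure coding policy, the internal block indices to include, the number of
// bytes requested, and optional block checksum options.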
/**
* Protobuf type {@code hadoop.hdfs.OpBlockGroupChecksumProto}
*/
public static final class OpBlockGroupChecksumProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpBlockGroupChecksumProto)
OpBlockGroupChecksumProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpBlockGroupChecksumProto.newBuilder() to construct.
private OpBlockGroupChecksumProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpBlockGroupChecksumProto() {
blockTokens_ = java.util.Collections.emptyList();
blockIndices_ = emptyIntList();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpBlockGroupChecksumProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) != 0)) {
subBuilder = datanodes_.toBuilder();
}
datanodes_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(datanodes_);
datanodes_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 26: {
if (!((mutable_bitField0_ & 0x00000004) != 0)) {
blockTokens_ = new java.util.ArrayList<org.apache.hadoop.security.proto.SecurityProtos.TokenProto>();
mutable_bitField0_ |= 0x00000004;
}
blockTokens_.add(
input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry));
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) != 0)) {
subBuilder = ecPolicy_.toBuilder();
}
ecPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(ecPolicy_);
ecPolicy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
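// Editorial note (not generated): blockIndices (field 5, repeated uint32) can be
// encoded either unpacked (tag 40, one varint per element) or packed (tag 42, a
// single length-delimited run of varints); the two cases below feed the same list.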
case 40: {
if (!((mutable_bitField0_ & 0x00000010) != 0)) {
blockIndices_ = newIntList();
mutable_bitField0_ |= 0x00000010;
}
blockIndices_.addInt(input.readUInt32());
break;
}
case 42: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000010) != 0) && input.getBytesUntilLimit() > 0) {
blockIndices_ = newIntList();
mutable_bitField0_ |= 0x00000010;
}
while (input.getBytesUntilLimit() > 0) {
blockIndices_.addInt(input.readUInt32());
}
input.popLimit(limit);
break;
}
case 48: {
bitField0_ |= 0x00000008;
requestedNumBytes_ = input.readUInt64();
break;
}
case 58: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) != 0)) {
subBuilder = blockChecksumOptions_.toBuilder();
}
blockChecksumOptions_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(blockChecksumOptions_);
blockChecksumOptions_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000004) != 0)) {
blockTokens_ = java.util.Collections.unmodifiableList(blockTokens_);
}
if (((mutable_bitField0_ & 0x00000010) != 0)) {
blockIndices_.makeImmutable(); // C
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.Builder.class);
}
private int bitField0_;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
public static final int DATANODES_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto datanodes_;
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public boolean hasDatanodes() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDatanodes() {
return datanodes_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance() : datanodes_;
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getDatanodesOrBuilder() {
return datanodes_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance() : datanodes_;
}
public static final int BLOCKTOKENS_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> blockTokens_;
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList() {
return blockTokens_;
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensOrBuilderList() {
return blockTokens_;
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public int getBlockTokensCount() {
return blockTokens_.size();
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) {
return blockTokens_.get(index);
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
int index) {
return blockTokens_.get(index);
}
public static final int ECPOLICY_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public boolean hasEcPolicy() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
}
public static final int BLOCKINDICES_FIELD_NUMBER = 5;
private org.apache.hadoop.thirdparty.protobuf.Internal.IntList blockIndices_;
/**
* repeated uint32 blockIndices = 5;
*/
public java.util.List<java.lang.Integer>
getBlockIndicesList() {
return blockIndices_;
}
/**
* repeated uint32 blockIndices = 5;
*/
public int getBlockIndicesCount() {
return blockIndices_.size();
}
/**
* repeated uint32 blockIndices = 5;
*/
public int getBlockIndices(int index) {
return blockIndices_.getInt(index);
}
public static final int REQUESTEDNUMBYTES_FIELD_NUMBER = 6;
private long requestedNumBytes_;
/**
* required uint64 requestedNumBytes = 6;
*/
public boolean hasRequestedNumBytes() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* required uint64 requestedNumBytes = 6;
*/
public long getRequestedNumBytes() {
return requestedNumBytes_;
}
public static final int BLOCKCHECKSUMOPTIONS_FIELD_NUMBER = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDatanodes()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasEcPolicy()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasRequestedNumBytes()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getDatanodes().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getBlockTokensCount(); i++) {
if (!getBlockTokens(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (!getEcPolicy().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(2, getDatanodes());
}
for (int i = 0; i < blockTokens_.size(); i++) {
output.writeMessage(3, blockTokens_.get(i));
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeMessage(4, getEcPolicy());
}
for (int i = 0; i < blockIndices_.size(); i++) {
output.writeUInt32(5, blockIndices_.getInt(i));
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeUInt64(6, requestedNumBytes_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeMessage(7, getBlockChecksumOptions());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(2, getDatanodes());
}
for (int i = 0; i < blockTokens_.size(); i++) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(3, blockTokens_.get(i));
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(4, getEcPolicy());
}
{
int dataSize = 0;
for (int i = 0; i < blockIndices_.size(); i++) {
dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32SizeNoTag(blockIndices_.getInt(i));
}
size += dataSize;
size += 1 * getBlockIndicesList().size();
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(6, requestedNumBytes_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(7, getBlockChecksumOptions());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto) obj;
if (hasHeader() != other.hasHeader()) return false;
if (hasHeader()) {
if (!getHeader()
.equals(other.getHeader())) return false;
}
if (hasDatanodes() != other.hasDatanodes()) return false;
if (hasDatanodes()) {
if (!getDatanodes()
.equals(other.getDatanodes())) return false;
}
if (!getBlockTokensList()
.equals(other.getBlockTokensList())) return false;
if (hasEcPolicy() != other.hasEcPolicy()) return false;
if (hasEcPolicy()) {
if (!getEcPolicy()
.equals(other.getEcPolicy())) return false;
}
if (!getBlockIndicesList()
.equals(other.getBlockIndicesList())) return false;
if (hasRequestedNumBytes() != other.hasRequestedNumBytes()) return false;
if (hasRequestedNumBytes()) {
if (getRequestedNumBytes()
!= other.getRequestedNumBytes()) return false;
}
if (hasBlockChecksumOptions() != other.hasBlockChecksumOptions()) return false;
if (hasBlockChecksumOptions()) {
if (!getBlockChecksumOptions()
.equals(other.getBlockChecksumOptions())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (hasDatanodes()) {
hash = (37 * hash) + DATANODES_FIELD_NUMBER;
hash = (53 * hash) + getDatanodes().hashCode();
}
if (getBlockTokensCount() > 0) {
hash = (37 * hash) + BLOCKTOKENS_FIELD_NUMBER;
hash = (53 * hash) + getBlockTokensList().hashCode();
}
if (hasEcPolicy()) {
hash = (37 * hash) + ECPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getEcPolicy().hashCode();
}
if (getBlockIndicesCount() > 0) {
hash = (37 * hash) + BLOCKINDICES_FIELD_NUMBER;
hash = (53 * hash) + getBlockIndicesList().hashCode();
}
if (hasRequestedNumBytes()) {
hash = (37 * hash) + REQUESTEDNUMBYTES_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getRequestedNumBytes());
}
if (hasBlockChecksumOptions()) {
hash = (37 * hash) + BLOCKCHECKSUMOPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getBlockChecksumOptions().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpBlockGroupChecksumProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpBlockGroupChecksumProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getDatanodesFieldBuilder();
getBlockTokensFieldBuilder();
getEcPolicyFieldBuilder();
getBlockChecksumOptionsFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = null;
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (datanodesBuilder_ == null) {
datanodes_ = null;
} else {
datanodesBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
if (blockTokensBuilder_ == null) {
blockTokens_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
blockTokensBuilder_.clear();
}
if (ecPolicyBuilder_ == null) {
ecPolicy_ = null;
} else {
ecPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
blockIndices_ = emptyIntList();
bitField0_ = (bitField0_ & ~0x00000010);
requestedNumBytes_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = null;
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
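// Editorial note (not generated): buildPartial() below copies each set field from
// the builder into the message and remaps the builder's presence bits (0x01..0x40)
// onto the message's bits (0x01..0x10); the repeated blockTokens and blockIndices
// fields carry no presence bit and are simply frozen and handed over.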
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
if (datanodesBuilder_ == null) {
result.datanodes_ = datanodes_;
} else {
result.datanodes_ = datanodesBuilder_.build();
}
to_bitField0_ |= 0x00000002;
}
if (blockTokensBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0)) {
blockTokens_ = java.util.Collections.unmodifiableList(blockTokens_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.blockTokens_ = blockTokens_;
} else {
result.blockTokens_ = blockTokensBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) != 0)) {
if (ecPolicyBuilder_ == null) {
result.ecPolicy_ = ecPolicy_;
} else {
result.ecPolicy_ = ecPolicyBuilder_.build();
}
to_bitField0_ |= 0x00000004;
}
if (((bitField0_ & 0x00000010) != 0)) {
blockIndices_.makeImmutable();
bitField0_ = (bitField0_ & ~0x00000010);
}
result.blockIndices_ = blockIndices_;
if (((from_bitField0_ & 0x00000020) != 0)) {
result.requestedNumBytes_ = requestedNumBytes_;
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000040) != 0)) {
if (blockChecksumOptionsBuilder_ == null) {
result.blockChecksumOptions_ = blockChecksumOptions_;
} else {
result.blockChecksumOptions_ = blockChecksumOptionsBuilder_.build();
}
to_bitField0_ |= 0x00000010;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
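// Editorial note (not generated): in the typed mergeFrom below, singular message
// fields (header, datanodes, ecPolicy, blockChecksumOptions) are merged
// recursively, the repeated blockTokens and blockIndices lists are appended, and
// the scalar requestedNumBytes is overwritten when set on the other message.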
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (other.hasDatanodes()) {
mergeDatanodes(other.getDatanodes());
}
if (blockTokensBuilder_ == null) {
if (!other.blockTokens_.isEmpty()) {
if (blockTokens_.isEmpty()) {
blockTokens_ = other.blockTokens_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureBlockTokensIsMutable();
blockTokens_.addAll(other.blockTokens_);
}
onChanged();
}
} else {
if (!other.blockTokens_.isEmpty()) {
if (blockTokensBuilder_.isEmpty()) {
blockTokensBuilder_.dispose();
blockTokensBuilder_ = null;
blockTokens_ = other.blockTokens_;
bitField0_ = (bitField0_ & ~0x00000004);
blockTokensBuilder_ =
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getBlockTokensFieldBuilder() : null;
} else {
blockTokensBuilder_.addAllMessages(other.blockTokens_);
}
}
}
if (other.hasEcPolicy()) {
mergeEcPolicy(other.getEcPolicy());
}
if (!other.blockIndices_.isEmpty()) {
if (blockIndices_.isEmpty()) {
blockIndices_ = other.blockIndices_;
bitField0_ = (bitField0_ & ~0x00000010);
} else {
ensureBlockIndicesIsMutable();
blockIndices_.addAll(other.blockIndices_);
}
onChanged();
}
if (other.hasRequestedNumBytes()) {
setRequestedNumBytes(other.getRequestedNumBytes());
}
if (other.hasBlockChecksumOptions()) {
mergeBlockChecksumOptions(other.getBlockChecksumOptions());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasDatanodes()) {
return false;
}
if (!hasEcPolicy()) {
return false;
}
if (!hasRequestedNumBytes()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
if (!getDatanodes().isInitialized()) {
return false;
}
for (int i = 0; i < getBlockTokensCount(); i++) {
if (!getBlockTokens(i).isInitialized()) {
return false;
}
}
if (!getEcPolicy().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
header_ != null &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = null;
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
getHeader(),
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto datanodes_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> datanodesBuilder_;
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public boolean hasDatanodes() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDatanodes() {
if (datanodesBuilder_ == null) {
return datanodes_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance() : datanodes_;
} else {
return datanodesBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public Builder setDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
if (datanodesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
datanodes_ = value;
onChanged();
} else {
datanodesBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public Builder setDatanodes(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
if (datanodesBuilder_ == null) {
datanodes_ = builderForValue.build();
onChanged();
} else {
datanodesBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public Builder mergeDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
if (datanodesBuilder_ == null) {
if (((bitField0_ & 0x00000002) != 0) &&
datanodes_ != null &&
datanodes_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()) {
datanodes_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.newBuilder(datanodes_).mergeFrom(value).buildPartial();
} else {
datanodes_ = value;
}
onChanged();
} else {
datanodesBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public Builder clearDatanodes() {
if (datanodesBuilder_ == null) {
datanodes_ = null;
onChanged();
} else {
datanodesBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder getDatanodesBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getDatanodesFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getDatanodesOrBuilder() {
if (datanodesBuilder_ != null) {
return datanodesBuilder_.getMessageOrBuilder();
} else {
return datanodes_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance() : datanodes_;
}
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>
getDatanodesFieldBuilder() {
if (datanodesBuilder_ == null) {
datanodesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>(
getDatanodes(),
getParentForChildren(),
isClean());
datanodes_ = null;
}
return datanodesBuilder_;
}
private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> blockTokens_ =
java.util.Collections.emptyList();
private void ensureBlockTokensIsMutable() {
if (!((bitField0_ & 0x00000004) != 0)) {
blockTokens_ = new java.util.ArrayList<org.apache.hadoop.security.proto.SecurityProtos.TokenProto>(blockTokens_);
bitField0_ |= 0x00000004;
}
}
private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokensBuilder_;
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList() {
if (blockTokensBuilder_ == null) {
return java.util.Collections.unmodifiableList(blockTokens_);
} else {
return blockTokensBuilder_.getMessageList();
}
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public int getBlockTokensCount() {
if (blockTokensBuilder_ == null) {
return blockTokens_.size();
} else {
return blockTokensBuilder_.getCount();
}
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) {
if (blockTokensBuilder_ == null) {
return blockTokens_.get(index);
} else {
return blockTokensBuilder_.getMessage(index);
}
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public Builder setBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlockTokensIsMutable();
blockTokens_.set(index, value);
onChanged();
} else {
blockTokensBuilder_.setMessage(index, value);
}
return this;
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public Builder setBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.set(index, builderForValue.build());
onChanged();
} else {
blockTokensBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public Builder addBlockTokens(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlockTokensIsMutable();
blockTokens_.add(value);
onChanged();
} else {
blockTokensBuilder_.addMessage(value);
}
return this;
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public Builder addBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlockTokensIsMutable();
blockTokens_.add(index, value);
onChanged();
} else {
blockTokensBuilder_.addMessage(index, value);
}
return this;
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public Builder addBlockTokens(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.add(builderForValue.build());
onChanged();
} else {
blockTokensBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public Builder addBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.add(index, builderForValue.build());
onChanged();
} else {
blockTokensBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public Builder addAllBlockTokens(
java.lang.Iterable<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProto> values) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, blockTokens_);
onChanged();
} else {
blockTokensBuilder_.addAllMessages(values);
}
return this;
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public Builder clearBlockTokens() {
if (blockTokensBuilder_ == null) {
blockTokens_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
blockTokensBuilder_.clear();
}
return this;
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public Builder removeBlockTokens(int index) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.remove(index);
onChanged();
} else {
blockTokensBuilder_.remove(index);
}
return this;
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokensBuilder(
int index) {
return getBlockTokensFieldBuilder().getBuilder(index);
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
int index) {
if (blockTokensBuilder_ == null) {
return blockTokens_.get(index); } else {
return blockTokensBuilder_.getMessageOrBuilder(index);
}
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensOrBuilderList() {
if (blockTokensBuilder_ != null) {
return blockTokensBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blockTokens_);
}
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder() {
return getBlockTokensFieldBuilder().addBuilder(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder(
int index) {
return getBlockTokensFieldBuilder().addBuilder(
index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
}
/**
*
* each internal block has a block token
*
*
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder>
getBlockTokensBuilderList() {
return getBlockTokensFieldBuilder().getBuilderList();
}
private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensFieldBuilder() {
if (blockTokensBuilder_ == null) {
blockTokensBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
blockTokens_,
((bitField0_ & 0x00000004) != 0),
getParentForChildren(),
isClean());
blockTokens_ = null;
}
return blockTokensBuilder_;
}
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_;
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public boolean hasEcPolicy() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
if (ecPolicyBuilder_ == null) {
return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
} else {
return ecPolicyBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ecPolicy_ = value;
onChanged();
} else {
ecPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder setEcPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
if (ecPolicyBuilder_ == null) {
ecPolicy_ = builderForValue.build();
onChanged();
} else {
ecPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
if (((bitField0_ & 0x00000008) != 0) &&
ecPolicy_ != null &&
ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) {
ecPolicy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder(ecPolicy_).mergeFrom(value).buildPartial();
} else {
ecPolicy_ = value;
}
onChanged();
} else {
ecPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder clearEcPolicy() {
if (ecPolicyBuilder_ == null) {
ecPolicy_ = null;
onChanged();
} else {
ecPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getEcPolicyFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
if (ecPolicyBuilder_ != null) {
return ecPolicyBuilder_.getMessageOrBuilder();
} else {
return ecPolicy_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
}
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>
getEcPolicyFieldBuilder() {
if (ecPolicyBuilder_ == null) {
ecPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
getEcPolicy(),
getParentForChildren(),
isClean());
ecPolicy_ = null;
}
return ecPolicyBuilder_;
}
private org.apache.hadoop.thirdparty.protobuf.Internal.IntList blockIndices_ = emptyIntList();
private void ensureBlockIndicesIsMutable() {
if (!((bitField0_ & 0x00000010) != 0)) {
blockIndices_ = mutableCopy(blockIndices_);
bitField0_ |= 0x00000010;
}
}
/**
* repeated uint32 blockIndices = 5;
*/
public java.util.List<java.lang.Integer>
getBlockIndicesList() {
return ((bitField0_ & 0x00000010) != 0) ?
java.util.Collections.unmodifiableList(blockIndices_) : blockIndices_;
}
/**
* repeated uint32 blockIndices = 5;
*/
public int getBlockIndicesCount() {
return blockIndices_.size();
}
/**
* repeated uint32 blockIndices = 5;
*/
public int getBlockIndices(int index) {
return blockIndices_.getInt(index);
}
/**
* repeated uint32 blockIndices = 5;
*/
public Builder setBlockIndices(
int index, int value) {
ensureBlockIndicesIsMutable();
blockIndices_.setInt(index, value);
onChanged();
return this;
}
/**
* repeated uint32 blockIndices = 5;
*/
public Builder addBlockIndices(int value) {
ensureBlockIndicesIsMutable();
blockIndices_.addInt(value);
onChanged();
return this;
}
/**
* repeated uint32 blockIndices = 5;
*/
public Builder addAllBlockIndices(
java.lang.Iterable<? extends java.lang.Integer> values) {
ensureBlockIndicesIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, blockIndices_);
onChanged();
return this;
}
/**
* repeated uint32 blockIndices = 5;
*/
public Builder clearBlockIndices() {
blockIndices_ = emptyIntList();
bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
return this;
}
private long requestedNumBytes_ ;
/**
* required uint64 requestedNumBytes = 6;
*/
public boolean hasRequestedNumBytes() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
* required uint64 requestedNumBytes = 6;
*/
public long getRequestedNumBytes() {
return requestedNumBytes_;
}
/**
* required uint64 requestedNumBytes = 6;
*/
public Builder setRequestedNumBytes(long value) {
bitField0_ |= 0x00000020;
requestedNumBytes_ = value;
onChanged();
return this;
}
/**
* required uint64 requestedNumBytes = 6;
*/
public Builder clearRequestedNumBytes() {
bitField0_ = (bitField0_ & ~0x00000020);
requestedNumBytes_ = 0L;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> blockChecksumOptionsBuilder_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000040) != 0);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
} else {
return blockChecksumOptionsBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder setBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blockChecksumOptions_ = value;
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder setBlockChecksumOptions(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder builderForValue) {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = builderForValue.build();
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder mergeBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (((bitField0_ & 0x00000040) != 0) &&
blockChecksumOptions_ != null &&
blockChecksumOptions_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) {
blockChecksumOptions_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.newBuilder(blockChecksumOptions_).mergeFrom(value).buildPartial();
} else {
blockChecksumOptions_ = value;
}
onChanged();
} else {
blockChecksumOptionsBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder clearBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = null;
onChanged();
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder getBlockChecksumOptionsBuilder() {
bitField0_ |= 0x00000040;
onChanged();
return getBlockChecksumOptionsFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
if (blockChecksumOptionsBuilder_ != null) {
return blockChecksumOptionsBuilder_.getMessageOrBuilder();
} else {
return blockChecksumOptions_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>
getBlockChecksumOptionsFieldBuilder() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>(
getBlockChecksumOptions(),
getParentForChildren(),
isClean());
blockChecksumOptions_ = null;
}
return blockChecksumOptionsBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpBlockGroupChecksumProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpBlockGroupChecksumProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockGroupChecksumProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpBlockGroupChecksumProto>() {
@java.lang.Override
public OpBlockGroupChecksumProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new OpBlockGroupChecksumProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockGroupChecksumProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockGroupChecksumProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
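/**
* Illustrative sketch, not part of the generated protocol code: one way a caller
* might assemble an OpBlockGroupChecksumProto with the builder above. The header,
* datanode list, erasure coding policy and byte count are placeholders supplied by
* the caller, and the single block index added here is only an example value.
*/
private static OpBlockGroupChecksumProto buildExampleBlockGroupChecksumRequest(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto datanodes,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy,
long requestedNumBytes) {
return OpBlockGroupChecksumProto.newBuilder()
.setHeader(header)                       // required: block and access token of the block group
.setDatanodes(datanodes)                 // required: datanodes holding the internal blocks
.setEcPolicy(ecPolicy)                   // required: erasure coding policy of the block group
.addBlockIndices(0)                      // repeated: index of each internal block (example value)
.setRequestedNumBytes(requestedNumBytes) // required: number of bytes to checksum
.build();                                // throws if a required field is unset
}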
public interface ShortCircuitShmIdProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShortCircuitShmIdProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required int64 hi = 1;
*/
boolean hasHi();
/**
* required int64 hi = 1;
*/
long getHi();
/**
* required int64 lo = 2;
*/
boolean hasLo();
/**
* required int64 lo = 2;
*/
long getLo();
}
/**
*
**
* An ID uniquely identifying a shared memory segment.
*
*
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmIdProto}
*/
public static final class ShortCircuitShmIdProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ShortCircuitShmIdProto)
ShortCircuitShmIdProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ShortCircuitShmIdProto.newBuilder() to construct.
private ShortCircuitShmIdProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ShortCircuitShmIdProto() {
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShortCircuitShmIdProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
hi_ = input.readInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
lo_ = input.readInt64();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder.class);
}
private int bitField0_;
public static final int HI_FIELD_NUMBER = 1;
private long hi_;
/**
* required int64 hi = 1;
*/
public boolean hasHi() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required int64 hi = 1;
*/
public long getHi() {
return hi_;
}
public static final int LO_FIELD_NUMBER = 2;
private long lo_;
/**
* required int64 lo = 2;
*/
public boolean hasLo() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required int64 lo = 2;
*/
public long getLo() {
return lo_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHi()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLo()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeInt64(1, hi_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeInt64(2, lo_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeInt64Size(1, hi_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeInt64Size(2, lo_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto) obj;
if (hasHi() != other.hasHi()) return false;
if (hasHi()) {
if (getHi()
!= other.getHi()) return false;
}
if (hasLo() != other.hasLo()) return false;
if (hasLo()) {
if (getLo()
!= other.getLo()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHi()) {
hash = (37 * hash) + HI_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getHi());
}
if (hasLo()) {
hash = (37 * hash) + LO_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getLo());
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* An ID uniquely identifying a shared memory segment.
*
*
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmIdProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShortCircuitShmIdProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
hi_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
lo_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.hi_ = hi_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.lo_ = lo_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) return this;
if (other.hasHi()) {
setHi(other.getHi());
}
if (other.hasLo()) {
setLo(other.getLo());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHi()) {
return false;
}
if (!hasLo()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private long hi_ ;
/**
* required int64 hi = 1;
*/
public boolean hasHi() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required int64 hi = 1;
*/
public long getHi() {
return hi_;
}
/**
* required int64 hi = 1;
*/
public Builder setHi(long value) {
bitField0_ |= 0x00000001;
hi_ = value;
onChanged();
return this;
}
/**
* required int64 hi = 1;
*/
public Builder clearHi() {
bitField0_ = (bitField0_ & ~0x00000001);
hi_ = 0L;
onChanged();
return this;
}
private long lo_ ;
/**
* required int64 lo = 2;
*/
public boolean hasLo() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required int64 lo = 2;
*/
public long getLo() {
return lo_;
}
/**
* required int64 lo = 2;
*/
public Builder setLo(long value) {
bitField0_ |= 0x00000002;
lo_ = value;
onChanged();
return this;
}
/**
* required int64 lo = 2;
*/
public Builder clearLo() {
bitField0_ = (bitField0_ & ~0x00000002);
lo_ = 0L;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmIdProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmIdProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmIdProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmIdProto>() {
@java.lang.Override
public ShortCircuitShmIdProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ShortCircuitShmIdProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmIdProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmIdProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
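/**
* Illustrative sketch, not part of the generated protocol code: builds a
* ShortCircuitShmIdProto from a 128-bit segment id split into hi and lo halves and
* round-trips it through the protobuf wire format. The two literals are example values.
*/
private static ShortCircuitShmIdProto exampleShmIdRoundTrip()
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
ShortCircuitShmIdProto id = ShortCircuitShmIdProto.newBuilder()
.setHi(0x0123456789abcdefL)  // required: upper 64 bits of the segment id (example value)
.setLo(0x0fedcba987654321L)  // required: lower 64 bits of the segment id (example value)
.build();
byte[] bytes = id.toByteArray();                // serialize to the wire format
return ShortCircuitShmIdProto.parseFrom(bytes); // parses back to a message equal to id
}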
public interface ShortCircuitShmSlotProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShortCircuitShmSlotProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
boolean hasShmId();
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId();
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder();
/**
* required int32 slotIdx = 2;
*/
boolean hasSlotIdx();
/**
* required int32 slotIdx = 2;
*/
int getSlotIdx();
}
/**
*
**
* An ID uniquely identifying a slot within a shared memory segment.
*
*
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmSlotProto}
*/
public static final class ShortCircuitShmSlotProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ShortCircuitShmSlotProto)
ShortCircuitShmSlotProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ShortCircuitShmSlotProto.newBuilder() to construct.
private ShortCircuitShmSlotProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ShortCircuitShmSlotProto() {
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShortCircuitShmSlotProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = shmId_.toBuilder();
}
shmId_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(shmId_);
shmId_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
slotIdx_ = input.readInt32();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder.class);
}
private int bitField0_;
public static final int SHMID_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto shmId_;
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public boolean hasShmId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId() {
return shmId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder() {
return shmId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
}
public static final int SLOTIDX_FIELD_NUMBER = 2;
private int slotIdx_;
/**
* required int32 slotIdx = 2;
*/
public boolean hasSlotIdx() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required int32 slotIdx = 2;
*/
public int getSlotIdx() {
return slotIdx_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasShmId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSlotIdx()) {
memoizedIsInitialized = 0;
return false;
}
if (!getShmId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getShmId());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeInt32(2, slotIdx_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getShmId());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeInt32Size(2, slotIdx_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto) obj;
if (hasShmId() != other.hasShmId()) return false;
if (hasShmId()) {
if (!getShmId()
.equals(other.getShmId())) return false;
}
if (hasSlotIdx() != other.hasSlotIdx()) return false;
if (hasSlotIdx()) {
if (getSlotIdx()
!= other.getSlotIdx()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasShmId()) {
hash = (37 * hash) + SHMID_FIELD_NUMBER;
hash = (53 * hash) + getShmId().hashCode();
}
if (hasSlotIdx()) {
hash = (37 * hash) + SLOTIDX_FIELD_NUMBER;
hash = (53 * hash) + getSlotIdx();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* An ID uniquely identifying a slot within a shared memory segment.
*
*
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmSlotProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShortCircuitShmSlotProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getShmIdFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (shmIdBuilder_ == null) {
shmId_ = null;
} else {
shmIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
slotIdx_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (shmIdBuilder_ == null) {
result.shmId_ = shmId_;
} else {
result.shmId_ = shmIdBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.slotIdx_ = slotIdx_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance()) return this;
if (other.hasShmId()) {
mergeShmId(other.getShmId());
}
if (other.hasSlotIdx()) {
setSlotIdx(other.getSlotIdx());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasShmId()) {
return false;
}
if (!hasSlotIdx()) {
return false;
}
if (!getShmId().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto shmId_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder> shmIdBuilder_;
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public boolean hasShmId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId() {
if (shmIdBuilder_ == null) {
return shmId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
} else {
return shmIdBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder setShmId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (shmIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
shmId_ = value;
onChanged();
} else {
shmIdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder setShmId(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder builderForValue) {
if (shmIdBuilder_ == null) {
shmId_ = builderForValue.build();
onChanged();
} else {
shmIdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder mergeShmId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (shmIdBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
shmId_ != null &&
shmId_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) {
shmId_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.newBuilder(shmId_).mergeFrom(value).buildPartial();
} else {
shmId_ = value;
}
onChanged();
} else {
shmIdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder clearShmId() {
if (shmIdBuilder_ == null) {
shmId_ = null;
onChanged();
} else {
shmIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder getShmIdBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getShmIdFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder() {
if (shmIdBuilder_ != null) {
return shmIdBuilder_.getMessageOrBuilder();
} else {
return shmId_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
}
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>
getShmIdFieldBuilder() {
if (shmIdBuilder_ == null) {
shmIdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>(
getShmId(),
getParentForChildren(),
isClean());
shmId_ = null;
}
return shmIdBuilder_;
}
private int slotIdx_ ;
/**
* required int32 slotIdx = 2;
*/
public boolean hasSlotIdx() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required int32 slotIdx = 2;
*/
public int getSlotIdx() {
return slotIdx_;
}
/**
* required int32 slotIdx = 2;
*/
public Builder setSlotIdx(int value) {
bitField0_ |= 0x00000002;
slotIdx_ = value;
onChanged();
return this;
}
/**
* required int32 slotIdx = 2;
*/
public Builder clearSlotIdx() {
bitField0_ = (bitField0_ & ~0x00000002);
slotIdx_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmSlotProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmSlotProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmSlotProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmSlotProto>() {
@java.lang.Override
public ShortCircuitShmSlotProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ShortCircuitShmSlotProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmSlotProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmSlotProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
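// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the generated code): a typical round trip
// through the ShortCircuitShmSlotProto builder and parser defined above. The
// ShortCircuitShmIdProto value is assumed to be obtained elsewhere (e.g. from
// the DataNode's shared-memory registration reply); only methods generated in
// this file plus the standard protobuf toByteArray()/parseFrom() are used.
//
//   ShortCircuitShmIdProto shmId = ...;            // obtained elsewhere
//   ShortCircuitShmSlotProto slot = ShortCircuitShmSlotProto.newBuilder()
//       .setShmId(shmId)                           // required field 1
//       .setSlotIdx(3)                             // required field 2
//       .build();
//   byte[] wire = slot.toByteArray();              // serialize to bytes
//   ShortCircuitShmSlotProto decoded =
//       ShortCircuitShmSlotProto.parseFrom(wire);  // parse back from bytes
// ---------------------------------------------------------------------------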
public interface OpRequestShortCircuitAccessProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpRequestShortCircuitAccessProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
*/
boolean hasMaxVersion();
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
*/
int getMaxVersion();
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
boolean hasSlotId();
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId();
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder();
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
*/
boolean hasSupportsReceiptVerification();
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
*/
boolean getSupportsReceiptVerification();
}
/**
* Protobuf type {@code hadoop.hdfs.OpRequestShortCircuitAccessProto}
*/
public static final class OpRequestShortCircuitAccessProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpRequestShortCircuitAccessProto)
OpRequestShortCircuitAccessProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpRequestShortCircuitAccessProto.newBuilder() to construct.
private OpRequestShortCircuitAccessProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpRequestShortCircuitAccessProto() {
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpRequestShortCircuitAccessProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
maxVersion_ = input.readUInt32();
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) != 0)) {
subBuilder = slotId_.toBuilder();
}
slotId_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(slotId_);
slotId_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 32: {
bitField0_ |= 0x00000008;
supportsReceiptVerification_ = input.readBool();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.Builder.class);
}
private int bitField0_;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
public static final int MAXVERSION_FIELD_NUMBER = 2;
private int maxVersion_;
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
*/
public boolean hasMaxVersion() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
*/
public int getMaxVersion() {
return maxVersion_;
}
public static final int SLOTID_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_;
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
public boolean hasSlotId() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
}
public static final int SUPPORTSRECEIPTVERIFICATION_FIELD_NUMBER = 4;
private boolean supportsReceiptVerification_;
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
*/
public boolean hasSupportsReceiptVerification() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
*/
public boolean getSupportsReceiptVerification() {
return supportsReceiptVerification_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMaxVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (hasSlotId()) {
if (!getSlotId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt32(2, maxVersion_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeMessage(3, getSlotId());
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeBool(4, supportsReceiptVerification_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(2, maxVersion_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(3, getSlotId());
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(4, supportsReceiptVerification_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto) obj;
if (hasHeader() != other.hasHeader()) return false;
if (hasHeader()) {
if (!getHeader()
.equals(other.getHeader())) return false;
}
if (hasMaxVersion() != other.hasMaxVersion()) return false;
if (hasMaxVersion()) {
if (getMaxVersion()
!= other.getMaxVersion()) return false;
}
if (hasSlotId() != other.hasSlotId()) return false;
if (hasSlotId()) {
if (!getSlotId()
.equals(other.getSlotId())) return false;
}
if (hasSupportsReceiptVerification() != other.hasSupportsReceiptVerification()) return false;
if (hasSupportsReceiptVerification()) {
if (getSupportsReceiptVerification()
!= other.getSupportsReceiptVerification()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (hasMaxVersion()) {
hash = (37 * hash) + MAXVERSION_FIELD_NUMBER;
hash = (53 * hash) + getMaxVersion();
}
if (hasSlotId()) {
hash = (37 * hash) + SLOTID_FIELD_NUMBER;
hash = (53 * hash) + getSlotId().hashCode();
}
if (hasSupportsReceiptVerification()) {
hash = (37 * hash) + SUPPORTSRECEIPTVERIFICATION_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getSupportsReceiptVerification());
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpRequestShortCircuitAccessProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpRequestShortCircuitAccessProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getSlotIdFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = null;
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
maxVersion_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
if (slotIdBuilder_ == null) {
slotId_ = null;
} else {
slotIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
supportsReceiptVerification_ = false;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.maxVersion_ = maxVersion_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
if (slotIdBuilder_ == null) {
result.slotId_ = slotId_;
} else {
result.slotId_ = slotIdBuilder_.build();
}
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.supportsReceiptVerification_ = supportsReceiptVerification_;
to_bitField0_ |= 0x00000008;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (other.hasMaxVersion()) {
setMaxVersion(other.getMaxVersion());
}
if (other.hasSlotId()) {
mergeSlotId(other.getSlotId());
}
if (other.hasSupportsReceiptVerification()) {
setSupportsReceiptVerification(other.getSupportsReceiptVerification());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasMaxVersion()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
if (hasSlotId()) {
if (!getSlotId().isInitialized()) {
return false;
}
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
header_ != null &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = null;
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
getHeader(),
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
private int maxVersion_ ;
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
*/
public boolean hasMaxVersion() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
*/
public int getMaxVersion() {
return maxVersion_;
}
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
*/
public Builder setMaxVersion(int value) {
bitField0_ |= 0x00000002;
maxVersion_ = value;
onChanged();
return this;
}
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
*/
public Builder clearMaxVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
maxVersion_ = 0;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder> slotIdBuilder_;
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
public boolean hasSlotId() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
if (slotIdBuilder_ == null) {
return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
} else {
return slotIdBuilder_.getMessage();
}
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
public Builder setSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
if (slotIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
slotId_ = value;
onChanged();
} else {
slotIdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
public Builder setSlotId(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder builderForValue) {
if (slotIdBuilder_ == null) {
slotId_ = builderForValue.build();
onChanged();
} else {
slotIdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
public Builder mergeSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
if (slotIdBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0) &&
slotId_ != null &&
slotId_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance()) {
slotId_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.newBuilder(slotId_).mergeFrom(value).buildPartial();
} else {
slotId_ = value;
}
onChanged();
} else {
slotIdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
public Builder clearSlotId() {
if (slotIdBuilder_ == null) {
slotId_ = null;
onChanged();
} else {
slotIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder getSlotIdBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getSlotIdFieldBuilder().getBuilder();
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
if (slotIdBuilder_ != null) {
return slotIdBuilder_.getMessageOrBuilder();
} else {
return slotId_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
}
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder>
getSlotIdFieldBuilder() {
if (slotIdBuilder_ == null) {
slotIdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder>(
getSlotId(),
getParentForChildren(),
isClean());
slotId_ = null;
}
return slotIdBuilder_;
}
private boolean supportsReceiptVerification_ ;
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
*/
public boolean hasSupportsReceiptVerification() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
*/
public boolean getSupportsReceiptVerification() {
return supportsReceiptVerification_;
}
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
*/
public Builder setSupportsReceiptVerification(boolean value) {
bitField0_ |= 0x00000008;
supportsReceiptVerification_ = value;
onChanged();
return this;
}
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
*/
public Builder clearSupportsReceiptVerification() {
bitField0_ = (bitField0_ & ~0x00000008);
supportsReceiptVerification_ = false;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpRequestShortCircuitAccessProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpRequestShortCircuitAccessProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpRequestShortCircuitAccessProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpRequestShortCircuitAccessProto>() {
@java.lang.Override
public OpRequestShortCircuitAccessProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new OpRequestShortCircuitAccessProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpRequestShortCircuitAccessProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpRequestShortCircuitAccessProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
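// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the generated code): how a client might
// assemble an OpRequestShortCircuitAccessProto with the builder methods
// generated above. The BaseHeaderProto and ShortCircuitShmSlotProto values are
// assumed to have been built elsewhere, and "out" is assumed to be an
// OutputStream for the DataNode connection; maxVersion 1 matches the field
// comment (currently the only block-data format version).
//
//   BaseHeaderProto header = ...;                  // block + token header
//   ShortCircuitShmSlotProto slot = ...;           // optional shm slot
//   OpRequestShortCircuitAccessProto request =
//       OpRequestShortCircuitAccessProto.newBuilder()
//           .setHeader(header)                     // required field 1
//           .setMaxVersion(1)                      // required field 2
//           .setSlotId(slot)                       // optional field 3
//           .setSupportsReceiptVerification(true)  // optional field 4
//           .build();
//   request.writeDelimitedTo(out);                 // length-prefixed write
// ---------------------------------------------------------------------------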
public interface ReleaseShortCircuitAccessRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
boolean hasSlotId();
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId();
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
boolean hasTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessRequestProto}
*/
public static final class ReleaseShortCircuitAccessRequestProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
ReleaseShortCircuitAccessRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ReleaseShortCircuitAccessRequestProto.newBuilder() to construct.
private ReleaseShortCircuitAccessRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ReleaseShortCircuitAccessRequestProto() {
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ReleaseShortCircuitAccessRequestProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = slotId_.toBuilder();
}
slotId_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(slotId_);
slotId_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) != 0)) {
subBuilder = traceInfo_.toBuilder();
}
traceInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(traceInfo_);
traceInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.Builder.class);
}
private int bitField0_;
public static final int SLOTID_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_;
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public boolean hasSlotId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
}
public static final int TRACEINFO_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasSlotId()) {
memoizedIsInitialized = 0;
return false;
}
if (!getSlotId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getSlotId());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(2, getTraceInfo());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getSlotId());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(2, getTraceInfo());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto) obj;
if (hasSlotId() != other.hasSlotId()) return false;
if (hasSlotId()) {
if (!getSlotId()
.equals(other.getSlotId())) return false;
}
if (hasTraceInfo() != other.hasTraceInfo()) return false;
if (hasTraceInfo()) {
if (!getTraceInfo()
.equals(other.getTraceInfo())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasSlotId()) {
hash = (37 * hash) + SLOTID_FIELD_NUMBER;
hash = (53 * hash) + getSlotId().hashCode();
}
if (hasTraceInfo()) {
hash = (37 * hash) + TRACEINFO_FIELD_NUMBER;
hash = (53 * hash) + getTraceInfo().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getSlotIdFieldBuilder();
getTraceInfoFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (slotIdBuilder_ == null) {
slotId_ = null;
} else {
slotIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (traceInfoBuilder_ == null) {
traceInfo_ = null;
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (slotIdBuilder_ == null) {
result.slotId_ = slotId_;
} else {
result.slotId_ = slotIdBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
if (traceInfoBuilder_ == null) {
result.traceInfo_ = traceInfo_;
} else {
result.traceInfo_ = traceInfoBuilder_.build();
}
to_bitField0_ |= 0x00000002;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.getDefaultInstance()) return this;
if (other.hasSlotId()) {
mergeSlotId(other.getSlotId());
}
if (other.hasTraceInfo()) {
mergeTraceInfo(other.getTraceInfo());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasSlotId()) {
return false;
}
if (!getSlotId().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder> slotIdBuilder_;
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public boolean hasSlotId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
if (slotIdBuilder_ == null) {
return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
} else {
return slotIdBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public Builder setSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
if (slotIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
slotId_ = value;
onChanged();
} else {
slotIdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public Builder setSlotId(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder builderForValue) {
if (slotIdBuilder_ == null) {
slotId_ = builderForValue.build();
onChanged();
} else {
slotIdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public Builder mergeSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
if (slotIdBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
slotId_ != null &&
slotId_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance()) {
slotId_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.newBuilder(slotId_).mergeFrom(value).buildPartial();
} else {
slotId_ = value;
}
onChanged();
} else {
slotIdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public Builder clearSlotId() {
if (slotIdBuilder_ == null) {
slotId_ = null;
onChanged();
} else {
slotIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder getSlotIdBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getSlotIdFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
if (slotIdBuilder_ != null) {
return slotIdBuilder_.getMessageOrBuilder();
} else {
return slotId_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
}
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder>
getSlotIdFieldBuilder() {
if (slotIdBuilder_ == null) {
slotIdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder>(
getSlotId(),
getParentForChildren(),
isClean());
slotId_ = null;
}
return slotIdBuilder_;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> traceInfoBuilder_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
if (traceInfoBuilder_ == null) {
return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
} else {
return traceInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder setTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
traceInfo_ = value;
onChanged();
} else {
traceInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder setTraceInfo(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder builderForValue) {
if (traceInfoBuilder_ == null) {
traceInfo_ = builderForValue.build();
onChanged();
} else {
traceInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder mergeTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (((bitField0_ & 0x00000002) != 0) &&
traceInfo_ != null &&
traceInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) {
traceInfo_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.newBuilder(traceInfo_).mergeFrom(value).buildPartial();
} else {
traceInfo_ = value;
}
onChanged();
} else {
traceInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder clearTraceInfo() {
if (traceInfoBuilder_ == null) {
traceInfo_ = null;
onChanged();
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder getTraceInfoBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTraceInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
if (traceInfoBuilder_ != null) {
return traceInfoBuilder_.getMessageOrBuilder();
} else {
return traceInfo_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>
getTraceInfoFieldBuilder() {
if (traceInfoBuilder_ == null) {
traceInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>(
getTraceInfo(),
getParentForChildren(),
isClean());
traceInfo_ = null;
}
return traceInfoBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessRequestProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ReleaseShortCircuitAccessRequestProto>() {
@java.lang.Override
public ReleaseShortCircuitAccessRequestProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ReleaseShortCircuitAccessRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
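/*
 * Illustrative sketch, not part of the generated file: building a release
 * request with the Builder defined above. "slot" stands for an already
 * constructed ShortCircuitShmSlotProto (that message is defined elsewhere in
 * this file); traceInfo is optional and can simply be left unset.
 *
 *   ShortCircuitShmSlotProto slot = ...;          // obtained by the caller
 *   ReleaseShortCircuitAccessRequestProto req =
 *       ReleaseShortCircuitAccessRequestProto.newBuilder()
 *           .setSlotId(slot)                      // required field
 *           .build();                             // would throw if slotId were missing
 *   req.writeTo(output);                          // "output" is a CodedOutputStream placeholder
 */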
public interface ReleaseShortCircuitAccessResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.Status status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
/**
* optional string error = 2;
*/
boolean hasError();
/**
* optional string error = 2;
*/
java.lang.String getError();
/**
* optional string error = 2;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getErrorBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessResponseProto}
*/
public static final class ReleaseShortCircuitAccessResponseProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
ReleaseShortCircuitAccessResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ReleaseShortCircuitAccessResponseProto.newBuilder() to construct.
private ReleaseShortCircuitAccessResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ReleaseShortCircuitAccessResponseProto() {
status_ = 0;
error_ = "";
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ReleaseShortCircuitAccessResponseProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = rawValue;
}
break;
}
case 18: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000002;
error_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.Builder.class);
}
private int bitField0_;
public static final int STATUS_FIELD_NUMBER = 1;
private int status_;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
public static final int ERROR_FIELD_NUMBER = 2;
private volatile java.lang.Object error_;
/**
* optional string error = 2;
*/
public boolean hasError() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string error = 2;
*/
public java.lang.String getError() {
java.lang.Object ref = error_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
error_ = s;
}
return s;
}
}
/**
* optional string error = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getErrorBytes() {
java.lang.Object ref = error_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
error_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, error_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, error_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto) obj;
if (hasStatus() != other.hasStatus()) return false;
if (hasStatus()) {
if (status_ != other.status_) return false;
}
if (hasError() != other.hasError()) return false;
if (hasError()) {
if (!getError()
.equals(other.getError())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + status_;
}
if (hasError()) {
hash = (37 * hash) + ERROR_FIELD_NUMBER;
hash = (53 * hash) + getError().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
status_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
error_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.error_ = error_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasError()) {
bitField0_ |= 0x00000002;
error_ = other.error_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = 0;
onChanged();
return this;
}
private java.lang.Object error_ = "";
/**
* optional string error = 2;
*/
public boolean hasError() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string error = 2;
*/
public java.lang.String getError() {
java.lang.Object ref = error_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
error_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string error = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getErrorBytes() {
java.lang.Object ref = error_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
error_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string error = 2;
*/
public Builder setError(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
error_ = value;
onChanged();
return this;
}
/**
* optional string error = 2;
*/
public Builder clearError() {
bitField0_ = (bitField0_ & ~0x00000002);
error_ = getDefaultInstance().getError();
onChanged();
return this;
}
/**
* optional string error = 2;
*/
public Builder setErrorBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
error_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessResponseProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ReleaseShortCircuitAccessResponseProto>() {
@java.lang.Override
public ReleaseShortCircuitAccessResponseProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ReleaseShortCircuitAccessResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
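/*
 * Illustrative sketch, not part of the generated file: a plausible way to
 * build and round-trip the response message above. Status comes from the enum
 * declared at the top of this file; the error string is only set on failure,
 * matching its "optional" declaration, and the message text here is purely
 * hypothetical.
 *
 *   ReleaseShortCircuitAccessResponseProto resp =
 *       ReleaseShortCircuitAccessResponseProto.newBuilder()
 *           .setStatus(Status.ERROR_INVALID)
 *           .setError("slot was not registered")  // hypothetical error text
 *           .build();
 *   byte[] wire = resp.toByteArray();
 *   ReleaseShortCircuitAccessResponseProto parsed =
 *       ReleaseShortCircuitAccessResponseProto.parseFrom(wire);
 *   assert parsed.getStatus() == Status.ERROR_INVALID;
 */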
public interface ShortCircuitShmRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShortCircuitShmRequestProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
boolean hasClientName();
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
java.lang.String getClientName();
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getClientNameBytes();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
boolean hasTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmRequestProto}
*/
public static final class ShortCircuitShmRequestProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ShortCircuitShmRequestProto)
ShortCircuitShmRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ShortCircuitShmRequestProto.newBuilder() to construct.
private ShortCircuitShmRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ShortCircuitShmRequestProto() {
clientName_ = "";
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShortCircuitShmRequestProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000001;
clientName_ = bs;
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) != 0)) {
subBuilder = traceInfo_.toBuilder();
}
traceInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(traceInfo_);
traceInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.Builder.class);
}
private int bitField0_;
public static final int CLIENTNAME_FIELD_NUMBER = 1;
private volatile java.lang.Object clientName_;
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
public boolean hasClientName() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
public java.lang.String getClientName() {
java.lang.Object ref = clientName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
clientName_ = s;
}
return s;
}
}
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getClientNameBytes() {
java.lang.Object ref = clientName_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clientName_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int TRACEINFO_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasClientName()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, clientName_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(2, getTraceInfo());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, clientName_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(2, getTraceInfo());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto) obj;
if (hasClientName() != other.hasClientName()) return false;
if (hasClientName()) {
if (!getClientName()
.equals(other.getClientName())) return false;
}
if (hasTraceInfo() != other.hasTraceInfo()) return false;
if (hasTraceInfo()) {
if (!getTraceInfo()
.equals(other.getTraceInfo())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasClientName()) {
hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER;
hash = (53 * hash) + getClientName().hashCode();
}
if (hasTraceInfo()) {
hash = (37 * hash) + TRACEINFO_FIELD_NUMBER;
hash = (53 * hash) + getTraceInfo().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShortCircuitShmRequestProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getTraceInfoFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
clientName_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
if (traceInfoBuilder_ == null) {
traceInfo_ = null;
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.clientName_ = clientName_;
if (((from_bitField0_ & 0x00000002) != 0)) {
if (traceInfoBuilder_ == null) {
result.traceInfo_ = traceInfo_;
} else {
result.traceInfo_ = traceInfoBuilder_.build();
}
to_bitField0_ |= 0x00000002;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.getDefaultInstance()) return this;
if (other.hasClientName()) {
bitField0_ |= 0x00000001;
clientName_ = other.clientName_;
onChanged();
}
if (other.hasTraceInfo()) {
mergeTraceInfo(other.getTraceInfo());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasClientName()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private java.lang.Object clientName_ = "";
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
public boolean hasClientName() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
public java.lang.String getClientName() {
java.lang.Object ref = clientName_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
clientName_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getClientNameBytes() {
java.lang.Object ref = clientName_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clientName_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
public Builder setClientName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
clientName_ = value;
onChanged();
return this;
}
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
public Builder clearClientName() {
bitField0_ = (bitField0_ & ~0x00000001);
clientName_ = getDefaultInstance().getClientName();
onChanged();
return this;
}
/**
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
*/
public Builder setClientNameBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
clientName_ = value;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> traceInfoBuilder_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
if (traceInfoBuilder_ == null) {
return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
} else {
return traceInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder setTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
traceInfo_ = value;
onChanged();
} else {
traceInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder setTraceInfo(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder builderForValue) {
if (traceInfoBuilder_ == null) {
traceInfo_ = builderForValue.build();
onChanged();
} else {
traceInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder mergeTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (((bitField0_ & 0x00000002) != 0) &&
traceInfo_ != null &&
traceInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) {
traceInfo_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.newBuilder(traceInfo_).mergeFrom(value).buildPartial();
} else {
traceInfo_ = value;
}
onChanged();
} else {
traceInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder clearTraceInfo() {
if (traceInfoBuilder_ == null) {
traceInfo_ = null;
onChanged();
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder getTraceInfoBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTraceInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
if (traceInfoBuilder_ != null) {
return traceInfoBuilder_.getMessageOrBuilder();
} else {
return traceInfo_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>
getTraceInfoFieldBuilder() {
if (traceInfoBuilder_ == null) {
traceInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>(
getTraceInfo(),
getParentForChildren(),
isClean());
traceInfo_ = null;
}
return traceInfoBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmRequestProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmRequestProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmRequestProto>() {
@java.lang.Override
public ShortCircuitShmRequestProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ShortCircuitShmRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
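/*
 * Illustrative usage sketch (not part of the generated code): building and sending a
 * ShortCircuitShmRequestProto with the generated builder API shown above. The client
 * name string and the output stream variable are placeholders, not values taken from HDFS.
 *
 *   ShortCircuitShmRequestProto request = ShortCircuitShmRequestProto.newBuilder()
 *       .setClientName("example-client")   // required field; build() throws if it is unset
 *       .build();
 *   request.writeDelimitedTo(out);         // 'out' is an assumed java.io.OutputStream
 */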
public interface ShortCircuitShmResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShortCircuitShmResponseProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.Status status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
/**
* optional string error = 2;
*/
boolean hasError();
/**
* optional string error = 2;
*/
java.lang.String getError();
/**
* optional string error = 2;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getErrorBytes();
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
boolean hasId();
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId();
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmResponseProto}
*/
public static final class ShortCircuitShmResponseProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ShortCircuitShmResponseProto)
ShortCircuitShmResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ShortCircuitShmResponseProto.newBuilder() to construct.
private ShortCircuitShmResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ShortCircuitShmResponseProto() {
status_ = 0;
error_ = "";
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShortCircuitShmResponseProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = rawValue;
}
break;
}
case 18: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000002;
error_ = bs;
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) != 0)) {
subBuilder = id_.toBuilder();
}
id_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(id_);
id_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.Builder.class);
}
private int bitField0_;
public static final int STATUS_FIELD_NUMBER = 1;
private int status_;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
public static final int ERROR_FIELD_NUMBER = 2;
private volatile java.lang.Object error_;
/**
* optional string error = 2;
*/
public boolean hasError() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string error = 2;
*/
public java.lang.String getError() {
java.lang.Object ref = error_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
error_ = s;
}
return s;
}
}
/**
* optional string error = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getErrorBytes() {
java.lang.Object ref = error_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
error_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int ID_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto id_;
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public boolean hasId() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId() {
return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder() {
return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
if (hasId()) {
if (!getId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, error_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeMessage(3, getId());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, error_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(3, getId());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto) obj;
if (hasStatus() != other.hasStatus()) return false;
if (hasStatus()) {
if (status_ != other.status_) return false;
}
if (hasError() != other.hasError()) return false;
if (hasError()) {
if (!getError()
.equals(other.getError())) return false;
}
if (hasId() != other.hasId()) return false;
if (hasId()) {
if (!getId()
.equals(other.getId())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + status_;
}
if (hasError()) {
hash = (37 * hash) + ERROR_FIELD_NUMBER;
hash = (53 * hash) + getError().hashCode();
}
if (hasId()) {
hash = (37 * hash) + ID_FIELD_NUMBER;
hash = (53 * hash) + getId().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShortCircuitShmResponseProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getIdFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
status_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
error_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (idBuilder_ == null) {
id_ = null;
} else {
idBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.error_ = error_;
if (((from_bitField0_ & 0x00000004) != 0)) {
if (idBuilder_ == null) {
result.id_ = id_;
} else {
result.id_ = idBuilder_.build();
}
to_bitField0_ |= 0x00000004;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasError()) {
bitField0_ |= 0x00000002;
error_ = other.error_;
onChanged();
}
if (other.hasId()) {
mergeId(other.getId());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
if (hasId()) {
if (!getId().isInitialized()) {
return false;
}
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = 0;
onChanged();
return this;
}
private java.lang.Object error_ = "";
/**
* optional string error = 2;
*/
public boolean hasError() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string error = 2;
*/
public java.lang.String getError() {
java.lang.Object ref = error_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
error_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string error = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getErrorBytes() {
java.lang.Object ref = error_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
error_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string error = 2;
*/
public Builder setError(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
error_ = value;
onChanged();
return this;
}
/**
* optional string error = 2;
*/
public Builder clearError() {
bitField0_ = (bitField0_ & ~0x00000002);
error_ = getDefaultInstance().getError();
onChanged();
return this;
}
/**
* optional string error = 2;
*/
public Builder setErrorBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
error_ = value;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto id_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder> idBuilder_;
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public boolean hasId() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId() {
if (idBuilder_ == null) {
return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
} else {
return idBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder setId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (idBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
id_ = value;
onChanged();
} else {
idBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder setId(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder builderForValue) {
if (idBuilder_ == null) {
id_ = builderForValue.build();
onChanged();
} else {
idBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (idBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0) &&
id_ != null &&
id_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) {
id_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.newBuilder(id_).mergeFrom(value).buildPartial();
} else {
id_ = value;
}
onChanged();
} else {
idBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder clearId() {
if (idBuilder_ == null) {
id_ = null;
onChanged();
} else {
idBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder getIdBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getIdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder() {
if (idBuilder_ != null) {
return idBuilder_.getMessageOrBuilder();
} else {
return id_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
}
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>
getIdFieldBuilder() {
if (idBuilder_ == null) {
idBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>(
getId(),
getParentForChildren(),
isClean());
id_ = null;
}
return idBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmResponseProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmResponseProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmResponseProto>() {
@java.lang.Override
public ShortCircuitShmResponseProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ShortCircuitShmResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
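/*
 * Illustrative usage sketch (not part of the generated code): reading a
 * ShortCircuitShmResponseProto with the generated parse/accessor API shown above and
 * checking its required status field. The input stream variable 'in' is a placeholder.
 *
 *   ShortCircuitShmResponseProto resp = ShortCircuitShmResponseProto.parseDelimitedFrom(in);
 *   if (resp.getStatus() != Status.SUCCESS) {
 *     String reason = resp.hasError() ? resp.getError() : "unknown error";
 *     throw new java.io.IOException("shared memory request failed: " + reason);
 *   }
 *   if (resp.hasId()) {
 *     ShortCircuitShmIdProto shmId = resp.getId();   // optional identifier of the new segment
 *   }
 */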
public interface PacketHeaderProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.PacketHeaderProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
*/
boolean hasOffsetInBlock();
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
*/
long getOffsetInBlock();
/**
* required sfixed64 seqno = 2;
*/
boolean hasSeqno();
/**
* required sfixed64 seqno = 2;
*/
long getSeqno();
/**
* required bool lastPacketInBlock = 3;
*/
boolean hasLastPacketInBlock();
/**
* required bool lastPacketInBlock = 3;
*/
boolean getLastPacketInBlock();
/**
* required sfixed32 dataLen = 4;
*/
boolean hasDataLen();
/**
* required sfixed32 dataLen = 4;
*/
int getDataLen();
/**
* optional bool syncBlock = 5 [default = false];
*/
boolean hasSyncBlock();
/**
* optional bool syncBlock = 5 [default = false];
*/
boolean getSyncBlock();
}
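/*
 * Note on the "All fields must be fixed-length!" comment above: every PacketHeaderProto
 * field uses a fixed-width wire type (sfixed64, sfixed32, bool), so a header with all
 * five fields set always serializes to the same number of bytes. A rough sketch of the
 * arithmetic, assuming the generated per-field setters defined later in this file:
 *
 *   PacketHeaderProto hdr = PacketHeaderProto.newBuilder()
 *       .setOffsetInBlock(0L)         // tag (1 byte) + 8-byte sfixed64
 *       .setSeqno(0L)                 // tag + 8
 *       .setLastPacketInBlock(false)  // tag + 1-byte bool
 *       .setDataLen(0)                // tag + 4-byte sfixed32
 *       .setSyncBlock(false)          // tag + 1
 *       .build();
 *   // 5 one-byte tags + 8 + 8 + 1 + 4 + 1 = 27 bytes
 *   assert hdr.getSerializedSize() == 27;
 */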
/**
* Protobuf type {@code hadoop.hdfs.PacketHeaderProto}
*/
public static final class PacketHeaderProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.PacketHeaderProto)
PacketHeaderProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use PacketHeaderProto.newBuilder() to construct.
private PacketHeaderProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private PacketHeaderProto() {
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private PacketHeaderProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 9: {
bitField0_ |= 0x00000001;
offsetInBlock_ = input.readSFixed64();
break;
}
case 17: {
bitField0_ |= 0x00000002;
seqno_ = input.readSFixed64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
lastPacketInBlock_ = input.readBool();
break;
}
case 37: {
bitField0_ |= 0x00000008;
dataLen_ = input.readSFixed32();
break;
}
case 40: {
bitField0_ |= 0x00000010;
syncBlock_ = input.readBool();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.Builder.class);
}
private int bitField0_;
public static final int OFFSETINBLOCK_FIELD_NUMBER = 1;
private long offsetInBlock_;
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
*/
public boolean hasOffsetInBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
*/
public long getOffsetInBlock() {
return offsetInBlock_;
}
public static final int SEQNO_FIELD_NUMBER = 2;
private long seqno_;
/**
* required sfixed64 seqno = 2;
*/
public boolean hasSeqno() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required sfixed64 seqno = 2;
*/
public long getSeqno() {
return seqno_;
}
public static final int LASTPACKETINBLOCK_FIELD_NUMBER = 3;
private boolean lastPacketInBlock_;
/**
* required bool lastPacketInBlock = 3;
*/
public boolean hasLastPacketInBlock() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required bool lastPacketInBlock = 3;
*/
public boolean getLastPacketInBlock() {
return lastPacketInBlock_;
}
public static final int DATALEN_FIELD_NUMBER = 4;
private int dataLen_;
/**
* required sfixed32 dataLen = 4;
*/
public boolean hasDataLen() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* required sfixed32 dataLen = 4;
*/
public int getDataLen() {
return dataLen_;
}
public static final int SYNCBLOCK_FIELD_NUMBER = 5;
private boolean syncBlock_;
/**
* optional bool syncBlock = 5 [default = false];
*/
public boolean hasSyncBlock() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional bool syncBlock = 5 [default = false];
*/
public boolean getSyncBlock() {
return syncBlock_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasOffsetInBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSeqno()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLastPacketInBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDataLen()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeSFixed64(1, offsetInBlock_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeSFixed64(2, seqno_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeBool(3, lastPacketInBlock_);
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeSFixed32(4, dataLen_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeBool(5, syncBlock_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeSFixed64Size(1, offsetInBlock_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeSFixed64Size(2, seqno_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(3, lastPacketInBlock_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeSFixed32Size(4, dataLen_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(5, syncBlock_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) obj;
if (hasOffsetInBlock() != other.hasOffsetInBlock()) return false;
if (hasOffsetInBlock()) {
if (getOffsetInBlock()
!= other.getOffsetInBlock()) return false;
}
if (hasSeqno() != other.hasSeqno()) return false;
if (hasSeqno()) {
if (getSeqno()
!= other.getSeqno()) return false;
}
if (hasLastPacketInBlock() != other.hasLastPacketInBlock()) return false;
if (hasLastPacketInBlock()) {
if (getLastPacketInBlock()
!= other.getLastPacketInBlock()) return false;
}
if (hasDataLen() != other.hasDataLen()) return false;
if (hasDataLen()) {
if (getDataLen()
!= other.getDataLen()) return false;
}
if (hasSyncBlock() != other.hasSyncBlock()) return false;
if (hasSyncBlock()) {
if (getSyncBlock()
!= other.getSyncBlock()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasOffsetInBlock()) {
hash = (37 * hash) + OFFSETINBLOCK_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getOffsetInBlock());
}
if (hasSeqno()) {
hash = (37 * hash) + SEQNO_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getSeqno());
}
if (hasLastPacketInBlock()) {
hash = (37 * hash) + LASTPACKETINBLOCK_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getLastPacketInBlock());
}
if (hasDataLen()) {
hash = (37 * hash) + DATALEN_FIELD_NUMBER;
hash = (53 * hash) + getDataLen();
}
if (hasSyncBlock()) {
hash = (37 * hash) + SYNCBLOCK_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getSyncBlock());
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.PacketHeaderProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.PacketHeaderProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
offsetInBlock_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
seqno_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
lastPacketInBlock_ = false;
bitField0_ = (bitField0_ & ~0x00000004);
dataLen_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
syncBlock_ = false;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.offsetInBlock_ = offsetInBlock_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.seqno_ = seqno_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.lastPacketInBlock_ = lastPacketInBlock_;
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.dataLen_ = dataLen_;
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
result.syncBlock_ = syncBlock_;
to_bitField0_ |= 0x00000010;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance()) return this;
if (other.hasOffsetInBlock()) {
setOffsetInBlock(other.getOffsetInBlock());
}
if (other.hasSeqno()) {
setSeqno(other.getSeqno());
}
if (other.hasLastPacketInBlock()) {
setLastPacketInBlock(other.getLastPacketInBlock());
}
if (other.hasDataLen()) {
setDataLen(other.getDataLen());
}
if (other.hasSyncBlock()) {
setSyncBlock(other.getSyncBlock());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasOffsetInBlock()) {
return false;
}
if (!hasSeqno()) {
return false;
}
if (!hasLastPacketInBlock()) {
return false;
}
if (!hasDataLen()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private long offsetInBlock_ ;
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
*/
public boolean hasOffsetInBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
*/
public long getOffsetInBlock() {
return offsetInBlock_;
}
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
*/
public Builder setOffsetInBlock(long value) {
bitField0_ |= 0x00000001;
offsetInBlock_ = value;
onChanged();
return this;
}
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
*/
public Builder clearOffsetInBlock() {
bitField0_ = (bitField0_ & ~0x00000001);
offsetInBlock_ = 0L;
onChanged();
return this;
}
private long seqno_ ;
/**
* required sfixed64 seqno = 2;
*/
public boolean hasSeqno() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required sfixed64 seqno = 2;
*/
public long getSeqno() {
return seqno_;
}
/**
* required sfixed64 seqno = 2;
*/
public Builder setSeqno(long value) {
bitField0_ |= 0x00000002;
seqno_ = value;
onChanged();
return this;
}
/**
* required sfixed64 seqno = 2;
*/
public Builder clearSeqno() {
bitField0_ = (bitField0_ & ~0x00000002);
seqno_ = 0L;
onChanged();
return this;
}
private boolean lastPacketInBlock_ ;
/**
* required bool lastPacketInBlock = 3;
*/
public boolean hasLastPacketInBlock() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required bool lastPacketInBlock = 3;
*/
public boolean getLastPacketInBlock() {
return lastPacketInBlock_;
}
/**
* required bool lastPacketInBlock = 3;
*/
public Builder setLastPacketInBlock(boolean value) {
bitField0_ |= 0x00000004;
lastPacketInBlock_ = value;
onChanged();
return this;
}
/**
* required bool lastPacketInBlock = 3;
*/
public Builder clearLastPacketInBlock() {
bitField0_ = (bitField0_ & ~0x00000004);
lastPacketInBlock_ = false;
onChanged();
return this;
}
private int dataLen_ ;
/**
* required sfixed32 dataLen = 4;
*/
public boolean hasDataLen() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* required sfixed32 dataLen = 4;
*/
public int getDataLen() {
return dataLen_;
}
/**
* required sfixed32 dataLen = 4;
*/
public Builder setDataLen(int value) {
bitField0_ |= 0x00000008;
dataLen_ = value;
onChanged();
return this;
}
/**
* required sfixed32 dataLen = 4;
*/
public Builder clearDataLen() {
bitField0_ = (bitField0_ & ~0x00000008);
dataLen_ = 0;
onChanged();
return this;
}
private boolean syncBlock_ ;
/**
* optional bool syncBlock = 5 [default = false];
*/
public boolean hasSyncBlock() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional bool syncBlock = 5 [default = false];
*/
public boolean getSyncBlock() {
return syncBlock_;
}
/**
* optional bool syncBlock = 5 [default = false];
*/
public Builder setSyncBlock(boolean value) {
bitField0_ |= 0x00000010;
syncBlock_ = value;
onChanged();
return this;
}
/**
* optional bool syncBlock = 5 [default = false];
*/
public Builder clearSyncBlock() {
bitField0_ = (bitField0_ & ~0x00000010);
syncBlock_ = false;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.PacketHeaderProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.PacketHeaderProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<PacketHeaderProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<PacketHeaderProto>() {
@java.lang.Override
public PacketHeaderProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new PacketHeaderProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<PacketHeaderProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<PacketHeaderProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
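// Illustrative sketch (not part of the generated file): how client code, such as an
// HDFS DataNode packet writer, might build and round-trip a PacketHeaderProto with the
// generated Builder API above. The method name and the field values are hypothetical;
// only the setter/build/parseFrom calls shown are taken from the generated class.
private static PacketHeaderProto examplePacketHeaderRoundTrip()
    throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
  // All four required fields must be set or build() throws an
  // UninitializedMessageException; syncBlock is optional and defaults to false.
  PacketHeaderProto header = PacketHeaderProto.newBuilder()
      .setOffsetInBlock(0L)          // byte offset of this packet within the block
      .setSeqno(1L)                  // packet sequence number within the stream
      .setLastPacketInBlock(false)   // true only for the trailing packet of the block
      .setDataLen(64 * 1024)         // payload length; sfixed32 keeps the wire width constant
      .setSyncBlock(false)           // optional sync hint
      .build();
  // Per the field comments, all fields are fixed-length, so the encoded header has a
  // predictable size, which simplifies length-prefixing it on the data-transfer wire.
  byte[] wire = header.toByteArray();
  return PacketHeaderProto.parseFrom(wire);
}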
public interface PipelineAckProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.PipelineAckProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required sint64 seqno = 1;
*/
boolean hasSeqno();
/**
* required sint64 seqno = 1;
*/
long getSeqno();
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList();
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
int getReplyCount();
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index);
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
boolean hasDownstreamAckTimeNanos();
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
long getDownstreamAckTimeNanos();
/**
* repeated uint32 flag = 4 [packed = true];
*/
java.util.List<java.lang.Integer> getFlagList();
/**
* repeated uint32 flag = 4 [packed = true];
*/
int getFlagCount();
/**
* repeated uint32 flag = 4 [packed = true];
*/
int getFlag(int index);
}
/**
* Protobuf type {@code hadoop.hdfs.PipelineAckProto}
*/
public static final class PipelineAckProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.PipelineAckProto)
PipelineAckProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use PipelineAckProto.newBuilder() to construct.
private PipelineAckProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private PipelineAckProto() {
reply_ = java.util.Collections.emptyList();
flag_ = emptyIntList();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private PipelineAckProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
seqno_ = input.readSInt64();
break;
}
case 16: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000002) != 0)) {
reply_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000002;
}
reply_.add(rawValue);
}
break;
}
case 18: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000002) != 0)) {
reply_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000002;
}
reply_.add(rawValue);
}
}
input.popLimit(oldLimit);
break;
}
case 24: {
bitField0_ |= 0x00000002;
downstreamAckTimeNanos_ = input.readUInt64();
break;
}
case 32: {
if (!((mutable_bitField0_ & 0x00000008) != 0)) {
flag_ = newIntList();
mutable_bitField0_ |= 0x00000008;
}
flag_.addInt(input.readUInt32());
break;
}
case 34: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000008) != 0) && input.getBytesUntilLimit() > 0) {
flag_ = newIntList();
mutable_bitField0_ |= 0x00000008;
}
while (input.getBytesUntilLimit() > 0) {
flag_.addInt(input.readUInt32());
}
input.popLimit(limit);
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) != 0)) {
reply_ = java.util.Collections.unmodifiableList(reply_);
}
if (((mutable_bitField0_ & 0x00000008) != 0)) {
flag_.makeImmutable(); // C
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.Builder.class);
}
private int bitField0_;
public static final int SEQNO_FIELD_NUMBER = 1;
private long seqno_;
/**
* required sint64 seqno = 1;
*/
public boolean hasSeqno() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required sint64 seqno = 1;
*/
public long getSeqno() {
return seqno_;
}
public static final int REPLY_FIELD_NUMBER = 2;
private java.util.List<java.lang.Integer> reply_;
private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> reply_converter_ =
new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>() {
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status convert(java.lang.Integer from) {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(from);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
};
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList() {
return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>(reply_, reply_converter_);
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public int getReplyCount() {
return reply_.size();
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index) {
return reply_converter_.convert(reply_.get(index));
}
public static final int DOWNSTREAMACKTIMENANOS_FIELD_NUMBER = 3;
private long downstreamAckTimeNanos_;
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public boolean hasDownstreamAckTimeNanos() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public long getDownstreamAckTimeNanos() {
return downstreamAckTimeNanos_;
}
public static final int FLAG_FIELD_NUMBER = 4;
private org.apache.hadoop.thirdparty.protobuf.Internal.IntList flag_;
/**
* repeated uint32 flag = 4 [packed = true];
*/
public java.util.List<java.lang.Integer>
getFlagList() {
return flag_;
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public int getFlagCount() {
return flag_.size();
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public int getFlag(int index) {
return flag_.getInt(index);
}
private int flagMemoizedSerializedSize = -1;
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasSeqno()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) != 0)) {
output.writeSInt64(1, seqno_);
}
for (int i = 0; i < reply_.size(); i++) {
output.writeEnum(2, reply_.get(i));
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt64(3, downstreamAckTimeNanos_);
}
if (getFlagList().size() > 0) {
output.writeUInt32NoTag(34);
output.writeUInt32NoTag(flagMemoizedSerializedSize);
}
for (int i = 0; i < flag_.size(); i++) {
output.writeUInt32NoTag(flag_.getInt(i));
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeSInt64Size(1, seqno_);
}
{
int dataSize = 0;
for (int i = 0; i < reply_.size(); i++) {
dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSizeNoTag(reply_.get(i));
}
size += dataSize;
size += 1 * reply_.size();
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(3, downstreamAckTimeNanos_);
}
{
int dataSize = 0;
for (int i = 0; i < flag_.size(); i++) {
dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32SizeNoTag(flag_.getInt(i));
}
size += dataSize;
if (!getFlagList().isEmpty()) {
size += 1;
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
flagMemoizedSerializedSize = dataSize;
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) obj;
if (hasSeqno() != other.hasSeqno()) return false;
if (hasSeqno()) {
if (getSeqno()
!= other.getSeqno()) return false;
}
if (!reply_.equals(other.reply_)) return false;
if (hasDownstreamAckTimeNanos() != other.hasDownstreamAckTimeNanos()) return false;
if (hasDownstreamAckTimeNanos()) {
if (getDownstreamAckTimeNanos()
!= other.getDownstreamAckTimeNanos()) return false;
}
if (!getFlagList()
.equals(other.getFlagList())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasSeqno()) {
hash = (37 * hash) + SEQNO_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getSeqno());
}
if (getReplyCount() > 0) {
hash = (37 * hash) + REPLY_FIELD_NUMBER;
hash = (53 * hash) + reply_.hashCode();
}
if (hasDownstreamAckTimeNanos()) {
hash = (37 * hash) + DOWNSTREAMACKTIMENANOS_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getDownstreamAckTimeNanos());
}
if (getFlagCount() > 0) {
hash = (37 * hash) + FLAG_FIELD_NUMBER;
hash = (53 * hash) + getFlagList().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.PipelineAckProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.PipelineAckProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
seqno_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
reply_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
downstreamAckTimeNanos_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
flag_ = emptyIntList();
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.seqno_ = seqno_;
to_bitField0_ |= 0x00000001;
}
if (((bitField0_ & 0x00000002) != 0)) {
reply_ = java.util.Collections.unmodifiableList(reply_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.reply_ = reply_;
if (((from_bitField0_ & 0x00000004) != 0)) {
result.downstreamAckTimeNanos_ = downstreamAckTimeNanos_;
to_bitField0_ |= 0x00000002;
}
if (((bitField0_ & 0x00000008) != 0)) {
flag_.makeImmutable();
bitField0_ = (bitField0_ & ~0x00000008);
}
result.flag_ = flag_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance()) return this;
if (other.hasSeqno()) {
setSeqno(other.getSeqno());
}
if (!other.reply_.isEmpty()) {
if (reply_.isEmpty()) {
reply_ = other.reply_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureReplyIsMutable();
reply_.addAll(other.reply_);
}
onChanged();
}
if (other.hasDownstreamAckTimeNanos()) {
setDownstreamAckTimeNanos(other.getDownstreamAckTimeNanos());
}
if (!other.flag_.isEmpty()) {
if (flag_.isEmpty()) {
flag_ = other.flag_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureFlagIsMutable();
flag_.addAll(other.flag_);
}
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasSeqno()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private long seqno_ ;
/**
* required sint64 seqno = 1;
*/
public boolean hasSeqno() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required sint64 seqno = 1;
*/
public long getSeqno() {
return seqno_;
}
/**
* required sint64 seqno = 1;
*/
public Builder setSeqno(long value) {
bitField0_ |= 0x00000001;
seqno_ = value;
onChanged();
return this;
}
/**
* required sint64 seqno = 1;
*/
public Builder clearSeqno() {
bitField0_ = (bitField0_ & ~0x00000001);
seqno_ = 0L;
onChanged();
return this;
}
private java.util.List<java.lang.Integer> reply_ =
java.util.Collections.emptyList();
private void ensureReplyIsMutable() {
if (!((bitField0_ & 0x00000002) != 0)) {
reply_ = new java.util.ArrayList<java.lang.Integer>(reply_);
bitField0_ |= 0x00000002;
}
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList() {
return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>(reply_, reply_converter_);
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public int getReplyCount() {
return reply_.size();
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index) {
return reply_converter_.convert(reply_.get(index));
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public Builder setReply(
int index, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
ensureReplyIsMutable();
reply_.set(index, value.getNumber());
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public Builder addReply(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
ensureReplyIsMutable();
reply_.add(value.getNumber());
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public Builder addAllReply(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> values) {
ensureReplyIsMutable();
for (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value : values) {
reply_.add(value.getNumber());
}
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public Builder clearReply() {
reply_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
private long downstreamAckTimeNanos_ ;
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public boolean hasDownstreamAckTimeNanos() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public long getDownstreamAckTimeNanos() {
return downstreamAckTimeNanos_;
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public Builder setDownstreamAckTimeNanos(long value) {
bitField0_ |= 0x00000004;
downstreamAckTimeNanos_ = value;
onChanged();
return this;
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public Builder clearDownstreamAckTimeNanos() {
bitField0_ = (bitField0_ & ~0x00000004);
downstreamAckTimeNanos_ = 0L;
onChanged();
return this;
}
private org.apache.hadoop.thirdparty.protobuf.Internal.IntList flag_ = emptyIntList();
private void ensureFlagIsMutable() {
if (!((bitField0_ & 0x00000008) != 0)) {
flag_ = mutableCopy(flag_);
bitField0_ |= 0x00000008;
}
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public java.util.List<java.lang.Integer>
getFlagList() {
return ((bitField0_ & 0x00000008) != 0) ?
java.util.Collections.unmodifiableList(flag_) : flag_;
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public int getFlagCount() {
return flag_.size();
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public int getFlag(int index) {
return flag_.getInt(index);
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public Builder setFlag(
int index, int value) {
ensureFlagIsMutable();
flag_.setInt(index, value);
onChanged();
return this;
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public Builder addFlag(int value) {
ensureFlagIsMutable();
flag_.addInt(value);
onChanged();
return this;
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public Builder addAllFlag(
java.lang.Iterable<? extends java.lang.Integer> values) {
ensureFlagIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, flag_);
onChanged();
return this;
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public Builder clearFlag() {
flag_ = emptyIntList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.PipelineAckProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.PipelineAckProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<PipelineAckProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<PipelineAckProto>() {
@java.lang.Override
public PipelineAckProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new PipelineAckProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<PipelineAckProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<PipelineAckProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
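// Illustrative sketch (not part of the generated file): building and parsing a
// PipelineAckProto the way a DataNode in the write pipeline might. The method name and
// the concrete values are hypothetical; the builder and accessor calls are the ones
// generated above.
private static PipelineAckProto examplePipelineAck()
    throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
  PipelineAckProto ack = PipelineAckProto.newBuilder()
      .setSeqno(1L)                         // matches the seqno of the packet being acked
      .addReply(Status.SUCCESS)             // one Status per DataNode in the pipeline
      .addReply(Status.SUCCESS)
      .setDownstreamAckTimeNanos(250000L)   // optional: time spent waiting on downstream nodes
      .build();
  byte[] wire = ack.toByteArray();
  PipelineAckProto parsed = PipelineAckProto.parseFrom(wire);
  // reply is a repeated enum; as the parsing code above shows, unrecognized enum values
  // read off the wire are kept in unknownFields rather than surfaced via getReplyList().
  assert parsed.getReplyCount() == 2 && parsed.getReply(0) == Status.SUCCESS;
  return parsed;
}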
public interface ReadOpChecksumInfoProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ReadOpChecksumInfoProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
boolean hasChecksum();
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum();
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder();
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
*/
boolean hasChunkOffset();
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
*/
long getChunkOffset();
}
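// Illustrative sketch (not part of the generated file): the chunkOffset documented above
// is the requested read offset aligned backwards to a checksum chunk boundary, so the
// reader always starts on a chunk whose checksum can be verified. The helper name and the
// example numbers are hypothetical; bytesPerChecksum would come from the ChecksumProto.
private static long exampleAlignedChunkOffset(long requestedOffset, int bytesPerChecksum) {
  // e.g. requestedOffset = 1000, bytesPerChecksum = 512  ->  chunkOffset = 512
  return requestedOffset - (requestedOffset % bytesPerChecksum);
}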
/**
*
**
* Sent as part of the BlockOpResponseProto
* for READ_BLOCK and COPY_BLOCK operations.
*
*
* Protobuf type {@code hadoop.hdfs.ReadOpChecksumInfoProto}
*/
public static final class ReadOpChecksumInfoProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ReadOpChecksumInfoProto)
ReadOpChecksumInfoProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ReadOpChecksumInfoProto.newBuilder() to construct.
private ReadOpChecksumInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ReadOpChecksumInfoProto() {
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ReadOpChecksumInfoProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = checksum_.toBuilder();
}
checksum_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(checksum_);
checksum_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
chunkOffset_ = input.readUInt64();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder.class);
}
private int bitField0_;
public static final int CHECKSUM_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_;
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public boolean hasChecksum() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum() {
return checksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder() {
return checksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
}
public static final int CHUNKOFFSET_FIELD_NUMBER = 2;
private long chunkOffset_;
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
*/
public boolean hasChunkOffset() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
*/
public long getChunkOffset() {
return chunkOffset_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasChecksum()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasChunkOffset()) {
memoizedIsInitialized = 0;
return false;
}
if (!getChecksum().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getChecksum());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt64(2, chunkOffset_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getChecksum());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(2, chunkOffset_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) obj;
if (hasChecksum() != other.hasChecksum()) return false;
if (hasChecksum()) {
if (!getChecksum()
.equals(other.getChecksum())) return false;
}
if (hasChunkOffset() != other.hasChunkOffset()) return false;
if (hasChunkOffset()) {
if (getChunkOffset()
!= other.getChunkOffset()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasChecksum()) {
hash = (37 * hash) + CHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getChecksum().hashCode();
}
if (hasChunkOffset()) {
hash = (37 * hash) + CHUNKOFFSET_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getChunkOffset());
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
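// --- Illustrative parsing sketch (not part of the generated code) ---
// One of the overloads above can be used to decode a serialized message; this
// assumes "data" is a byte[] produced by toByteArray() on the same message type:
//
//   ReadOpChecksumInfoProto decoded = ReadOpChecksumInfoProto.parseFrom(data);
//   if (decoded.hasChunkOffset()) {
//     long offset = decoded.getChunkOffset();
//   }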
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
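// --- Illustrative builder usage (sketch, not part of the generated code) ---
// A minimal example of constructing a ReadOpChecksumInfoProto with its builder.
// "checksumProto" is an assumed variable holding an existing ChecksumProto:
//
//   ReadOpChecksumInfoProto info = ReadOpChecksumInfoProto.newBuilder()
//       .setChecksum(checksumProto)   // required field; build() throws if missing
//       .setChunkOffset(1024L)        // required field
//       .build();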
/**
*
**
* Sent as part of the BlockOpResponseProto
* for READ_BLOCK and COPY_BLOCK operations.
*
*
* Protobuf type {@code hadoop.hdfs.ReadOpChecksumInfoProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ReadOpChecksumInfoProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getChecksumFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (checksumBuilder_ == null) {
checksum_ = null;
} else {
checksumBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
chunkOffset_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (checksumBuilder_ == null) {
result.checksum_ = checksum_;
} else {
result.checksum_ = checksumBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.chunkOffset_ = chunkOffset_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) return this;
if (other.hasChecksum()) {
mergeChecksum(other.getChecksum());
}
if (other.hasChunkOffset()) {
setChunkOffset(other.getChunkOffset());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasChecksum()) {
return false;
}
if (!hasChunkOffset()) {
return false;
}
if (!getChecksum().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> checksumBuilder_;
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public boolean hasChecksum() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum() {
if (checksumBuilder_ == null) {
return checksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
} else {
return checksumBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder setChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
if (checksumBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
checksum_ = value;
onChanged();
} else {
checksumBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder setChecksum(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder builderForValue) {
if (checksumBuilder_ == null) {
checksum_ = builderForValue.build();
onChanged();
} else {
checksumBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder mergeChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
if (checksumBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
checksum_ != null &&
checksum_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) {
checksum_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder(checksum_).mergeFrom(value).buildPartial();
} else {
checksum_ = value;
}
onChanged();
} else {
checksumBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder clearChecksum() {
if (checksumBuilder_ == null) {
checksum_ = null;
onChanged();
} else {
checksumBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder getChecksumBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getChecksumFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder() {
if (checksumBuilder_ != null) {
return checksumBuilder_.getMessageOrBuilder();
} else {
return checksum_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
}
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>
getChecksumFieldBuilder() {
if (checksumBuilder_ == null) {
checksumBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>(
getChecksum(),
getParentForChildren(),
isClean());
checksum_ = null;
}
return checksumBuilder_;
}
private long chunkOffset_ ;
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
*/
public boolean hasChunkOffset() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
*/
public long getChunkOffset() {
return chunkOffset_;
}
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
*/
public Builder setChunkOffset(long value) {
bitField0_ |= 0x00000002;
chunkOffset_ = value;
onChanged();
return this;
}
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
*/
public Builder clearChunkOffset() {
bitField0_ = (bitField0_ & ~0x00000002);
chunkOffset_ = 0L;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReadOpChecksumInfoProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ReadOpChecksumInfoProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ReadOpChecksumInfoProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ReadOpChecksumInfoProto>() {
@java.lang.Override
public ReadOpChecksumInfoProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ReadOpChecksumInfoProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ReadOpChecksumInfoProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ReadOpChecksumInfoProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
public interface BlockOpResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockOpResponseProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.Status status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
/**
* optional string firstBadLink = 2;
*/
boolean hasFirstBadLink();
/**
* optional string firstBadLink = 2;
*/
java.lang.String getFirstBadLink();
/**
* optional string firstBadLink = 2;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getFirstBadLinkBytes();
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
boolean hasChecksumResponse();
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse();
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder();
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
boolean hasReadOpChecksumInfo();
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo();
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder();
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
boolean hasMessage();
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
java.lang.String getMessage();
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getMessageBytes();
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
*/
boolean hasShortCircuitAccessVersion();
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
*/
int getShortCircuitAccessVersion();
}
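// --- Illustrative note (sketch, not part of the generated code) ---
// How a client might consume the shortCircuitAccessVersion field described above.
// "response" is an assumed BlockOpResponseProto received from a DataNode:
//
//   if (response.getStatus() == Status.SUCCESS
//       && response.hasShortCircuitAccessVersion()) {
//     int version = response.getShortCircuitAccessVersion();
//     // the client can then decide whether it understands this block-data version
//   }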
/**
* Protobuf type {@code hadoop.hdfs.BlockOpResponseProto}
*/
public static final class BlockOpResponseProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockOpResponseProto)
BlockOpResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use BlockOpResponseProto.newBuilder() to construct.
private BlockOpResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private BlockOpResponseProto() {
status_ = 0;
firstBadLink_ = "";
message_ = "";
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockOpResponseProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = rawValue;
}
break;
}
case 18: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000002;
firstBadLink_ = bs;
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) != 0)) {
subBuilder = checksumResponse_.toBuilder();
}
checksumResponse_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(checksumResponse_);
checksumResponse_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) != 0)) {
subBuilder = readOpChecksumInfo_.toBuilder();
}
readOpChecksumInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(readOpChecksumInfo_);
readOpChecksumInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 42: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000010;
message_ = bs;
break;
}
case 48: {
bitField0_ |= 0x00000020;
shortCircuitAccessVersion_ = input.readUInt32();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder.class);
}
private int bitField0_;
public static final int STATUS_FIELD_NUMBER = 1;
private int status_;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
public static final int FIRSTBADLINK_FIELD_NUMBER = 2;
private volatile java.lang.Object firstBadLink_;
/**
* optional string firstBadLink = 2;
*/
public boolean hasFirstBadLink() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string firstBadLink = 2;
*/
public java.lang.String getFirstBadLink() {
java.lang.Object ref = firstBadLink_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
firstBadLink_ = s;
}
return s;
}
}
/**
* optional string firstBadLink = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getFirstBadLinkBytes() {
java.lang.Object ref = firstBadLink_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
firstBadLink_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int CHECKSUMRESPONSE_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto checksumResponse_;
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public boolean hasChecksumResponse() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse() {
return checksumResponse_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder() {
return checksumResponse_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
}
public static final int READOPCHECKSUMINFO_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto readOpChecksumInfo_;
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public boolean hasReadOpChecksumInfo() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo() {
return readOpChecksumInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder() {
return readOpChecksumInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
}
public static final int MESSAGE_FIELD_NUMBER = 5;
private volatile java.lang.Object message_;
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
public boolean hasMessage() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
public java.lang.String getMessage() {
java.lang.Object ref = message_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
message_ = s;
}
return s;
}
}
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getMessageBytes() {
java.lang.Object ref = message_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
message_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int SHORTCIRCUITACCESSVERSION_FIELD_NUMBER = 6;
private int shortCircuitAccessVersion_;
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
*/
public boolean hasShortCircuitAccessVersion() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
*/
public int getShortCircuitAccessVersion() {
return shortCircuitAccessVersion_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
if (hasChecksumResponse()) {
if (!getChecksumResponse().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasReadOpChecksumInfo()) {
if (!getReadOpChecksumInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, firstBadLink_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeMessage(3, getChecksumResponse());
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeMessage(4, getReadOpChecksumInfo());
}
if (((bitField0_ & 0x00000010) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, message_);
}
if (((bitField0_ & 0x00000020) != 0)) {
output.writeUInt32(6, shortCircuitAccessVersion_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, firstBadLink_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(3, getChecksumResponse());
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(4, getReadOpChecksumInfo());
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, message_);
}
if (((bitField0_ & 0x00000020) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(6, shortCircuitAccessVersion_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) obj;
if (hasStatus() != other.hasStatus()) return false;
if (hasStatus()) {
if (status_ != other.status_) return false;
}
if (hasFirstBadLink() != other.hasFirstBadLink()) return false;
if (hasFirstBadLink()) {
if (!getFirstBadLink()
.equals(other.getFirstBadLink())) return false;
}
if (hasChecksumResponse() != other.hasChecksumResponse()) return false;
if (hasChecksumResponse()) {
if (!getChecksumResponse()
.equals(other.getChecksumResponse())) return false;
}
if (hasReadOpChecksumInfo() != other.hasReadOpChecksumInfo()) return false;
if (hasReadOpChecksumInfo()) {
if (!getReadOpChecksumInfo()
.equals(other.getReadOpChecksumInfo())) return false;
}
if (hasMessage() != other.hasMessage()) return false;
if (hasMessage()) {
if (!getMessage()
.equals(other.getMessage())) return false;
}
if (hasShortCircuitAccessVersion() != other.hasShortCircuitAccessVersion()) return false;
if (hasShortCircuitAccessVersion()) {
if (getShortCircuitAccessVersion()
!= other.getShortCircuitAccessVersion()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + status_;
}
if (hasFirstBadLink()) {
hash = (37 * hash) + FIRSTBADLINK_FIELD_NUMBER;
hash = (53 * hash) + getFirstBadLink().hashCode();
}
if (hasChecksumResponse()) {
hash = (37 * hash) + CHECKSUMRESPONSE_FIELD_NUMBER;
hash = (53 * hash) + getChecksumResponse().hashCode();
}
if (hasReadOpChecksumInfo()) {
hash = (37 * hash) + READOPCHECKSUMINFO_FIELD_NUMBER;
hash = (53 * hash) + getReadOpChecksumInfo().hashCode();
}
if (hasMessage()) {
hash = (37 * hash) + MESSAGE_FIELD_NUMBER;
hash = (53 * hash) + getMessage().hashCode();
}
if (hasShortCircuitAccessVersion()) {
hash = (37 * hash) + SHORTCIRCUITACCESSVERSION_FIELD_NUMBER;
hash = (53 * hash) + getShortCircuitAccessVersion();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.BlockOpResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockOpResponseProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getChecksumResponseFieldBuilder();
getReadOpChecksumInfoFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
status_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
firstBadLink_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (checksumResponseBuilder_ == null) {
checksumResponse_ = null;
} else {
checksumResponseBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
if (readOpChecksumInfoBuilder_ == null) {
readOpChecksumInfo_ = null;
} else {
readOpChecksumInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
message_ = "";
bitField0_ = (bitField0_ & ~0x00000010);
shortCircuitAccessVersion_ = 0;
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.firstBadLink_ = firstBadLink_;
if (((from_bitField0_ & 0x00000004) != 0)) {
if (checksumResponseBuilder_ == null) {
result.checksumResponse_ = checksumResponse_;
} else {
result.checksumResponse_ = checksumResponseBuilder_.build();
}
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
if (readOpChecksumInfoBuilder_ == null) {
result.readOpChecksumInfo_ = readOpChecksumInfo_;
} else {
result.readOpChecksumInfo_ = readOpChecksumInfoBuilder_.build();
}
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
to_bitField0_ |= 0x00000010;
}
result.message_ = message_;
if (((from_bitField0_ & 0x00000020) != 0)) {
result.shortCircuitAccessVersion_ = shortCircuitAccessVersion_;
to_bitField0_ |= 0x00000020;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasFirstBadLink()) {
bitField0_ |= 0x00000002;
firstBadLink_ = other.firstBadLink_;
onChanged();
}
if (other.hasChecksumResponse()) {
mergeChecksumResponse(other.getChecksumResponse());
}
if (other.hasReadOpChecksumInfo()) {
mergeReadOpChecksumInfo(other.getReadOpChecksumInfo());
}
if (other.hasMessage()) {
bitField0_ |= 0x00000010;
message_ = other.message_;
onChanged();
}
if (other.hasShortCircuitAccessVersion()) {
setShortCircuitAccessVersion(other.getShortCircuitAccessVersion());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
if (hasChecksumResponse()) {
if (!getChecksumResponse().isInitialized()) {
return false;
}
}
if (hasReadOpChecksumInfo()) {
if (!getReadOpChecksumInfo().isInitialized()) {
return false;
}
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = 0;
onChanged();
return this;
}
private java.lang.Object firstBadLink_ = "";
/**
* optional string firstBadLink = 2;
*/
public boolean hasFirstBadLink() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string firstBadLink = 2;
*/
public java.lang.String getFirstBadLink() {
java.lang.Object ref = firstBadLink_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
firstBadLink_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string firstBadLink = 2;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getFirstBadLinkBytes() {
java.lang.Object ref = firstBadLink_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
firstBadLink_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string firstBadLink = 2;
*/
public Builder setFirstBadLink(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
firstBadLink_ = value;
onChanged();
return this;
}
/**
* optional string firstBadLink = 2;
*/
public Builder clearFirstBadLink() {
bitField0_ = (bitField0_ & ~0x00000002);
firstBadLink_ = getDefaultInstance().getFirstBadLink();
onChanged();
return this;
}
/**
* optional string firstBadLink = 2;
*/
public Builder setFirstBadLinkBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
firstBadLink_ = value;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto checksumResponse_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder> checksumResponseBuilder_;
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public boolean hasChecksumResponse() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse() {
if (checksumResponseBuilder_ == null) {
return checksumResponse_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
} else {
return checksumResponseBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder setChecksumResponse(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto value) {
if (checksumResponseBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
checksumResponse_ = value;
onChanged();
} else {
checksumResponseBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder setChecksumResponse(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder builderForValue) {
if (checksumResponseBuilder_ == null) {
checksumResponse_ = builderForValue.build();
onChanged();
} else {
checksumResponseBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder mergeChecksumResponse(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto value) {
if (checksumResponseBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0) &&
checksumResponse_ != null &&
checksumResponse_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance()) {
checksumResponse_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.newBuilder(checksumResponse_).mergeFrom(value).buildPartial();
} else {
checksumResponse_ = value;
}
onChanged();
} else {
checksumResponseBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder clearChecksumResponse() {
if (checksumResponseBuilder_ == null) {
checksumResponse_ = null;
onChanged();
} else {
checksumResponseBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder getChecksumResponseBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getChecksumResponseFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder() {
if (checksumResponseBuilder_ != null) {
return checksumResponseBuilder_.getMessageOrBuilder();
} else {
return checksumResponse_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
}
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder>
getChecksumResponseFieldBuilder() {
if (checksumResponseBuilder_ == null) {
checksumResponseBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder>(
getChecksumResponse(),
getParentForChildren(),
isClean());
checksumResponse_ = null;
}
return checksumResponseBuilder_;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto readOpChecksumInfo_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder> readOpChecksumInfoBuilder_;
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public boolean hasReadOpChecksumInfo() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo() {
if (readOpChecksumInfoBuilder_ == null) {
return readOpChecksumInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
} else {
return readOpChecksumInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder setReadOpChecksumInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto value) {
if (readOpChecksumInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
readOpChecksumInfo_ = value;
onChanged();
} else {
readOpChecksumInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder setReadOpChecksumInfo(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder builderForValue) {
if (readOpChecksumInfoBuilder_ == null) {
readOpChecksumInfo_ = builderForValue.build();
onChanged();
} else {
readOpChecksumInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder mergeReadOpChecksumInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto value) {
if (readOpChecksumInfoBuilder_ == null) {
if (((bitField0_ & 0x00000008) != 0) &&
readOpChecksumInfo_ != null &&
readOpChecksumInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) {
readOpChecksumInfo_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.newBuilder(readOpChecksumInfo_).mergeFrom(value).buildPartial();
} else {
readOpChecksumInfo_ = value;
}
onChanged();
} else {
readOpChecksumInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder clearReadOpChecksumInfo() {
if (readOpChecksumInfoBuilder_ == null) {
readOpChecksumInfo_ = null;
onChanged();
} else {
readOpChecksumInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder getReadOpChecksumInfoBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getReadOpChecksumInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder() {
if (readOpChecksumInfoBuilder_ != null) {
return readOpChecksumInfoBuilder_.getMessageOrBuilder();
} else {
return readOpChecksumInfo_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
}
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder>
getReadOpChecksumInfoFieldBuilder() {
if (readOpChecksumInfoBuilder_ == null) {
readOpChecksumInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder>(
getReadOpChecksumInfo(),
getParentForChildren(),
isClean());
readOpChecksumInfo_ = null;
}
return readOpChecksumInfoBuilder_;
}
private java.lang.Object message_ = "";
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
public boolean hasMessage() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
public java.lang.String getMessage() {
java.lang.Object ref = message_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
message_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getMessageBytes() {
java.lang.Object ref = message_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
message_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
public Builder setMessage(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
message_ = value;
onChanged();
return this;
}
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
public Builder clearMessage() {
bitField0_ = (bitField0_ & ~0x00000010);
message_ = getDefaultInstance().getMessage();
onChanged();
return this;
}
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
*/
public Builder setMessageBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
message_ = value;
onChanged();
return this;
}
private int shortCircuitAccessVersion_ ;
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
*/
public boolean hasShortCircuitAccessVersion() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
*/
public int getShortCircuitAccessVersion() {
return shortCircuitAccessVersion_;
}
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
*/
public Builder setShortCircuitAccessVersion(int value) {
bitField0_ |= 0x00000020;
shortCircuitAccessVersion_ = value;
onChanged();
return this;
}
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
*/
public Builder clearShortCircuitAccessVersion() {
bitField0_ = (bitField0_ & ~0x00000020);
shortCircuitAccessVersion_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockOpResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockOpResponseProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockOpResponseProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockOpResponseProto>() {
@java.lang.Override
public BlockOpResponseProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new BlockOpResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockOpResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<BlockOpResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
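// Illustrative sketch only, not part of the generated file: one way a DataNode
// response might be assembled with the builder methods above. The concrete
// Status value, checksum numbers and message text are hypothetical, and the
// required status field (field 1, not shown in this excerpt) is assumed to be
// settable via setStatus(...).
//
//   BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
//       .setStatus(Status.SUCCESS)
//       .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()
//           .setBytesPerCrc(512)
//           .setCrcPerBlock(256L)
//           .setBlockChecksum(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[16])))
//       .setMessage("block checksum computed")
//       .build();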
public interface ClientReadStatusProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ClientReadStatusProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.Status status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
}
/**
*
**
* Message sent from the client to the DN after reading the entire
* read request.
*
*
* Protobuf type {@code hadoop.hdfs.ClientReadStatusProto}
*/
public static final class ClientReadStatusProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ClientReadStatusProto)
ClientReadStatusProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ClientReadStatusProto.newBuilder() to construct.
private ClientReadStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ClientReadStatusProto() {
status_ = 0;
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ClientReadStatusProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = rawValue;
}
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.Builder.class);
}
private int bitField0_;
public static final int STATUS_FIELD_NUMBER = 1;
private int status_;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, status_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, status_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) obj;
if (hasStatus() != other.hasStatus()) return false;
if (hasStatus()) {
if (status_ != other.status_) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + status_;
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* Message sent from the client to the DN after reading the entire
* read request.
*
*
* Protobuf type {@code hadoop.hdfs.ClientReadStatusProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ClientReadStatusProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
status_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ClientReadStatusProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ClientReadStatusProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ClientReadStatusProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ClientReadStatusProto>() {
@java.lang.Override
public ClientReadStatusProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ClientReadStatusProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ClientReadStatusProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ClientReadStatusProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
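// Illustrative sketch only, not part of the generated file: after finishing a
// read, a client can acknowledge it roughly as below. "socketOut" is an assumed
// java.io.OutputStream backed by the DataNode connection.
//
//   ClientReadStatusProto ack = ClientReadStatusProto.newBuilder()
//       .setStatus(Status.CHECKSUM_OK)   // required field; build() throws if unset
//       .build();
//   ack.writeDelimitedTo(socketOut);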
public interface DNTransferAckProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.DNTransferAckProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.Status status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
}
/**
* Protobuf type {@code hadoop.hdfs.DNTransferAckProto}
*/
public static final class DNTransferAckProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.DNTransferAckProto)
DNTransferAckProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use DNTransferAckProto.newBuilder() to construct.
private DNTransferAckProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DNTransferAckProto() {
status_ = 0;
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DNTransferAckProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = rawValue;
}
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.Builder.class);
}
private int bitField0_;
public static final int STATUS_FIELD_NUMBER = 1;
private int status_;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, status_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, status_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) obj;
if (hasStatus() != other.hasStatus()) return false;
if (hasStatus()) {
if (status_ != other.status_) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + status_;
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DNTransferAckProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.DNTransferAckProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
status_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DNTransferAckProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DNTransferAckProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DNTransferAckProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DNTransferAckProto>() {
@java.lang.Override
public DNTransferAckProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new DNTransferAckProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<DNTransferAckProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<DNTransferAckProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
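// Illustrative sketch only, not part of the generated file: reading back a
// DNTransferAckProto and checking its required status. "socketIn" is an assumed
// java.io.InputStream; parseDelimitedFrom returns null at end of stream.
//
//   DNTransferAckProto dnAck = DNTransferAckProto.parseDelimitedFrom(socketIn);
//   if (dnAck != null && dnAck.getStatus() != Status.SUCCESS) {
//     // handle the failed transfer
//   }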
public interface OpBlockChecksumResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpBlockChecksumResponseProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required uint32 bytesPerCrc = 1;
*/
boolean hasBytesPerCrc();
/**
* required uint32 bytesPerCrc = 1;
*/
int getBytesPerCrc();
/**
* required uint64 crcPerBlock = 2;
*/
boolean hasCrcPerBlock();
/**
* required uint64 crcPerBlock = 2;
*/
long getCrcPerBlock();
/**
* required bytes blockChecksum = 3;
*/
boolean hasBlockChecksum();
/**
* required bytes blockChecksum = 3;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString getBlockChecksum();
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
boolean hasCrcType();
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
boolean hasBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder();
}
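// Illustrative sketch only, not part of the generated file: the accessors in the
// interface above are typically read on the client side roughly as below, where
// "response" is an assumed BlockOpResponseProto received from a DataNode.
//
//   OpBlockChecksumResponseProto checksum = response.getChecksumResponse();
//   int bytesPerCrc = checksum.getBytesPerCrc();
//   long crcPerBlock = checksum.getCrcPerBlock();
//   byte[] digest = checksum.getBlockChecksum().toByteArray();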
/**
* Protobuf type {@code hadoop.hdfs.OpBlockChecksumResponseProto}
*/
public static final class OpBlockChecksumResponseProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpBlockChecksumResponseProto)
OpBlockChecksumResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpBlockChecksumResponseProto.newBuilder() to construct.
private OpBlockChecksumResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpBlockChecksumResponseProto() {
blockChecksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
crcType_ = 0;
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpBlockChecksumResponseProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
bytesPerCrc_ = input.readUInt32();
break;
}
case 16: {
bitField0_ |= 0x00000002;
crcPerBlock_ = input.readUInt64();
break;
}
case 26: {
bitField0_ |= 0x00000004;
blockChecksum_ = input.readBytes();
break;
}
case 32: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000008;
crcType_ = rawValue;
}
break;
}
case 42: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) != 0)) {
subBuilder = blockChecksumOptions_.toBuilder();
}
blockChecksumOptions_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(blockChecksumOptions_);
blockChecksumOptions_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder.class);
}
private int bitField0_;
public static final int BYTESPERCRC_FIELD_NUMBER = 1;
private int bytesPerCrc_;
/**
* required uint32 bytesPerCrc = 1;
*/
public boolean hasBytesPerCrc() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required uint32 bytesPerCrc = 1;
*/
public int getBytesPerCrc() {
return bytesPerCrc_;
}
public static final int CRCPERBLOCK_FIELD_NUMBER = 2;
private long crcPerBlock_;
/**
* required uint64 crcPerBlock = 2;
*/
public boolean hasCrcPerBlock() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required uint64 crcPerBlock = 2;
*/
public long getCrcPerBlock() {
return crcPerBlock_;
}
public static final int BLOCKCHECKSUM_FIELD_NUMBER = 3;
private org.apache.hadoop.thirdparty.protobuf.ByteString blockChecksum_;
/**
* required bytes blockChecksum = 3;
*/
public boolean hasBlockChecksum() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required bytes blockChecksum = 3;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockChecksum() {
return blockChecksum_;
}
public static final int CRCTYPE_FIELD_NUMBER = 4;
private int crcType_;
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public boolean hasCrcType() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(crcType_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL : result;
}
public static final int BLOCKCHECKSUMOPTIONS_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasBytesPerCrc()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCrcPerBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockChecksum()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeUInt32(1, bytesPerCrc_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt64(2, crcPerBlock_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeBytes(3, blockChecksum_);
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeEnum(4, crcType_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeMessage(5, getBlockChecksumOptions());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(1, bytesPerCrc_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(2, crcPerBlock_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBytesSize(3, blockChecksum_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(4, crcType_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(5, getBlockChecksumOptions());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) obj;
if (hasBytesPerCrc() != other.hasBytesPerCrc()) return false;
if (hasBytesPerCrc()) {
if (getBytesPerCrc()
!= other.getBytesPerCrc()) return false;
}
if (hasCrcPerBlock() != other.hasCrcPerBlock()) return false;
if (hasCrcPerBlock()) {
if (getCrcPerBlock()
!= other.getCrcPerBlock()) return false;
}
if (hasBlockChecksum() != other.hasBlockChecksum()) return false;
if (hasBlockChecksum()) {
if (!getBlockChecksum()
.equals(other.getBlockChecksum())) return false;
}
if (hasCrcType() != other.hasCrcType()) return false;
if (hasCrcType()) {
if (crcType_ != other.crcType_) return false;
}
if (hasBlockChecksumOptions() != other.hasBlockChecksumOptions()) return false;
if (hasBlockChecksumOptions()) {
if (!getBlockChecksumOptions()
.equals(other.getBlockChecksumOptions())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasBytesPerCrc()) {
hash = (37 * hash) + BYTESPERCRC_FIELD_NUMBER;
hash = (53 * hash) + getBytesPerCrc();
}
if (hasCrcPerBlock()) {
hash = (37 * hash) + CRCPERBLOCK_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getCrcPerBlock());
}
if (hasBlockChecksum()) {
hash = (37 * hash) + BLOCKCHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getBlockChecksum().hashCode();
}
if (hasCrcType()) {
hash = (37 * hash) + CRCTYPE_FIELD_NUMBER;
hash = (53 * hash) + crcType_;
}
if (hasBlockChecksumOptions()) {
hash = (37 * hash) + BLOCKCHECKSUMOPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getBlockChecksumOptions().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpBlockChecksumResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpBlockChecksumResponseProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getBlockChecksumOptionsFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
bytesPerCrc_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
crcPerBlock_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
blockChecksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
crcType_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = null;
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.bytesPerCrc_ = bytesPerCrc_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.crcPerBlock_ = crcPerBlock_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
to_bitField0_ |= 0x00000004;
}
result.blockChecksum_ = blockChecksum_;
if (((from_bitField0_ & 0x00000008) != 0)) {
to_bitField0_ |= 0x00000008;
}
result.crcType_ = crcType_;
if (((from_bitField0_ & 0x00000010) != 0)) {
if (blockChecksumOptionsBuilder_ == null) {
result.blockChecksumOptions_ = blockChecksumOptions_;
} else {
result.blockChecksumOptions_ = blockChecksumOptionsBuilder_.build();
}
to_bitField0_ |= 0x00000010;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance()) return this;
if (other.hasBytesPerCrc()) {
setBytesPerCrc(other.getBytesPerCrc());
}
if (other.hasCrcPerBlock()) {
setCrcPerBlock(other.getCrcPerBlock());
}
if (other.hasBlockChecksum()) {
setBlockChecksum(other.getBlockChecksum());
}
if (other.hasCrcType()) {
setCrcType(other.getCrcType());
}
if (other.hasBlockChecksumOptions()) {
mergeBlockChecksumOptions(other.getBlockChecksumOptions());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasBytesPerCrc()) {
return false;
}
if (!hasCrcPerBlock()) {
return false;
}
if (!hasBlockChecksum()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private int bytesPerCrc_ ;
/**
* required uint32 bytesPerCrc = 1;
*/
public boolean hasBytesPerCrc() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required uint32 bytesPerCrc = 1;
*/
public int getBytesPerCrc() {
return bytesPerCrc_;
}
/**
* required uint32 bytesPerCrc = 1;
*/
public Builder setBytesPerCrc(int value) {
bitField0_ |= 0x00000001;
bytesPerCrc_ = value;
onChanged();
return this;
}
/**
* required uint32 bytesPerCrc = 1;
*/
public Builder clearBytesPerCrc() {
bitField0_ = (bitField0_ & ~0x00000001);
bytesPerCrc_ = 0;
onChanged();
return this;
}
private long crcPerBlock_ ;
/**
* required uint64 crcPerBlock = 2;
*/
public boolean hasCrcPerBlock() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required uint64 crcPerBlock = 2;
*/
public long getCrcPerBlock() {
return crcPerBlock_;
}
/**
* required uint64 crcPerBlock = 2;
*/
public Builder setCrcPerBlock(long value) {
bitField0_ |= 0x00000002;
crcPerBlock_ = value;
onChanged();
return this;
}
/**
* required uint64 crcPerBlock = 2;
*/
public Builder clearCrcPerBlock() {
bitField0_ = (bitField0_ & ~0x00000002);
crcPerBlock_ = 0L;
onChanged();
return this;
}
private org.apache.hadoop.thirdparty.protobuf.ByteString blockChecksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
* required bytes blockChecksum = 3;
*/
public boolean hasBlockChecksum() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required bytes blockChecksum = 3;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockChecksum() {
return blockChecksum_;
}
/**
* required bytes blockChecksum = 3;
*/
public Builder setBlockChecksum(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
blockChecksum_ = value;
onChanged();
return this;
}
/**
* required bytes blockChecksum = 3;
*/
public Builder clearBlockChecksum() {
bitField0_ = (bitField0_ & ~0x00000004);
blockChecksum_ = getDefaultInstance().getBlockChecksum();
onChanged();
return this;
}
private int crcType_ = 0;
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public boolean hasCrcType() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType() {
@SuppressWarnings("deprecation")
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(crcType_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL : result;
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public Builder setCrcType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
crcType_ = value.getNumber();
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public Builder clearCrcType() {
bitField0_ = (bitField0_ & ~0x00000008);
crcType_ = 0;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> blockChecksumOptionsBuilder_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
} else {
return blockChecksumOptionsBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder setBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blockChecksumOptions_ = value;
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder setBlockChecksumOptions(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder builderForValue) {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = builderForValue.build();
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder mergeBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (((bitField0_ & 0x00000010) != 0) &&
blockChecksumOptions_ != null &&
blockChecksumOptions_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) {
blockChecksumOptions_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.newBuilder(blockChecksumOptions_).mergeFrom(value).buildPartial();
} else {
blockChecksumOptions_ = value;
}
onChanged();
} else {
blockChecksumOptionsBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder clearBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = null;
onChanged();
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder getBlockChecksumOptionsBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getBlockChecksumOptionsFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
if (blockChecksumOptionsBuilder_ != null) {
return blockChecksumOptionsBuilder_.getMessageOrBuilder();
} else {
return blockChecksumOptions_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>
getBlockChecksumOptionsFieldBuilder() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>(
getBlockChecksumOptions(),
getParentForChildren(),
isClean());
blockChecksumOptions_ = null;
}
return blockChecksumOptionsBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpBlockChecksumResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpBlockChecksumResponseProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumResponseProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpBlockChecksumResponseProto>() {
@java.lang.Override
public OpBlockChecksumResponseProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new OpBlockChecksumResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
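// Illustrative sketch (not part of the generated file): building and round-tripping an
// OpBlockChecksumResponseProto using the builder and parse methods declared above. The
// helper name and the literal field values are hypothetical examples, not HDFS defaults.
//
//   byte[] exampleChecksumResponseRoundTrip()
//       throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
//     org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto msg =
//         org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.newBuilder()
//             .setBytesPerCrc(512)                                  // required uint32 bytesPerCrc = 1
//             .setCrcPerBlock(256L)                                 // required uint64 crcPerBlock = 2
//             .setBlockChecksum(                                    // required bytes blockChecksum = 3
//                 org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[] {1, 2, 3, 4}))
//             .build();                                             // build() fails if a required field is unset
//     byte[] wire = msg.toByteArray();
//     // parseFrom rejects payloads whose required fields are missing
//     org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parsed =
//         org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.parseFrom(wire);
//     return parsed.getBlockChecksum().toByteArray();
//   }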
public interface OpCustomProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpCustomProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required string customId = 1;
*/
boolean hasCustomId();
/**
* required string customId = 1;
*/
java.lang.String getCustomId();
/**
* required string customId = 1;
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getCustomIdBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.OpCustomProto}
*/
public static final class OpCustomProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpCustomProto)
OpCustomProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpCustomProto.newBuilder() to construct.
private OpCustomProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpCustomProto() {
customId_ = "";
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpCustomProto(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000001;
customId_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.Builder.class);
}
private int bitField0_;
public static final int CUSTOMID_FIELD_NUMBER = 1;
private volatile java.lang.Object customId_;
/**
* required string customId = 1;
*/
public boolean hasCustomId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string customId = 1;
*/
public java.lang.String getCustomId() {
java.lang.Object ref = customId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
customId_ = s;
}
return s;
}
}
/**
* required string customId = 1;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getCustomIdBytes() {
java.lang.Object ref = customId_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
customId_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasCustomId()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, customId_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, customId_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto) obj;
if (hasCustomId() != other.hasCustomId()) return false;
if (hasCustomId()) {
if (!getCustomId()
.equals(other.getCustomId())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasCustomId()) {
hash = (37 * hash) + CUSTOMID_FIELD_NUMBER;
hash = (53 * hash) + getCustomId().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpCustomProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpCustomProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
customId_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.customId_ = customId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.getDefaultInstance()) return this;
if (other.hasCustomId()) {
bitField0_ |= 0x00000001;
customId_ = other.customId_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasCustomId()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private java.lang.Object customId_ = "";
/**
* required string customId = 1;
*/
public boolean hasCustomId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string customId = 1;
*/
public java.lang.String getCustomId() {
java.lang.Object ref = customId_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
customId_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string customId = 1;
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getCustomIdBytes() {
java.lang.Object ref = customId_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
customId_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string customId = 1;
*/
public Builder setCustomId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
customId_ = value;
onChanged();
return this;
}
/**
* required string customId = 1;
*/
public Builder clearCustomId() {
bitField0_ = (bitField0_ & ~0x00000001);
customId_ = getDefaultInstance().getCustomId();
onChanged();
return this;
}
/**
* required string customId = 1;
*/
public Builder setCustomIdBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
customId_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpCustomProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpCustomProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpCustomProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpCustomProto>() {
@java.lang.Override
public OpCustomProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new OpCustomProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpCustomProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpCustomProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
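// Illustrative sketch (not part of the generated file): OpCustomProto carries a single
// required customId string. The id value below is a hypothetical example; the accessors
// are the ones declared in this class.
//
//   boolean exampleCustomIdRoundTrip()
//       throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
//     org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto op =
//         org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.newBuilder()
//             .setCustomId("example-op")          // required string customId = 1
//             .build();
//     org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parsed =
//         org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.parseFrom(op.toByteArray());
//     return parsed.hasCustomId() && "example-op".equals(parsed.getCustomId());
//   }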
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_HandshakeSecretProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ChecksumProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpCustomProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable;
public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\022datatransfer.proto\022\013hadoop.hdfs\032\016Secur" +
"ity.proto\032\nhdfs.proto\"\373\002\n!DataTransferEn" +
"cryptorMessageProto\022Z\n\006status\030\001 \002(\0162J.ha" +
"doop.hdfs.DataTransferEncryptorMessagePr" +
"oto.DataTransferEncryptorStatus\022\017\n\007paylo" +
"ad\030\002 \001(\014\022\017\n\007message\030\003 \001(\t\0224\n\014cipherOptio" +
"n\030\004 \003(\0132\036.hadoop.hdfs.CipherOptionProto\022" +
":\n\017handshakeSecret\030\005 \001(\0132!.hadoop.hdfs.H" +
"andshakeSecretProto\022\030\n\020accessTokenError\030" +
"\006 \001(\010\"L\n\033DataTransferEncryptorStatus\022\013\n\007" +
"SUCCESS\020\000\022\025\n\021ERROR_UNKNOWN_KEY\020\001\022\t\n\005ERRO" +
"R\020\002\"4\n\024HandshakeSecretProto\022\016\n\006secret\030\001 " +
"\002(\014\022\014\n\004bpid\030\002 \002(\t\"\247\001\n\017BaseHeaderProto\022.\n" +
"\005block\030\001 \002(\0132\037.hadoop.hdfs.ExtendedBlock" +
"Proto\022(\n\005token\030\002 \001(\0132\031.hadoop.common.Tok" +
"enProto\022:\n\ttraceInfo\030\003 \001(\0132\'.hadoop.hdfs" +
".DataTransferTraceInfoProto\"T\n\032DataTrans" +
"ferTraceInfoProto\022\017\n\007traceId\030\001 \001(\004\022\020\n\010pa" +
"rentId\030\002 \001(\004\022\023\n\013spanContext\030\003 \001(\014\"b\n\032Cli" +
"entOperationHeaderProto\0220\n\nbaseHeader\030\001 " +
"\002(\0132\034.hadoop.hdfs.BaseHeaderProto\022\022\n\ncli" +
"entName\030\002 \002(\t\"=\n\024CachingStrategyProto\022\022\n" +
"\ndropBehind\030\001 \001(\010\022\021\n\treadahead\030\002 \001(\003\"\301\001\n" +
"\020OpReadBlockProto\0227\n\006header\030\001 \002(\0132\'.hado" +
"op.hdfs.ClientOperationHeaderProto\022\016\n\006of" +
"fset\030\002 \002(\004\022\013\n\003len\030\003 \002(\004\022\033\n\rsendChecksums" +
"\030\004 \001(\010:\004true\022:\n\017cachingStrategy\030\005 \001(\0132!." +
"hadoop.hdfs.CachingStrategyProto\"W\n\rChec" +
"ksumProto\022,\n\004type\030\001 \002(\0162\036.hadoop.hdfs.Ch" +
"ecksumTypeProto\022\030\n\020bytesPerChecksum\030\002 \002(" +
"\r\"\305\007\n\021OpWriteBlockProto\0227\n\006header\030\001 \002(\0132" +
"\'.hadoop.hdfs.ClientOperationHeaderProto" +
"\022/\n\007targets\030\002 \003(\0132\036.hadoop.hdfs.Datanode" +
"InfoProto\022.\n\006source\030\003 \001(\0132\036.hadoop.hdfs." +
"DatanodeInfoProto\022D\n\005stage\030\004 \002(\01625.hadoo" +
"p.hdfs.OpWriteBlockProto.BlockConstructi" +
"onStage\022\024\n\014pipelineSize\030\005 \002(\r\022\024\n\014minByte" +
"sRcvd\030\006 \002(\004\022\024\n\014maxBytesRcvd\030\007 \002(\004\022\035\n\025lat" +
"estGenerationStamp\030\010 \002(\004\0225\n\021requestedChe" +
"cksum\030\t \002(\0132\032.hadoop.hdfs.ChecksumProto\022" +
":\n\017cachingStrategy\030\n \001(\0132!.hadoop.hdfs.C" +
"achingStrategyProto\0228\n\013storageType\030\013 \001(\016" +
"2\035.hadoop.hdfs.StorageTypeProto:\004DISK\0229\n" +
"\022targetStorageTypes\030\014 \003(\0162\035.hadoop.hdfs." +
"StorageTypeProto\022\037\n\020allowLazyPersist\030\r \001" +
"(\010:\005false\022\026\n\007pinning\030\016 \001(\010:\005false\022\026\n\016tar" +
"getPinnings\030\017 \003(\010\022\021\n\tstorageId\030\020 \001(\t\022\030\n\020" +
"targetStorageIds\030\021 \003(\t\"\210\002\n\026BlockConstruc" +
"tionStage\022\031\n\025PIPELINE_SETUP_APPEND\020\000\022\"\n\036" +
"PIPELINE_SETUP_APPEND_RECOVERY\020\001\022\022\n\016DATA" +
"_STREAMING\020\002\022%\n!PIPELINE_SETUP_STREAMING" +
"_RECOVERY\020\003\022\022\n\016PIPELINE_CLOSE\020\004\022\033\n\027PIPEL" +
"INE_CLOSE_RECOVERY\020\005\022\031\n\025PIPELINE_SETUP_C" +
"REATE\020\006\022\020\n\014TRANSFER_RBW\020\007\022\026\n\022TRANSFER_FI" +
"NALIZED\020\010\"\325\001\n\024OpTransferBlockProto\0227\n\006he" +
"ader\030\001 \002(\0132\'.hadoop.hdfs.ClientOperation" +
"HeaderProto\022/\n\007targets\030\002 \003(\0132\036.hadoop.hd" +
"fs.DatanodeInfoProto\0229\n\022targetStorageTyp" +
"es\030\003 \003(\0162\035.hadoop.hdfs.StorageTypeProto\022" +
"\030\n\020targetStorageIds\030\004 \003(\t\"\321\001\n\023OpReplaceB" +
"lockProto\022,\n\006header\030\001 \002(\0132\034.hadoop.hdfs." +
"BaseHeaderProto\022\017\n\007delHint\030\002 \002(\t\022.\n\006sour" +
"ce\030\003 \002(\0132\036.hadoop.hdfs.DatanodeInfoProto" +
"\0228\n\013storageType\030\004 \001(\0162\035.hadoop.hdfs.Stor" +
"ageTypeProto:\004DISK\022\021\n\tstorageId\030\005 \001(\t\"@\n" +
"\020OpCopyBlockProto\022,\n\006header\030\001 \002(\0132\034.hado" +
"op.hdfs.BaseHeaderProto\"\212\001\n\024OpBlockCheck" +
"sumProto\022,\n\006header\030\001 \002(\0132\034.hadoop.hdfs.B" +
"aseHeaderProto\022D\n\024blockChecksumOptions\030\002" +
" \001(\0132&.hadoop.hdfs.BlockChecksumOptionsP" +
"roto\"\335\002\n\031OpBlockGroupChecksumProto\022,\n\006he" +
"ader\030\001 \002(\0132\034.hadoop.hdfs.BaseHeaderProto" +
"\0222\n\tdatanodes\030\002 \002(\0132\037.hadoop.hdfs.Datano" +
"deInfosProto\022.\n\013blockTokens\030\003 \003(\0132\031.hado" +
"op.common.TokenProto\0227\n\010ecPolicy\030\004 \002(\0132%" +
".hadoop.hdfs.ErasureCodingPolicyProto\022\024\n" +
"\014blockIndices\030\005 \003(\r\022\031\n\021requestedNumBytes" +
"\030\006 \002(\004\022D\n\024blockChecksumOptions\030\007 \001(\0132&.h" +
"adoop.hdfs.BlockChecksumOptionsProto\"0\n\026" +
"ShortCircuitShmIdProto\022\n\n\002hi\030\001 \002(\003\022\n\n\002lo" +
"\030\002 \002(\003\"_\n\030ShortCircuitShmSlotProto\0222\n\005sh" +
"mId\030\001 \002(\0132#.hadoop.hdfs.ShortCircuitShmI" +
"dProto\022\017\n\007slotIdx\030\002 \002(\005\"\307\001\n OpRequestSho" +
"rtCircuitAccessProto\022,\n\006header\030\001 \002(\0132\034.h" +
"adoop.hdfs.BaseHeaderProto\022\022\n\nmaxVersion" +
"\030\002 \002(\r\0225\n\006slotId\030\003 \001(\0132%.hadoop.hdfs.Sho" +
"rtCircuitShmSlotProto\022*\n\033supportsReceipt" +
"Verification\030\004 \001(\010:\005false\"\232\001\n%ReleaseSho" +
"rtCircuitAccessRequestProto\0225\n\006slotId\030\001 " +
"\002(\0132%.hadoop.hdfs.ShortCircuitShmSlotPro" +
"to\022:\n\ttraceInfo\030\002 \001(\0132\'.hadoop.hdfs.Data" +
"TransferTraceInfoProto\"\\\n&ReleaseShortCi" +
"rcuitAccessResponseProto\022#\n\006status\030\001 \002(\016" +
"2\023.hadoop.hdfs.Status\022\r\n\005error\030\002 \001(\t\"m\n\033" +
"ShortCircuitShmRequestProto\022\022\n\nclientNam" +
"e\030\001 \002(\t\022:\n\ttraceInfo\030\002 \001(\0132\'.hadoop.hdfs" +
".DataTransferTraceInfoProto\"\203\001\n\034ShortCir" +
"cuitShmResponseProto\022#\n\006status\030\001 \002(\0162\023.h" +
"adoop.hdfs.Status\022\r\n\005error\030\002 \001(\t\022/\n\002id\030\003" +
" \001(\0132#.hadoop.hdfs.ShortCircuitShmIdProt" +
"o\"\177\n\021PacketHeaderProto\022\025\n\roffsetInBlock\030" +
"\001 \002(\020\022\r\n\005seqno\030\002 \002(\020\022\031\n\021lastPacketInBloc" +
"k\030\003 \002(\010\022\017\n\007dataLen\030\004 \002(\017\022\030\n\tsyncBlock\030\005 " +
"\001(\010:\005false\"z\n\020PipelineAckProto\022\r\n\005seqno\030" +
"\001 \002(\022\022\"\n\005reply\030\002 \003(\0162\023.hadoop.hdfs.Statu" +
"s\022!\n\026downstreamAckTimeNanos\030\003 \001(\004:\0010\022\020\n\004" +
"flag\030\004 \003(\rB\002\020\001\"\\\n\027ReadOpChecksumInfoProt" +
"o\022,\n\010checksum\030\001 \002(\0132\032.hadoop.hdfs.Checks" +
"umProto\022\023\n\013chunkOffset\030\002 \002(\004\"\214\002\n\024BlockOp" +
"ResponseProto\022#\n\006status\030\001 \002(\0162\023.hadoop.h" +
"dfs.Status\022\024\n\014firstBadLink\030\002 \001(\t\022C\n\020chec" +
"ksumResponse\030\003 \001(\0132).hadoop.hdfs.OpBlock" +
"ChecksumResponseProto\022@\n\022readOpChecksumI" +
"nfo\030\004 \001(\0132$.hadoop.hdfs.ReadOpChecksumIn" +
"foProto\022\017\n\007message\030\005 \001(\t\022!\n\031shortCircuit" +
"AccessVersion\030\006 \001(\r\"<\n\025ClientReadStatusP" +
"roto\022#\n\006status\030\001 \002(\0162\023.hadoop.hdfs.Statu" +
"s\"9\n\022DNTransferAckProto\022#\n\006status\030\001 \002(\0162" +
"\023.hadoop.hdfs.Status\"\326\001\n\034OpBlockChecksum" +
"ResponseProto\022\023\n\013bytesPerCrc\030\001 \002(\r\022\023\n\013cr" +
"cPerBlock\030\002 \002(\004\022\025\n\rblockChecksum\030\003 \002(\014\022/" +
"\n\007crcType\030\004 \001(\0162\036.hadoop.hdfs.ChecksumTy" +
"peProto\022D\n\024blockChecksumOptions\030\005 \001(\0132&." +
"hadoop.hdfs.BlockChecksumOptionsProto\"!\n" +
"\rOpCustomProto\022\020\n\010customId\030\001 \002(\t*\214\002\n\006Sta" +
"tus\022\013\n\007SUCCESS\020\000\022\t\n\005ERROR\020\001\022\022\n\016ERROR_CHE" +
"CKSUM\020\002\022\021\n\rERROR_INVALID\020\003\022\020\n\014ERROR_EXIS" +
"TS\020\004\022\026\n\022ERROR_ACCESS_TOKEN\020\005\022\017\n\013CHECKSUM" +
"_OK\020\006\022\025\n\021ERROR_UNSUPPORTED\020\007\022\017\n\013OOB_REST" +
"ART\020\010\022\021\n\rOOB_RESERVED1\020\t\022\021\n\rOOB_RESERVED" +
"2\020\n\022\021\n\rOOB_RESERVED3\020\013\022\017\n\013IN_PROGRESS\020\014\022" +
"\026\n\022ERROR_BLOCK_PINNED\020\r*[\n\026ShortCircuitF" +
"dResponse\022#\n\037DO_NOT_USE_RECEIPT_VERIFICA" +
"TION\020\000\022\034\n\030USE_RECEIPT_VERIFICATION\020\001B>\n%" +
"org.apache.hadoop.hdfs.protocol.protoB\022D" +
"ataTransferProtos\240\001\001"
};
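// Descriptive comment (not part of the generated output): the assigner below only captures the
// FileDescriptor that internalBuildGeneratedFileFrom() reconstructs from the serialized
// descriptor data above, with Security.proto and hdfs.proto supplied as its dependencies.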
org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry assignDescriptors(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
return null;
}
};
org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(),
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
}, assigner);
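// Descriptive comment (not part of the generated output): each message descriptor is looked up
// by its declaration order in datatransfer.proto, and a FieldAccessorTable maps the camel-cased
// field names so the generated message classes can be read and written reflectively.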
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor,
new java.lang.String[] { "Status", "Payload", "Message", "CipherOption", "HandshakeSecret", "AccessTokenError", });
internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_hadoop_hdfs_HandshakeSecretProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor,
new java.lang.String[] { "Secret", "Bpid", });
internal_static_hadoop_hdfs_BaseHeaderProto_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_BaseHeaderProto_descriptor,
new java.lang.String[] { "Block", "Token", "TraceInfo", });
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor,
new java.lang.String[] { "TraceId", "ParentId", "SpanContext", });
internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor,
new java.lang.String[] { "BaseHeader", "ClientName", });
internal_static_hadoop_hdfs_CachingStrategyProto_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_CachingStrategyProto_descriptor,
new java.lang.String[] { "DropBehind", "Readahead", });
internal_static_hadoop_hdfs_OpReadBlockProto_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpReadBlockProto_descriptor,
new java.lang.String[] { "Header", "Offset", "Len", "SendChecksums", "CachingStrategy", });
internal_static_hadoop_hdfs_ChecksumProto_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ChecksumProto_descriptor,
new java.lang.String[] { "Type", "BytesPerChecksum", });
internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor,
new java.lang.String[] { "Header", "Targets", "Source", "Stage", "PipelineSize", "MinBytesRcvd", "MaxBytesRcvd", "LatestGenerationStamp", "RequestedChecksum", "CachingStrategy", "StorageType", "TargetStorageTypes", "AllowLazyPersist", "Pinning", "TargetPinnings", "StorageId", "TargetStorageIds", });
internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor,
new java.lang.String[] { "Header", "Targets", "TargetStorageTypes", "TargetStorageIds", });
internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor =
getDescriptor().getMessageTypes().get(10);
internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor,
new java.lang.String[] { "Header", "DelHint", "Source", "StorageType", "StorageId", });
internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor =
getDescriptor().getMessageTypes().get(11);
internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor,
new java.lang.String[] { "Header", });
internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor =
getDescriptor().getMessageTypes().get(12);
internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor,
new java.lang.String[] { "Header", "BlockChecksumOptions", });
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor =
getDescriptor().getMessageTypes().get(13);
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor,
new java.lang.String[] { "Header", "Datanodes", "BlockTokens", "EcPolicy", "BlockIndices", "RequestedNumBytes", "BlockChecksumOptions", });
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor =
getDescriptor().getMessageTypes().get(14);
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor,
new java.lang.String[] { "Hi", "Lo", });
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor =
getDescriptor().getMessageTypes().get(15);
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor,
new java.lang.String[] { "ShmId", "SlotIdx", });
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor =
getDescriptor().getMessageTypes().get(16);
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor,
new java.lang.String[] { "Header", "MaxVersion", "SlotId", "SupportsReceiptVerification", });
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor =
getDescriptor().getMessageTypes().get(17);
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor,
new java.lang.String[] { "SlotId", "TraceInfo", });
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor =
getDescriptor().getMessageTypes().get(18);
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor,
new java.lang.String[] { "Status", "Error", });
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor =
getDescriptor().getMessageTypes().get(19);
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor,
new java.lang.String[] { "ClientName", "TraceInfo", });
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor =
getDescriptor().getMessageTypes().get(20);
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor,
new java.lang.String[] { "Status", "Error", "Id", });
internal_static_hadoop_hdfs_PacketHeaderProto_descriptor =
getDescriptor().getMessageTypes().get(21);
internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_PacketHeaderProto_descriptor,
new java.lang.String[] { "OffsetInBlock", "Seqno", "LastPacketInBlock", "DataLen", "SyncBlock", });
internal_static_hadoop_hdfs_PipelineAckProto_descriptor =
getDescriptor().getMessageTypes().get(22);
internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_PipelineAckProto_descriptor,
new java.lang.String[] { "Seqno", "Reply", "DownstreamAckTimeNanos", "Flag", });
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor =
getDescriptor().getMessageTypes().get(23);
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor,
new java.lang.String[] { "Checksum", "ChunkOffset", });
internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor =
getDescriptor().getMessageTypes().get(24);
internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor,
new java.lang.String[] { "Status", "FirstBadLink", "ChecksumResponse", "ReadOpChecksumInfo", "Message", "ShortCircuitAccessVersion", });
internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor =
getDescriptor().getMessageTypes().get(25);
internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor,
new java.lang.String[] { "Status", });
internal_static_hadoop_hdfs_DNTransferAckProto_descriptor =
getDescriptor().getMessageTypes().get(26);
internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_DNTransferAckProto_descriptor,
new java.lang.String[] { "Status", });
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor =
getDescriptor().getMessageTypes().get(27);
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor,
new java.lang.String[] { "BytesPerCrc", "CrcPerBlock", "BlockChecksum", "CrcType", "BlockChecksumOptions", });
internal_static_hadoop_hdfs_OpCustomProto_descriptor =
getDescriptor().getMessageTypes().get(28);
internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpCustomProto_descriptor,
new java.lang.String[] { "CustomId", });
org.apache.hadoop.security.proto.SecurityProtos.getDescriptor();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)
}
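// Illustrative usage sketch (not part of the generated file): building one of the request
// messages defined above with its generated builder. The pool id, block id and client name are
// made-up placeholders; only the message and field names come from datatransfer.proto and
// hdfs.proto.
final class DataTransferProtosUsageSketch {
  static byte[] buildReadRequest() {
    // An ExtendedBlockProto identifying the block to read (placeholder values).
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder()
            .setPoolId("example-pool")
            .setBlockId(1L)
            .setGenerationStamp(1L)
            .build();
    // Read request: required header, offset and len; sendChecksums defaults to true
    // (see the OpReadBlockProto entry in the descriptor data above).
    DataTransferProtos.OpReadBlockProto op =
        DataTransferProtos.OpReadBlockProto.newBuilder()
            .setHeader(DataTransferProtos.ClientOperationHeaderProto.newBuilder()
                .setBaseHeader(DataTransferProtos.BaseHeaderProto.newBuilder().setBlock(block))
                .setClientName("example-client"))
            .setOffset(0L)
            .setLen(1024L)
            .build();
    // Serialize to the wire format; a peer would decode it with OpReadBlockProto.parseFrom().
    return op.toByteArray();
  }
}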