// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: datatransfer.proto
package org.apache.hadoop.hdfs.protocol.proto;
public final class DataTransferProtos {
private DataTransferProtos() {}
public static void registerAllExtensions(
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
}
public static void registerAllExtensions(
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
registerAllExtensions(
(org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
}
/**
* The requested checksum mechanism for this block write.
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
* @return Whether the cachingStrategy field is set.
*/
boolean hasCachingStrategy();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
* @return The cachingStrategy.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder();
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
* @return Whether the storageType field is set.
*/
boolean hasStorageType();
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
* @return The storageType.
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @return A list containing the targetStorageTypes.
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @return The count of targetStorageTypes.
*/
int getTargetStorageTypesCount();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @param index The index of the element to return.
* @return The targetStorageTypes at the given index.
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index);
/**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
* optional bool allowLazyPersist = 13 [default = false];
* @return Whether the allowLazyPersist field is set.
*/
boolean hasAllowLazyPersist();
/**
* The requested checksum mechanism for this block write.
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() {
return requestedChecksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : requestedChecksum_;
}
public static final int CACHINGSTRATEGY_FIELD_NUMBER = 10;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
* @return Whether the cachingStrategy field is set.
*/
@java.lang.Override
public boolean hasCachingStrategy() {
return ((bitField0_ & 0x00000100) != 0);
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
* @return The cachingStrategy.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
}
public static final int STORAGETYPE_FIELD_NUMBER = 11;
private int storageType_ = 1;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
* @return Whether the storageType field is set.
*/
@java.lang.Override public boolean hasStorageType() {
return ((bitField0_ & 0x00000200) != 0);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
* @return The storageType.
*/
@java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
}
public static final int TARGETSTORAGETYPES_FIELD_NUMBER = 12;
@SuppressWarnings("serial")
private java.util.List<java.lang.Integer> targetStorageTypes_;
private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> targetStorageTypes_converter_ =
new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() {
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(from);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
}
};
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @return A list containing the targetStorageTypes.
*/
@java.lang.Override
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_, targetStorageTypes_converter_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @return The count of targetStorageTypes.
*/
@java.lang.Override
public int getTargetStorageTypesCount() {
return targetStorageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @param index The index of the element to return.
* @return The targetStorageTypes at the given index.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
return targetStorageTypes_converter_.convert(targetStorageTypes_.get(index));
}
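/*
* Note: the repeated enum above is stored as a list of raw Integer values and
* mapped through targetStorageTypes_converter_, which falls back to DISK for
* unrecognized numbers. A reading sketch (the "req" instance is hypothetical):
*
*   for (int i = 0; i < req.getTargetStorageTypesCount(); i++) {
*     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto t = req.getTargetStorageTypes(i);
*   }
*/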
public static final int ALLOWLAZYPERSIST_FIELD_NUMBER = 13;
private boolean allowLazyPersist_ = false;
/**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
* optional bool allowLazyPersist = 13 [default = false];
* @return Whether the allowLazyPersist field is set.
*/
@java.lang.Override
public boolean hasAllowLazyPersist() {
return ((bitField0_ & 0x00000400) != 0);
}
/**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
* optional bool allowLazyPersist = 13 [default = false];
* @return The allowLazyPersist.
*/
@java.lang.Override
public boolean getAllowLazyPersist() {
return allowLazyPersist_;
}
public static final int PINNING_FIELD_NUMBER = 14;
private boolean pinning_ = false;
/**
* Whether to pin the block so the Balancer won't move it.
*
* optional bool pinning = 14 [default = false];
* @return Whether the pinning field is set.
*/
@java.lang.Override
public boolean hasPinning() {
return ((bitField0_ & 0x00000800) != 0);
}
/**
* Whether to pin the block so the Balancer won't move it.
*
* optional bool pinning = 14 [default = false];
* @return The pinning.
*/
@java.lang.Override
public boolean getPinning() {
return pinning_;
}
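/*
* Usage sketch for the two hint fields above (illustrative only; "header" and
* "checksum" are hypothetical values built elsewhere, not defined in this file):
*
*   OpWriteBlockProto req = OpWriteBlockProto.newBuilder()
*       .setHeader(header)                // required
*       .setStage(OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_CREATE)
*       .setPipelineSize(3)
*       .setMinBytesRcvd(0L)
*       .setMaxBytesRcvd(0L)
*       .setLatestGenerationStamp(0L)
*       .setRequestedChecksum(checksum)   // required
*       .setAllowLazyPersist(true)        // hint only; the DataNode may ignore it
*       .setPinning(true)                 // ask the Balancer not to move the block
*       .build();
*/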
public static final int TARGETPINNINGS_FIELD_NUMBER = 15;
@SuppressWarnings("serial")
private org.apache.hadoop.thirdparty.protobuf.Internal.BooleanList targetPinnings_;
/**
* repeated bool targetPinnings = 15;
* @return A list containing the targetPinnings.
*/
@java.lang.Override
public java.util.List<java.lang.Boolean>
getTargetPinningsList() {
return targetPinnings_;
}
/**
* repeated bool targetPinnings = 15;
* @return The count of targetPinnings.
*/
public int getTargetPinningsCount() {
return targetPinnings_.size();
}
/**
* repeated bool targetPinnings = 15;
* @param index The index of the element to return.
* @return The targetPinnings at the given index.
*/
public boolean getTargetPinnings(int index) {
return targetPinnings_.getBoolean(index);
}
public static final int STORAGEID_FIELD_NUMBER = 16;
@SuppressWarnings("serial")
private volatile java.lang.Object storageId_ = "";
/**
* optional string storageId = 16;
* @return Whether the storageId field is set.
*/
@java.lang.Override
public boolean hasStorageId() {
return ((bitField0_ & 0x00001000) != 0);
}
/**
* optional string storageId = 16;
* @return The storageId.
*/
@java.lang.Override
public java.lang.String getStorageId() {
java.lang.Object ref = storageId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageId_ = s;
}
return s;
}
}
/**
* optional string storageId = 16;
* @return The bytes for storageId.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString
getStorageIdBytes() {
java.lang.Object ref = storageId_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageId_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
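/*
* Note on the two accessors above: storageId_ holds either a String or a
* ByteString. getStorageId() decodes the bytes and caches the String only when
* they are valid UTF-8; getStorageIdBytes() encodes and caches the ByteString.
* Round-trip sketch (the "req" instance is hypothetical):
*
*   String id = req.getStorageId();                                   // decoded lazily
*   org.apache.hadoop.thirdparty.protobuf.ByteString raw = req.getStorageIdBytes();
*   assert raw.toStringUtf8().equals(id);                             // for valid UTF-8 ids
*/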
public static final int TARGETSTORAGEIDS_FIELD_NUMBER = 17;
@SuppressWarnings("serial")
private org.apache.hadoop.thirdparty.protobuf.LazyStringList targetStorageIds_;
/**
* repeated string targetStorageIds = 17;
* @return A list containing the targetStorageIds.
*/
public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
getTargetStorageIdsList() {
return targetStorageIds_;
}
/**
* repeated string targetStorageIds = 17;
* @return The count of targetStorageIds.
*/
public int getTargetStorageIdsCount() {
return targetStorageIds_.size();
}
/**
* repeated string targetStorageIds = 17;
* @param index The index of the element to return.
* @return The targetStorageIds at the given index.
*/
public java.lang.String getTargetStorageIds(int index) {
return targetStorageIds_.get(index);
}
/**
* repeated string targetStorageIds = 17;
* @param index The index of the value to return.
* @return The bytes of the targetStorageIds at the given index.
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getTargetStorageIdsBytes(int index) {
return targetStorageIds_.getByteString(index);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStage()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPipelineSize()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMinBytesRcvd()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMaxBytesRcvd()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLatestGenerationStamp()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasRequestedChecksum()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasSource()) {
if (!getSource().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (!getRequestedChecksum().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
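/*
* isInitialized() memoizes the required-field check used by Builder.build().
* Sketch of the effect (illustrative): build() throws
* UninitializedMessageException while any of header, stage, pipelineSize,
* minBytesRcvd, maxBytesRcvd, latestGenerationStamp or requestedChecksum is
* unset, or a nested message fails its own check; buildPartial() skips the
* validation.
*
*   OpWriteBlockProto.Builder b = OpWriteBlockProto.newBuilder();
*   b.buildPartial();   // no validation
*   b.build();          // throws UninitializedMessageException
*/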
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getHeader());
}
for (int i = 0; i < targets_.size(); i++) {
output.writeMessage(2, targets_.get(i));
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(3, getSource());
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeEnum(4, stage_);
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeUInt32(5, pipelineSize_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeUInt64(6, minBytesRcvd_);
}
if (((bitField0_ & 0x00000020) != 0)) {
output.writeUInt64(7, maxBytesRcvd_);
}
if (((bitField0_ & 0x00000040) != 0)) {
output.writeUInt64(8, latestGenerationStamp_);
}
if (((bitField0_ & 0x00000080) != 0)) {
output.writeMessage(9, getRequestedChecksum());
}
if (((bitField0_ & 0x00000100) != 0)) {
output.writeMessage(10, getCachingStrategy());
}
if (((bitField0_ & 0x00000200) != 0)) {
output.writeEnum(11, storageType_);
}
for (int i = 0; i < targetStorageTypes_.size(); i++) {
output.writeEnum(12, targetStorageTypes_.get(i));
}
if (((bitField0_ & 0x00000400) != 0)) {
output.writeBool(13, allowLazyPersist_);
}
if (((bitField0_ & 0x00000800) != 0)) {
output.writeBool(14, pinning_);
}
for (int i = 0; i < targetPinnings_.size(); i++) {
output.writeBool(15, targetPinnings_.getBoolean(i));
}
if (((bitField0_ & 0x00001000) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 16, storageId_);
}
for (int i = 0; i < targetStorageIds_.size(); i++) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 17, targetStorageIds_.getRaw(i));
}
getUnknownFields().writeTo(output);
}
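/*
* writeTo() emits only the fields whose presence bit is set in bitField0_, in
* field-number order, followed by any unknown fields. Round-trip sketch (the
* "req" instance is hypothetical):
*
*   byte[] wire = req.toByteArray();   // backed by writeTo()/getSerializedSize()
*   OpWriteBlockProto copy = OpWriteBlockProto.parseFrom(wire);
*   assert copy.equals(req);
*/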
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getHeader());
}
for (int i = 0; i < targets_.size(); i++) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(2, targets_.get(i));
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(3, getSource());
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(4, stage_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(5, pipelineSize_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(6, minBytesRcvd_);
}
if (((bitField0_ & 0x00000020) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(7, maxBytesRcvd_);
}
if (((bitField0_ & 0x00000040) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(8, latestGenerationStamp_);
}
if (((bitField0_ & 0x00000080) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(9, getRequestedChecksum());
}
if (((bitField0_ & 0x00000100) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(10, getCachingStrategy());
}
if (((bitField0_ & 0x00000200) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(11, storageType_);
}
{
int dataSize = 0;
for (int i = 0; i < targetStorageTypes_.size(); i++) {
dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSizeNoTag(targetStorageTypes_.get(i));
}
size += dataSize;
size += 1 * targetStorageTypes_.size();
}
if (((bitField0_ & 0x00000400) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(13, allowLazyPersist_);
}
if (((bitField0_ & 0x00000800) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(14, pinning_);
}
{
int dataSize = 0;
dataSize = 1 * getTargetPinningsList().size();
size += dataSize;
size += 1 * getTargetPinningsList().size();
}
if (((bitField0_ & 0x00001000) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(16, storageId_);
}
{
int dataSize = 0;
for (int i = 0; i < targetStorageIds_.size(); i++) {
dataSize += computeStringSizeNoTag(targetStorageIds_.getRaw(i));
}
size += dataSize;
size += 2 * getTargetStorageIdsList().size();
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) obj;
if (hasHeader() != other.hasHeader()) return false;
if (hasHeader()) {
if (!getHeader()
.equals(other.getHeader())) return false;
}
if (!getTargetsList()
.equals(other.getTargetsList())) return false;
if (hasSource() != other.hasSource()) return false;
if (hasSource()) {
if (!getSource()
.equals(other.getSource())) return false;
}
if (hasStage() != other.hasStage()) return false;
if (hasStage()) {
if (stage_ != other.stage_) return false;
}
if (hasPipelineSize() != other.hasPipelineSize()) return false;
if (hasPipelineSize()) {
if (getPipelineSize()
!= other.getPipelineSize()) return false;
}
if (hasMinBytesRcvd() != other.hasMinBytesRcvd()) return false;
if (hasMinBytesRcvd()) {
if (getMinBytesRcvd()
!= other.getMinBytesRcvd()) return false;
}
if (hasMaxBytesRcvd() != other.hasMaxBytesRcvd()) return false;
if (hasMaxBytesRcvd()) {
if (getMaxBytesRcvd()
!= other.getMaxBytesRcvd()) return false;
}
if (hasLatestGenerationStamp() != other.hasLatestGenerationStamp()) return false;
if (hasLatestGenerationStamp()) {
if (getLatestGenerationStamp()
!= other.getLatestGenerationStamp()) return false;
}
if (hasRequestedChecksum() != other.hasRequestedChecksum()) return false;
if (hasRequestedChecksum()) {
if (!getRequestedChecksum()
.equals(other.getRequestedChecksum())) return false;
}
if (hasCachingStrategy() != other.hasCachingStrategy()) return false;
if (hasCachingStrategy()) {
if (!getCachingStrategy()
.equals(other.getCachingStrategy())) return false;
}
if (hasStorageType() != other.hasStorageType()) return false;
if (hasStorageType()) {
if (storageType_ != other.storageType_) return false;
}
if (!targetStorageTypes_.equals(other.targetStorageTypes_)) return false;
if (hasAllowLazyPersist() != other.hasAllowLazyPersist()) return false;
if (hasAllowLazyPersist()) {
if (getAllowLazyPersist()
!= other.getAllowLazyPersist()) return false;
}
if (hasPinning() != other.hasPinning()) return false;
if (hasPinning()) {
if (getPinning()
!= other.getPinning()) return false;
}
if (!getTargetPinningsList()
.equals(other.getTargetPinningsList())) return false;
if (hasStorageId() != other.hasStorageId()) return false;
if (hasStorageId()) {
if (!getStorageId()
.equals(other.getStorageId())) return false;
}
if (!getTargetStorageIdsList()
.equals(other.getTargetStorageIdsList())) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (getTargetsCount() > 0) {
hash = (37 * hash) + TARGETS_FIELD_NUMBER;
hash = (53 * hash) + getTargetsList().hashCode();
}
if (hasSource()) {
hash = (37 * hash) + SOURCE_FIELD_NUMBER;
hash = (53 * hash) + getSource().hashCode();
}
if (hasStage()) {
hash = (37 * hash) + STAGE_FIELD_NUMBER;
hash = (53 * hash) + stage_;
}
if (hasPipelineSize()) {
hash = (37 * hash) + PIPELINESIZE_FIELD_NUMBER;
hash = (53 * hash) + getPipelineSize();
}
if (hasMinBytesRcvd()) {
hash = (37 * hash) + MINBYTESRCVD_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getMinBytesRcvd());
}
if (hasMaxBytesRcvd()) {
hash = (37 * hash) + MAXBYTESRCVD_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getMaxBytesRcvd());
}
if (hasLatestGenerationStamp()) {
hash = (37 * hash) + LATESTGENERATIONSTAMP_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getLatestGenerationStamp());
}
if (hasRequestedChecksum()) {
hash = (37 * hash) + REQUESTEDCHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getRequestedChecksum().hashCode();
}
if (hasCachingStrategy()) {
hash = (37 * hash) + CACHINGSTRATEGY_FIELD_NUMBER;
hash = (53 * hash) + getCachingStrategy().hashCode();
}
if (hasStorageType()) {
hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
hash = (53 * hash) + storageType_;
}
if (getTargetStorageTypesCount() > 0) {
hash = (37 * hash) + TARGETSTORAGETYPES_FIELD_NUMBER;
hash = (53 * hash) + targetStorageTypes_.hashCode();
}
if (hasAllowLazyPersist()) {
hash = (37 * hash) + ALLOWLAZYPERSIST_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getAllowLazyPersist());
}
if (hasPinning()) {
hash = (37 * hash) + PINNING_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getPinning());
}
if (getTargetPinningsCount() > 0) {
hash = (37 * hash) + TARGETPINNINGS_FIELD_NUMBER;
hash = (53 * hash) + getTargetPinningsList().hashCode();
}
if (hasStorageId()) {
hash = (37 * hash) + STORAGEID_FIELD_NUMBER;
hash = (53 * hash) + getStorageId().hashCode();
}
if (getTargetStorageIdsCount() > 0) {
hash = (37 * hash) + TARGETSTORAGEIDS_FIELD_NUMBER;
hash = (53 * hash) + getTargetStorageIdsList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
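/*
* The parseFrom()/parseDelimitedFrom() overloads above accept ByteBuffer,
* ByteString, byte[], InputStream and CodedInputStream sources. Sketch of the
* length-delimited stream form (the "out"/"in" streams are hypothetical):
*
*   req.writeDelimitedTo(out);                                        // length-prefixed frame
*   OpWriteBlockProto got = OpWriteBlockProto.parseDelimitedFrom(in);
*/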
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
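/*
* toBuilder() and newBuilder(prototype) copy an existing message into a
* mutable builder. Sketch (the "req" instance and "newGenStamp" value are
* hypothetical):
*
*   OpWriteBlockProto updated = req.toBuilder()
*       .setLatestGenerationStamp(newGenStamp)   // change one field, keep the rest
*       .build();
*/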
/**
* Protobuf type {@code hadoop.hdfs.OpWriteBlockProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpWriteBlockProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getTargetsFieldBuilder();
getSourceFieldBuilder();
getRequestedChecksumFieldBuilder();
getCachingStrategyFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
header_ = null;
if (headerBuilder_ != null) {
headerBuilder_.dispose();
headerBuilder_ = null;
}
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
} else {
targets_ = null;
targetsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
source_ = null;
if (sourceBuilder_ != null) {
sourceBuilder_.dispose();
sourceBuilder_ = null;
}
stage_ = 0;
pipelineSize_ = 0;
minBytesRcvd_ = 0L;
maxBytesRcvd_ = 0L;
latestGenerationStamp_ = 0L;
requestedChecksum_ = null;
if (requestedChecksumBuilder_ != null) {
requestedChecksumBuilder_.dispose();
requestedChecksumBuilder_ = null;
}
cachingStrategy_ = null;
if (cachingStrategyBuilder_ != null) {
cachingStrategyBuilder_.dispose();
cachingStrategyBuilder_ = null;
}
storageType_ = 1;
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000800);
allowLazyPersist_ = false;
pinning_ = false;
targetPinnings_ = emptyBooleanList();
storageId_ = "";
targetStorageIds_ = org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00010000);
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto(this);
buildPartialRepeatedFields(result);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result) {
if (targetsBuilder_ == null) {
if (((bitField0_ & 0x00000002) != 0)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.targets_ = targets_;
} else {
result.targets_ = targetsBuilder_.build();
}
if (((bitField0_ & 0x00000800) != 0)) {
targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
bitField0_ = (bitField0_ & ~0x00000800);
}
result.targetStorageTypes_ = targetStorageTypes_;
if (((bitField0_ & 0x00004000) != 0)) {
targetPinnings_.makeImmutable();
bitField0_ = (bitField0_ & ~0x00004000);
}
result.targetPinnings_ = targetPinnings_;
if (((bitField0_ & 0x00010000) != 0)) {
targetStorageIds_ = targetStorageIds_.getUnmodifiableView();
bitField0_ = (bitField0_ & ~0x00010000);
}
result.targetStorageIds_ = targetStorageIds_;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.header_ = headerBuilder_ == null
? header_
: headerBuilder_.build();
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.source_ = sourceBuilder_ == null
? source_
: sourceBuilder_.build();
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.stage_ = stage_;
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
result.pipelineSize_ = pipelineSize_;
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000020) != 0)) {
result.minBytesRcvd_ = minBytesRcvd_;
to_bitField0_ |= 0x00000010;
}
if (((from_bitField0_ & 0x00000040) != 0)) {
result.maxBytesRcvd_ = maxBytesRcvd_;
to_bitField0_ |= 0x00000020;
}
if (((from_bitField0_ & 0x00000080) != 0)) {
result.latestGenerationStamp_ = latestGenerationStamp_;
to_bitField0_ |= 0x00000040;
}
if (((from_bitField0_ & 0x00000100) != 0)) {
result.requestedChecksum_ = requestedChecksumBuilder_ == null
? requestedChecksum_
: requestedChecksumBuilder_.build();
to_bitField0_ |= 0x00000080;
}
if (((from_bitField0_ & 0x00000200) != 0)) {
result.cachingStrategy_ = cachingStrategyBuilder_ == null
? cachingStrategy_
: cachingStrategyBuilder_.build();
to_bitField0_ |= 0x00000100;
}
if (((from_bitField0_ & 0x00000400) != 0)) {
result.storageType_ = storageType_;
to_bitField0_ |= 0x00000200;
}
if (((from_bitField0_ & 0x00001000) != 0)) {
result.allowLazyPersist_ = allowLazyPersist_;
to_bitField0_ |= 0x00000400;
}
if (((from_bitField0_ & 0x00002000) != 0)) {
result.pinning_ = pinning_;
to_bitField0_ |= 0x00000800;
}
if (((from_bitField0_ & 0x00008000) != 0)) {
result.storageId_ = storageId_;
to_bitField0_ |= 0x00001000;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (targetsBuilder_ == null) {
if (!other.targets_.isEmpty()) {
if (targets_.isEmpty()) {
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureTargetsIsMutable();
targets_.addAll(other.targets_);
}
onChanged();
}
} else {
if (!other.targets_.isEmpty()) {
if (targetsBuilder_.isEmpty()) {
targetsBuilder_.dispose();
targetsBuilder_ = null;
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000002);
targetsBuilder_ =
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getTargetsFieldBuilder() : null;
} else {
targetsBuilder_.addAllMessages(other.targets_);
}
}
}
if (other.hasSource()) {
mergeSource(other.getSource());
}
if (other.hasStage()) {
setStage(other.getStage());
}
if (other.hasPipelineSize()) {
setPipelineSize(other.getPipelineSize());
}
if (other.hasMinBytesRcvd()) {
setMinBytesRcvd(other.getMinBytesRcvd());
}
if (other.hasMaxBytesRcvd()) {
setMaxBytesRcvd(other.getMaxBytesRcvd());
}
if (other.hasLatestGenerationStamp()) {
setLatestGenerationStamp(other.getLatestGenerationStamp());
}
if (other.hasRequestedChecksum()) {
mergeRequestedChecksum(other.getRequestedChecksum());
}
if (other.hasCachingStrategy()) {
mergeCachingStrategy(other.getCachingStrategy());
}
if (other.hasStorageType()) {
setStorageType(other.getStorageType());
}
if (!other.targetStorageTypes_.isEmpty()) {
if (targetStorageTypes_.isEmpty()) {
targetStorageTypes_ = other.targetStorageTypes_;
bitField0_ = (bitField0_ & ~0x00000800);
} else {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.addAll(other.targetStorageTypes_);
}
onChanged();
}
if (other.hasAllowLazyPersist()) {
setAllowLazyPersist(other.getAllowLazyPersist());
}
if (other.hasPinning()) {
setPinning(other.getPinning());
}
if (!other.targetPinnings_.isEmpty()) {
if (targetPinnings_.isEmpty()) {
targetPinnings_ = other.targetPinnings_;
bitField0_ = (bitField0_ & ~0x00004000);
} else {
ensureTargetPinningsIsMutable();
targetPinnings_.addAll(other.targetPinnings_);
}
onChanged();
}
if (other.hasStorageId()) {
storageId_ = other.storageId_;
bitField0_ |= 0x00008000;
onChanged();
}
if (!other.targetStorageIds_.isEmpty()) {
if (targetStorageIds_.isEmpty()) {
targetStorageIds_ = other.targetStorageIds_;
bitField0_ = (bitField0_ & ~0x00010000);
} else {
ensureTargetStorageIdsIsMutable();
targetStorageIds_.addAll(other.targetStorageIds_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasStage()) {
return false;
}
if (!hasPipelineSize()) {
return false;
}
if (!hasMinBytesRcvd()) {
return false;
}
if (!hasMaxBytesRcvd()) {
return false;
}
if (!hasLatestGenerationStamp()) {
return false;
}
if (!hasRequestedChecksum()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
return false;
}
}
if (hasSource()) {
if (!getSource().isInitialized()) {
return false;
}
}
if (!getRequestedChecksum().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
input.readMessage(
getHeaderFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000001;
break;
} // case 10
case 18: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto m =
input.readMessage(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER,
extensionRegistry);
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(m);
} else {
targetsBuilder_.addMessage(m);
}
break;
} // case 18
case 26: {
input.readMessage(
getSourceFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000004;
break;
} // case 26
case 32: {
int tmpRaw = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage tmpValue =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(4, tmpRaw);
} else {
stage_ = tmpRaw;
bitField0_ |= 0x00000008;
}
break;
} // case 32
case 40: {
pipelineSize_ = input.readUInt32();
bitField0_ |= 0x00000010;
break;
} // case 40
case 48: {
minBytesRcvd_ = input.readUInt64();
bitField0_ |= 0x00000020;
break;
} // case 48
case 56: {
maxBytesRcvd_ = input.readUInt64();
bitField0_ |= 0x00000040;
break;
} // case 56
case 64: {
latestGenerationStamp_ = input.readUInt64();
bitField0_ |= 0x00000080;
break;
} // case 64
case 74: {
input.readMessage(
getRequestedChecksumFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000100;
break;
} // case 74
case 82: {
input.readMessage(
getCachingStrategyFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000200;
break;
} // case 82
case 88: {
int tmpRaw = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(11, tmpRaw);
} else {
storageType_ = tmpRaw;
bitField0_ |= 0x00000400;
}
break;
} // case 88
case 96: {
int tmpRaw = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(12, tmpRaw);
} else {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.add(tmpRaw);
}
break;
} // case 96
case 98: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int tmpRaw = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(12, tmpRaw);
} else {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.add(tmpRaw);
}
}
input.popLimit(oldLimit);
break;
} // case 98
case 104: {
allowLazyPersist_ = input.readBool();
bitField0_ |= 0x00001000;
break;
} // case 104
case 112: {
pinning_ = input.readBool();
bitField0_ |= 0x00002000;
break;
} // case 112
case 120: {
boolean v = input.readBool();
ensureTargetPinningsIsMutable();
targetPinnings_.addBoolean(v);
break;
} // case 120
case 122: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
ensureTargetPinningsIsMutable();
while (input.getBytesUntilLimit() > 0) {
targetPinnings_.addBoolean(input.readBool());
}
input.popLimit(limit);
break;
} // case 122
case 130: {
storageId_ = input.readBytes();
bitField0_ |= 0x00008000;
break;
} // case 130
case 138: {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
ensureTargetStorageIdsIsMutable();
targetStorageIds_.add(bs);
break;
} // case 138
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
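/*
* Note on the tag values in the switch above: each tag is
* (fieldNumber << 3) | wireType. For example field 1 (header, length-delimited)
* gives (1 << 3) | 2 = 10; field 12 (targetStorageTypes) appears both unpacked
* as tag 96 ((12 << 3) | 0, one varint per element) and packed as tag 98
* ((12 << 3) | 2, a length-delimited run of varints), and both encodings are
* accepted. The same pairing applies to targetPinnings at tags 120/122.
*/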
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
* @return Whether the header field is set.
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
* @return The header.
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
header_ != null &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
getHeaderBuilder().mergeFrom(value);
} else {
header_ = value;
}
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder clearHeader() {
bitField0_ = (bitField0_ & ~0x00000001);
header_ = null;
if (headerBuilder_ != null) {
headerBuilder_.dispose();
headerBuilder_ = null;
}
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
getHeader(),
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_ =
java.util.Collections.emptyList();
private void ensureTargetsIsMutable() {
if (!((bitField0_ & 0x00000002) != 0)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(targets_);
bitField0_ |= 0x00000002;
}
}
private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> targetsBuilder_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
if (targetsBuilder_ == null) {
return java.util.Collections.unmodifiableList(targets_);
} else {
return targetsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public int getTargetsCount() {
if (targetsBuilder_ == null) {
return targets_.size();
} else {
return targetsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
if (targetsBuilder_ == null) {
return targets_.get(index);
} else {
return targetsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.set(index, value);
onChanged();
} else {
targetsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.set(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(value);
onChanged();
} else {
targetsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(index, value);
onChanged();
} else {
targetsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addAllTargets(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, targets_);
onChanged();
} else {
targetsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder clearTargets() {
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
targetsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder removeTargets(int index) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.remove(index);
onChanged();
} else {
targetsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getTargetsBuilder(
int index) {
return getTargetsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index) {
if (targetsBuilder_ == null) {
return targets_.get(index); } else {
return targetsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList() {
if (targetsBuilder_ != null) {
return targetsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(targets_);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder() {
return getTargetsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder(
int index) {
return getTargetsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder>
getTargetsBuilderList() {
return getTargetsFieldBuilder().getBuilderList();
}
private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsFieldBuilder() {
if (targetsBuilder_ == null) {
targetsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
targets_,
((bitField0_ & 0x00000002) != 0),
getParentForChildren(),
isClean());
targets_ = null;
}
return targetsBuilder_;
}
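/*
* Sketch of populating the repeated targets field through the builder (the
* DatanodeInfoProto values dn1..dn3 are hypothetical):
*
*   OpWriteBlockProto.Builder b = OpWriteBlockProto.newBuilder();
*   b.addTargets(dn1);
*   b.addAllTargets(java.util.Arrays.asList(dn2, dn3));
*   b.clearTargets();   // back to an empty list
*/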
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> sourceBuilder_;
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
* @return Whether the source field is set.
*/
public boolean hasSource() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
* @return The source.
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
if (sourceBuilder_ == null) {
return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
} else {
return sourceBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder setSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (sourceBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
source_ = value;
} else {
sourceBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder setSource(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (sourceBuilder_ == null) {
source_ = builderForValue.build();
} else {
sourceBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder mergeSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (sourceBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0) &&
source_ != null &&
source_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) {
getSourceBuilder().mergeFrom(value);
} else {
source_ = value;
}
} else {
sourceBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder clearSource() {
bitField0_ = (bitField0_ & ~0x00000004);
source_ = null;
if (sourceBuilder_ != null) {
sourceBuilder_.dispose();
sourceBuilder_ = null;
}
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getSourceBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getSourceFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
if (sourceBuilder_ != null) {
return sourceBuilder_.getMessageOrBuilder();
} else {
return source_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
}
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getSourceFieldBuilder() {
if (sourceBuilder_ == null) {
sourceBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
getSource(),
getParentForChildren(),
isClean());
source_ = null;
}
return sourceBuilder_;
}
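// Illustrative sketch (not part of the generated file): setting the optional `source`
// field handled above on this builder, which from its field numbers appears to be
// OpWriteBlockProto.Builder. The builder variable `writeOp` and the DatanodeInfoProto
// value are hypothetical placeholders; DatanodeInfoProto's own required fields are
// omitted here.
//
//   HdfsProtos.DatanodeInfoProto sourceNode = /* DataNode initiating the transfer */ null;
//   writeOp.setSource(sourceNode);    // stores the message and sets presence bit 0x00000004
//   writeOp.mergeSource(sourceNode);  // merges into an already-set value instead of replacing
//   writeOp.clearSource();            // drops the value and clears the presence bit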
private int stage_ = 0;
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
* @return Whether the stage field is set.
*/
@java.lang.Override public boolean hasStage() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
* @return The stage.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.forNumber(stage_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND : result;
}
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
* @param value The stage to set.
* @return This builder for chaining.
*/
public Builder setStage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
stage_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
* @return This builder for chaining.
*/
public Builder clearStage() {
bitField0_ = (bitField0_ & ~0x00000008);
stage_ = 0;
onChanged();
return this;
}
private int pipelineSize_ ;
/**
* required uint32 pipelineSize = 5;
* @return Whether the pipelineSize field is set.
*/
@java.lang.Override
public boolean hasPipelineSize() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* required uint32 pipelineSize = 5;
* @return The pipelineSize.
*/
@java.lang.Override
public int getPipelineSize() {
return pipelineSize_;
}
/**
* required uint32 pipelineSize = 5;
* @param value The pipelineSize to set.
* @return This builder for chaining.
*/
public Builder setPipelineSize(int value) {
pipelineSize_ = value;
bitField0_ |= 0x00000010;
onChanged();
return this;
}
/**
* required uint32 pipelineSize = 5;
* @return This builder for chaining.
*/
public Builder clearPipelineSize() {
bitField0_ = (bitField0_ & ~0x00000010);
pipelineSize_ = 0;
onChanged();
return this;
}
private long minBytesRcvd_ ;
/**
* required uint64 minBytesRcvd = 6;
* @return Whether the minBytesRcvd field is set.
*/
@java.lang.Override
public boolean hasMinBytesRcvd() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
* required uint64 minBytesRcvd = 6;
* @return The minBytesRcvd.
*/
@java.lang.Override
public long getMinBytesRcvd() {
return minBytesRcvd_;
}
/**
* required uint64 minBytesRcvd = 6;
* @param value The minBytesRcvd to set.
* @return This builder for chaining.
*/
public Builder setMinBytesRcvd(long value) {
minBytesRcvd_ = value;
bitField0_ |= 0x00000020;
onChanged();
return this;
}
/**
* required uint64 minBytesRcvd = 6;
* @return This builder for chaining.
*/
public Builder clearMinBytesRcvd() {
bitField0_ = (bitField0_ & ~0x00000020);
minBytesRcvd_ = 0L;
onChanged();
return this;
}
private long maxBytesRcvd_ ;
/**
* required uint64 maxBytesRcvd = 7;
* @return Whether the maxBytesRcvd field is set.
*/
@java.lang.Override
public boolean hasMaxBytesRcvd() {
return ((bitField0_ & 0x00000040) != 0);
}
/**
* required uint64 maxBytesRcvd = 7;
* @return The maxBytesRcvd.
*/
@java.lang.Override
public long getMaxBytesRcvd() {
return maxBytesRcvd_;
}
/**
* required uint64 maxBytesRcvd = 7;
* @param value The maxBytesRcvd to set.
* @return This builder for chaining.
*/
public Builder setMaxBytesRcvd(long value) {
maxBytesRcvd_ = value;
bitField0_ |= 0x00000040;
onChanged();
return this;
}
/**
* required uint64 maxBytesRcvd = 7;
* @return This builder for chaining.
*/
public Builder clearMaxBytesRcvd() {
bitField0_ = (bitField0_ & ~0x00000040);
maxBytesRcvd_ = 0L;
onChanged();
return this;
}
private long latestGenerationStamp_ ;
/**
* required uint64 latestGenerationStamp = 8;
* @return Whether the latestGenerationStamp field is set.
*/
@java.lang.Override
public boolean hasLatestGenerationStamp() {
return ((bitField0_ & 0x00000080) != 0);
}
/**
* required uint64 latestGenerationStamp = 8;
* @return The latestGenerationStamp.
*/
@java.lang.Override
public long getLatestGenerationStamp() {
return latestGenerationStamp_;
}
/**
* required uint64 latestGenerationStamp = 8;
* @param value The latestGenerationStamp to set.
* @return This builder for chaining.
*/
public Builder setLatestGenerationStamp(long value) {
latestGenerationStamp_ = value;
bitField0_ |= 0x00000080;
onChanged();
return this;
}
/**
* required uint64 latestGenerationStamp = 8;
* @return This builder for chaining.
*/
public Builder clearLatestGenerationStamp() {
bitField0_ = (bitField0_ & ~0x00000080);
latestGenerationStamp_ = 0L;
onChanged();
return this;
}
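// Illustrative sketch (not part of the generated file): the scalar fields above
// (stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp) are plain
// setters that record the value and flip a presence bit, so they chain naturally.
// The builder variable `writeOp` and the concrete values are hypothetical.
//
//   writeOp.setStage(OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND)
//          .setPipelineSize(3)               // e.g. a three-DataNode replication pipeline
//          .setMinBytesRcvd(0L)
//          .setMaxBytesRcvd(0L)
//          .setLatestGenerationStamp(1001L); // generation stamp known to the client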
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> requestedChecksumBuilder_;
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
* @return Whether the requestedChecksum field is set.
*/
public boolean hasRequestedChecksum() {
return ((bitField0_ & 0x00000100) != 0);
}
/**
*
**
* The requested checksum mechanism for this block write.
*
*
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>
getRequestedChecksumFieldBuilder() {
if (requestedChecksumBuilder_ == null) {
requestedChecksumBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>(
getRequestedChecksum(),
getParentForChildren(),
isClean());
requestedChecksum_ = null;
}
return requestedChecksumBuilder_;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder> cachingStrategyBuilder_;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
* @return Whether the cachingStrategy field is set.
*/
public boolean hasCachingStrategy() {
return ((bitField0_ & 0x00000200) != 0);
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
* @return The cachingStrategy.
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
if (cachingStrategyBuilder_ == null) {
return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
} else {
return cachingStrategyBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder setCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
if (cachingStrategyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
cachingStrategy_ = value;
} else {
cachingStrategyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000200;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder setCachingStrategy(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder builderForValue) {
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = builderForValue.build();
} else {
cachingStrategyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000200;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder mergeCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
if (cachingStrategyBuilder_ == null) {
if (((bitField0_ & 0x00000200) != 0) &&
cachingStrategy_ != null &&
cachingStrategy_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance()) {
getCachingStrategyBuilder().mergeFrom(value);
} else {
cachingStrategy_ = value;
}
} else {
cachingStrategyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000200;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder clearCachingStrategy() {
bitField0_ = (bitField0_ & ~0x00000200);
cachingStrategy_ = null;
if (cachingStrategyBuilder_ != null) {
cachingStrategyBuilder_.dispose();
cachingStrategyBuilder_ = null;
}
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder getCachingStrategyBuilder() {
bitField0_ |= 0x00000200;
onChanged();
return getCachingStrategyFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
if (cachingStrategyBuilder_ != null) {
return cachingStrategyBuilder_.getMessageOrBuilder();
} else {
return cachingStrategy_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
}
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>
getCachingStrategyFieldBuilder() {
if (cachingStrategyBuilder_ == null) {
cachingStrategyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>(
getCachingStrategy(),
getParentForChildren(),
isClean());
cachingStrategy_ = null;
}
return cachingStrategyBuilder_;
}
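// Illustrative sketch (not part of the generated file): populating the optional
// cachingStrategy message. CachingStrategyProto's own fields are not shown in this
// excerpt, so the value below is a placeholder; `writeOp` is hypothetical.
//
//   CachingStrategyProto strategy = /* CachingStrategyProto.newBuilder()...build() */ null;
//   writeOp.setCachingStrategy(strategy);    // sets presence bit 0x00000200
//   writeOp.mergeCachingStrategy(strategy);  // merges into an existing value instead
//   writeOp.clearCachingStrategy();          // drops the value and clears the bit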
private int storageType_ = 1;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
* @return Whether the storageType field is set.
*/
@java.lang.Override public boolean hasStorageType() {
return ((bitField0_ & 0x00000400) != 0);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
* @return The storageType.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
* @param value The storageType to set.
* @return This builder for chaining.
*/
public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000400;
storageType_ = value.getNumber();
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
* @return This builder for chaining.
*/
public Builder clearStorageType() {
bitField0_ = (bitField0_ & ~0x00000400);
storageType_ = 1;
onChanged();
return this;
}
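// Illustrative sketch (not part of the generated file): storageType is stored as the
// enum's wire number (an int defaulting to 1, i.e. DISK) and converted back through
// StorageTypeProto.forNumber() in the getter above. The SSD constant is assumed from
// HdfsProtos.StorageTypeProto; `writeOp` is hypothetical.
//
//   writeOp.setStorageType(HdfsProtos.StorageTypeProto.SSD);  // persists the enum's number
//   writeOp.clearStorageType();                               // falls back to the DISK default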
private java.util.List<java.lang.Integer> targetStorageTypes_ =
java.util.Collections.emptyList();
private void ensureTargetStorageTypesIsMutable() {
if (!((bitField0_ & 0x00000800) != 0)) {
targetStorageTypes_ = new java.util.ArrayList<java.lang.Integer>(targetStorageTypes_);
bitField0_ |= 0x00000800;
}
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @return A list containing the targetStorageTypes.
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_, targetStorageTypes_converter_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @return The count of targetStorageTypes.
*/
public int getTargetStorageTypesCount() {
return targetStorageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @param index The index of the element to return.
* @return The targetStorageTypes at the given index.
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
return targetStorageTypes_converter_.convert(targetStorageTypes_.get(index));
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @param index The index to set the value at.
* @param value The targetStorageTypes to set.
* @return This builder for chaining.
*/
public Builder setTargetStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.set(index, value.getNumber());
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @param value The targetStorageTypes to add.
* @return This builder for chaining.
*/
public Builder addTargetStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.add(value.getNumber());
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @param values The targetStorageTypes to add.
* @return This builder for chaining.
*/
public Builder addAllTargetStorageTypes(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
ensureTargetStorageTypesIsMutable();
for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) {
targetStorageTypes_.add(value.getNumber());
}
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
* @return This builder for chaining.
*/
public Builder clearTargetStorageTypes() {
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000800);
onChanged();
return this;
}
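// Illustrative sketch (not part of the generated file): targetStorageTypes is kept
// internally as a list of Integers (the enum numbers) and exposed through a
// ListAdapter, which is why the adders above call value.getNumber(). The builder
// variable `writeOp` is hypothetical.
//
//   writeOp.addTargetStorageTypes(HdfsProtos.StorageTypeProto.DISK);
//   writeOp.addAllTargetStorageTypes(
//       java.util.Arrays.asList(HdfsProtos.StorageTypeProto.DISK,
//                               HdfsProtos.StorageTypeProto.DISK));
//   writeOp.clearTargetStorageTypes();   // resets to the shared empty list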
private boolean allowLazyPersist_ ;
/**
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*
* optional bool allowLazyPersist = 13 [default = false];
* @return Whether the allowLazyPersist field is set.
*/
@java.lang.Override
public boolean hasAllowLazyPersist() {
return ((bitField0_ & 0x00001000) != 0);
}
/**
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*
* optional bool allowLazyPersist = 13 [default = false];
* @param value The allowLazyPersist to set.
* @return This builder for chaining.
*/
public Builder setAllowLazyPersist(boolean value) {
allowLazyPersist_ = value;
bitField0_ |= 0x00001000;
onChanged();
return this;
}
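// Illustrative sketch (not part of the generated file): allowLazyPersist is only a
// hint, as the comment above says, so a DataNode may still write the replica straight
// to disk even when it is set. The builder variable `writeOp` is hypothetical.
//
//   writeOp.setAllowLazyPersist(true);   // request transient (memory) storage, best effort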
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*/
public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder>
getBlockTokensBuilderList() {
return getBlockTokensFieldBuilder().getBuilderList();
}
private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensFieldBuilder() {
if (blockTokensBuilder_ == null) {
blockTokensBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
blockTokens_,
((bitField0_ & 0x00000004) != 0),
getParentForChildren(),
isClean());
blockTokens_ = null;
}
return blockTokensBuilder_;
}
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_;
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
* @return Whether the ecPolicy field is set.
*/
public boolean hasEcPolicy() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
* @return The ecPolicy.
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
if (ecPolicyBuilder_ == null) {
return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
} else {
return ecPolicyBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ecPolicy_ = value;
} else {
ecPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder setEcPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
if (ecPolicyBuilder_ == null) {
ecPolicy_ = builderForValue.build();
} else {
ecPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
if (((bitField0_ & 0x00000008) != 0) &&
ecPolicy_ != null &&
ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) {
getEcPolicyBuilder().mergeFrom(value);
} else {
ecPolicy_ = value;
}
} else {
ecPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder clearEcPolicy() {
bitField0_ = (bitField0_ & ~0x00000008);
ecPolicy_ = null;
if (ecPolicyBuilder_ != null) {
ecPolicyBuilder_.dispose();
ecPolicyBuilder_ = null;
}
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getEcPolicyFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
if (ecPolicyBuilder_ != null) {
return ecPolicyBuilder_.getMessageOrBuilder();
} else {
return ecPolicy_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
}
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>
getEcPolicyFieldBuilder() {
if (ecPolicyBuilder_ == null) {
ecPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
getEcPolicy(),
getParentForChildren(),
isClean());
ecPolicy_ = null;
}
return ecPolicyBuilder_;
}
private org.apache.hadoop.thirdparty.protobuf.Internal.IntList blockIndices_ = emptyIntList();
private void ensureBlockIndicesIsMutable() {
if (!((bitField0_ & 0x00000010) != 0)) {
blockIndices_ = mutableCopy(blockIndices_);
bitField0_ |= 0x00000010;
}
}
/**
* repeated uint32 blockIndices = 5;
* @return A list containing the blockIndices.
*/
public java.util.List<java.lang.Integer>
getBlockIndicesList() {
return ((bitField0_ & 0x00000010) != 0) ?
java.util.Collections.unmodifiableList(blockIndices_) : blockIndices_;
}
/**
* repeated uint32 blockIndices = 5;
* @return The count of blockIndices.
*/
public int getBlockIndicesCount() {
return blockIndices_.size();
}
/**
* repeated uint32 blockIndices = 5;
* @param index The index of the element to return.
* @return The blockIndices at the given index.
*/
public int getBlockIndices(int index) {
return blockIndices_.getInt(index);
}
/**
* repeated uint32 blockIndices = 5;
* @param index The index to set the value at.
* @param value The blockIndices to set.
* @return This builder for chaining.
*/
public Builder setBlockIndices(
int index, int value) {
ensureBlockIndicesIsMutable();
blockIndices_.setInt(index, value);
onChanged();
return this;
}
/**
* repeated uint32 blockIndices = 5;
* @param value The blockIndices to add.
* @return This builder for chaining.
*/
public Builder addBlockIndices(int value) {
ensureBlockIndicesIsMutable();
blockIndices_.addInt(value);
onChanged();
return this;
}
/**
* repeated uint32 blockIndices = 5;
* @param values The blockIndices to add.
* @return This builder for chaining.
*/
public Builder addAllBlockIndices(
java.lang.Iterable<? extends java.lang.Integer> values) {
ensureBlockIndicesIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, blockIndices_);
onChanged();
return this;
}
/**
* repeated uint32 blockIndices = 5;
* @return This builder for chaining.
*/
public Builder clearBlockIndices() {
blockIndices_ = emptyIntList();
bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
return this;
}
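// Illustrative sketch (not part of the generated file): blockIndices uses protobuf's
// primitive IntList rather than a boxed List, so the accessors above read and write
// ints directly. The builder variable `checksumOp` and the indices are hypothetical.
//
//   checksumOp.addBlockIndices(0);
//   checksumOp.addAllBlockIndices(java.util.Arrays.asList(1, 2, 3));
//   int first = checksumOp.getBlockIndices(0);   // reads back 0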
private long requestedNumBytes_ ;
/**
* required uint64 requestedNumBytes = 6;
* @return Whether the requestedNumBytes field is set.
*/
@java.lang.Override
public boolean hasRequestedNumBytes() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
* required uint64 requestedNumBytes = 6;
* @return The requestedNumBytes.
*/
@java.lang.Override
public long getRequestedNumBytes() {
return requestedNumBytes_;
}
/**
* required uint64 requestedNumBytes = 6;
* @param value The requestedNumBytes to set.
* @return This builder for chaining.
*/
public Builder setRequestedNumBytes(long value) {
requestedNumBytes_ = value;
bitField0_ |= 0x00000020;
onChanged();
return this;
}
/**
* required uint64 requestedNumBytes = 6;
* @return This builder for chaining.
*/
public Builder clearRequestedNumBytes() {
bitField0_ = (bitField0_ & ~0x00000020);
requestedNumBytes_ = 0L;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> blockChecksumOptionsBuilder_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
* @return Whether the blockChecksumOptions field is set.
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000040) != 0);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
* @return The blockChecksumOptions.
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
} else {
return blockChecksumOptionsBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder setBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blockChecksumOptions_ = value;
} else {
blockChecksumOptionsBuilder_.setMessage(value);
}
bitField0_ |= 0x00000040;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder setBlockChecksumOptions(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder builderForValue) {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = builderForValue.build();
} else {
blockChecksumOptionsBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000040;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder mergeBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (((bitField0_ & 0x00000040) != 0) &&
blockChecksumOptions_ != null &&
blockChecksumOptions_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) {
getBlockChecksumOptionsBuilder().mergeFrom(value);
} else {
blockChecksumOptions_ = value;
}
} else {
blockChecksumOptionsBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000040;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder clearBlockChecksumOptions() {
bitField0_ = (bitField0_ & ~0x00000040);
blockChecksumOptions_ = null;
if (blockChecksumOptionsBuilder_ != null) {
blockChecksumOptionsBuilder_.dispose();
blockChecksumOptionsBuilder_ = null;
}
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder getBlockChecksumOptionsBuilder() {
bitField0_ |= 0x00000040;
onChanged();
return getBlockChecksumOptionsFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
if (blockChecksumOptionsBuilder_ != null) {
return blockChecksumOptionsBuilder_.getMessageOrBuilder();
} else {
return blockChecksumOptions_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>
getBlockChecksumOptionsFieldBuilder() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>(
getBlockChecksumOptions(),
getParentForChildren(),
isClean());
blockChecksumOptions_ = null;
}
return blockChecksumOptionsBuilder_;
}
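// Illustrative sketch (not part of the generated file): the pieces of this
// OpBlockGroupChecksumProto.Builder that appear in this excerpt. The
// ErasureCodingPolicyProto and BlockChecksumOptionsProto values, the variable names,
// and the byte count are all hypothetical placeholders.
//
//   checksumOp.setEcPolicy(/* HdfsProtos.ErasureCodingPolicyProto */ ecPolicy)   // required
//             .setRequestedNumBytes(64L * 1024 * 1024)                           // required
//             .setBlockChecksumOptions(/* optional */ options);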
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpBlockGroupChecksumProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpBlockGroupChecksumProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockGroupChecksumProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpBlockGroupChecksumProto>() {
@java.lang.Override
public OpBlockGroupChecksumProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockGroupChecksumProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockGroupChecksumProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
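// Illustrative sketch (not part of the generated file): generated messages expose the
// same static parseFrom() overloads as shown for ShortCircuitShmIdProto further down
// (OpBlockGroupChecksumProto's copies are not shown in this excerpt). They all route
// through the parsePartialFrom() defined in PARSER above. The byte source here is a
// hypothetical placeholder.
//
//   byte[] wire = /* bytes read from a DataTransferProtocol stream */ new byte[0];
//   OpBlockGroupChecksumProto op =
//       OpBlockGroupChecksumProto.parseFrom(wire);   // throws InvalidProtocolBufferException
//                                                    // if required fields are missing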
public interface ShortCircuitShmIdProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShortCircuitShmIdProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required int64 hi = 1;
* @return Whether the hi field is set.
*/
boolean hasHi();
/**
* required int64 hi = 1;
* @return The hi.
*/
long getHi();
/**
* required int64 lo = 2;
* @return Whether the lo field is set.
*/
boolean hasLo();
/**
* required int64 lo = 2;
* @return The lo.
*/
long getLo();
}
/**
*
**
* An ID uniquely identifying a shared memory segment.
*
*
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmIdProto}
*/
public static final class ShortCircuitShmIdProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ShortCircuitShmIdProto)
ShortCircuitShmIdProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ShortCircuitShmIdProto.newBuilder() to construct.
private ShortCircuitShmIdProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ShortCircuitShmIdProto() {
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new ShortCircuitShmIdProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder.class);
}
private int bitField0_;
public static final int HI_FIELD_NUMBER = 1;
private long hi_ = 0L;
/**
* required int64 hi = 1;
* @return Whether the hi field is set.
*/
@java.lang.Override
public boolean hasHi() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required int64 hi = 1;
* @return The hi.
*/
@java.lang.Override
public long getHi() {
return hi_;
}
public static final int LO_FIELD_NUMBER = 2;
private long lo_ = 0L;
/**
* required int64 lo = 2;
* @return Whether the lo field is set.
*/
@java.lang.Override
public boolean hasLo() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required int64 lo = 2;
* @return The lo.
*/
@java.lang.Override
public long getLo() {
return lo_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHi()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLo()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeInt64(1, hi_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeInt64(2, lo_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeInt64Size(1, hi_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeInt64Size(2, lo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto) obj;
if (hasHi() != other.hasHi()) return false;
if (hasHi()) {
if (getHi()
!= other.getHi()) return false;
}
if (hasLo() != other.hasLo()) return false;
if (hasLo()) {
if (getLo()
!= other.getLo()) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHi()) {
hash = (37 * hash) + HI_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getHi());
}
if (hasLo()) {
hash = (37 * hash) + LO_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getLo());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* An ID uniquely identifying a shared memory segment.
*
*
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmIdProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShortCircuitShmIdProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.newBuilder()
private Builder() {
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
hi_ = 0L;
lo_ = 0L;
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.hi_ = hi_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.lo_ = lo_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) return this;
if (other.hasHi()) {
setHi(other.getHi());
}
if (other.hasLo()) {
setLo(other.getLo());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHi()) {
return false;
}
if (!hasLo()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
hi_ = input.readInt64();
bitField0_ |= 0x00000001;
break;
} // case 8
case 16: {
lo_ = input.readInt64();
bitField0_ |= 0x00000002;
break;
} // case 16
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private long hi_ ;
/**
* required int64 hi = 1;
* @return Whether the hi field is set.
*/
@java.lang.Override
public boolean hasHi() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required int64 hi = 1;
* @return The hi.
*/
@java.lang.Override
public long getHi() {
return hi_;
}
/**
* required int64 hi = 1;
* @param value The hi to set.
* @return This builder for chaining.
*/
public Builder setHi(long value) {
hi_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required int64 hi = 1;
* @return This builder for chaining.
*/
public Builder clearHi() {
bitField0_ = (bitField0_ & ~0x00000001);
hi_ = 0L;
onChanged();
return this;
}
private long lo_ ;
/**
* required int64 lo = 2;
* @return Whether the lo field is set.
*/
@java.lang.Override
public boolean hasLo() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required int64 lo = 2;
* @return The lo.
*/
@java.lang.Override
public long getLo() {
return lo_;
}
/**
* required int64 lo = 2;
* @param value The lo to set.
* @return This builder for chaining.
*/
public Builder setLo(long value) {
lo_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* required int64 lo = 2;
* @return This builder for chaining.
*/
public Builder clearLo() {
bitField0_ = (bitField0_ & ~0x00000002);
lo_ = 0L;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmIdProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmIdProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmIdProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmIdProto>() {
@java.lang.Override
public ShortCircuitShmIdProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmIdProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmIdProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
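// Illustrative sketch (not part of the generated file): building and round-tripping a
// ShortCircuitShmIdProto. Both hi and lo are required, so build() throws if either
// presence bit is unset (see isInitialized() above). The 128-bit id value here is a
// hypothetical example.
//
//   ShortCircuitShmIdProto shmId = ShortCircuitShmIdProto.newBuilder()
//       .setHi(0x0123456789abcdefL)   // high 64 bits of the segment id
//       .setLo(0x0fedcba987654321L)   // low 64 bits
//       .build();
//   byte[] wire = shmId.toByteArray();
//   ShortCircuitShmIdProto back = ShortCircuitShmIdProto.parseFrom(wire);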
public interface ShortCircuitShmSlotProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShortCircuitShmSlotProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
* @return Whether the shmId field is set.
*/
boolean hasShmId();
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
* @return The shmId.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId();
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder();
/**
* required int32 slotIdx = 2;
* @return Whether the slotIdx field is set.
*/
boolean hasSlotIdx();
/**
* required int32 slotIdx = 2;
* @return The slotIdx.
*/
int getSlotIdx();
}
/**
*
**
* An ID uniquely identifying a slot within a shared memory segment.
*
*
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmSlotProto}
*/
public static final class ShortCircuitShmSlotProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ShortCircuitShmSlotProto)
ShortCircuitShmSlotProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ShortCircuitShmSlotProto.newBuilder() to construct.
private ShortCircuitShmSlotProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ShortCircuitShmSlotProto() {
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new ShortCircuitShmSlotProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder.class);
}
private int bitField0_;
public static final int SHMID_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto shmId_;
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
* @return Whether the shmId field is set.
*/
@java.lang.Override
public boolean hasShmId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
* @return The shmId.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId() {
return shmId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder() {
return shmId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
}
public static final int SLOTIDX_FIELD_NUMBER = 2;
private int slotIdx_ = 0;
/**
* required int32 slotIdx = 2;
* @return Whether the slotIdx field is set.
*/
@java.lang.Override
public boolean hasSlotIdx() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required int32 slotIdx = 2;
* @return The slotIdx.
*/
@java.lang.Override
public int getSlotIdx() {
return slotIdx_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasShmId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSlotIdx()) {
memoizedIsInitialized = 0;
return false;
}
if (!getShmId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getShmId());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeInt32(2, slotIdx_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getShmId());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeInt32Size(2, slotIdx_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
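// Both getSerializedSize() and hashCode() cache their result (memoizedSize and
// memoizedHashCode); this is safe because a built message instance is immutable.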
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto) obj;
if (hasShmId() != other.hasShmId()) return false;
if (hasShmId()) {
if (!getShmId()
.equals(other.getShmId())) return false;
}
if (hasSlotIdx() != other.hasSlotIdx()) return false;
if (hasSlotIdx()) {
if (getSlotIdx()
!= other.getSlotIdx()) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasShmId()) {
hash = (37 * hash) + SHMID_FIELD_NUMBER;
hash = (53 * hash) + getShmId().hashCode();
}
if (hasSlotIdx()) {
hash = (37 * hash) + SLOTIDX_FIELD_NUMBER;
hash = (53 * hash) + getSlotIdx();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* An ID uniquely identifying a slot within a shared memory segment.
*
*
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmSlotProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShortCircuitShmSlotProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getShmIdFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
shmId_ = null;
if (shmIdBuilder_ != null) {
shmIdBuilder_.dispose();
shmIdBuilder_ = null;
}
slotIdx_ = 0;
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.shmId_ = shmIdBuilder_ == null
? shmId_
: shmIdBuilder_.build();
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.slotIdx_ = slotIdx_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance()) return this;
if (other.hasShmId()) {
mergeShmId(other.getShmId());
}
if (other.hasSlotIdx()) {
setSlotIdx(other.getSlotIdx());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasShmId()) {
return false;
}
if (!hasSlotIdx()) {
return false;
}
if (!getShmId().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
input.readMessage(
getShmIdFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000001;
break;
} // case 10
case 16: {
slotIdx_ = input.readInt32();
bitField0_ |= 0x00000002;
break;
} // case 16
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
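// Note on the tag constants in the switch above (standard protobuf wire format):
// a tag is (fieldNumber << 3) | wireType, so case 10 is field 1 (shmId) with wire
// type 2 (length-delimited message), case 16 is field 2 (slotIdx) with wire type 0
// (varint), and a tag of 0 signals end of input.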
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto shmId_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder> shmIdBuilder_;
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
* @return Whether the shmId field is set.
*/
public boolean hasShmId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
* @return The shmId.
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId() {
if (shmIdBuilder_ == null) {
return shmId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
} else {
return shmIdBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder setShmId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (shmIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
shmId_ = value;
} else {
shmIdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder setShmId(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder builderForValue) {
if (shmIdBuilder_ == null) {
shmId_ = builderForValue.build();
} else {
shmIdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder mergeShmId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (shmIdBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
shmId_ != null &&
shmId_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) {
getShmIdBuilder().mergeFrom(value);
} else {
shmId_ = value;
}
} else {
shmIdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder clearShmId() {
bitField0_ = (bitField0_ & ~0x00000001);
shmId_ = null;
if (shmIdBuilder_ != null) {
shmIdBuilder_.dispose();
shmIdBuilder_ = null;
}
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder getShmIdBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getShmIdFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder() {
if (shmIdBuilder_ != null) {
return shmIdBuilder_.getMessageOrBuilder();
} else {
return shmId_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
}
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>
getShmIdFieldBuilder() {
if (shmIdBuilder_ == null) {
shmIdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>(
getShmId(),
getParentForChildren(),
isClean());
shmId_ = null;
}
return shmIdBuilder_;
}
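// Usage note (illustrative): a caller can either supply a complete message via
// setShmId(value) or edit the nested message in place via getShmIdBuilder(); the
// SingleFieldBuilderV3 above is created lazily the first time the nested builder is
// requested (or eagerly when alwaysUseFieldBuilders is set). Assuming
// ShortCircuitShmIdProto keeps its usual hi/lo fields, an in-place edit could look like:
//
//   builder.getShmIdBuilder().setHi(123L).setLo(456L);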
private int slotIdx_ ;
/**
* required int32 slotIdx = 2;
* @return Whether the slotIdx field is set.
*/
@java.lang.Override
public boolean hasSlotIdx() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required int32 slotIdx = 2;
* @return The slotIdx.
*/
@java.lang.Override
public int getSlotIdx() {
return slotIdx_;
}
/**
* required int32 slotIdx = 2;
* @param value The slotIdx to set.
* @return This builder for chaining.
*/
public Builder setSlotIdx(int value) {
slotIdx_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* required int32 slotIdx = 2;
* @return This builder for chaining.
*/
public Builder clearSlotIdx() {
bitField0_ = (bitField0_ & ~0x00000002);
slotIdx_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmSlotProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmSlotProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmSlotProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmSlotProto>() {
@java.lang.Override
public ShortCircuitShmSlotProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
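// PARSER backs the static parseFrom/parseDelimitedFrom helpers above; when input is
// malformed it attaches whatever was decoded so far to the thrown
// InvalidProtocolBufferException via setUnfinishedMessage.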
public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmSlotProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmSlotProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
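// A minimal usage sketch for ShortCircuitShmSlotProto (illustrative; the shmId value
// is assumed to come from an earlier shared-memory registration). Both fields are
// required, so build() throws if either is unset:
//
//   ShortCircuitShmSlotProto slot = ShortCircuitShmSlotProto.newBuilder()
//       .setShmId(shmId)    // required .hadoop.hdfs.ShortCircuitShmIdProto
//       .setSlotIdx(0)      // required int32: index of the slot within the segment
//       .build();
//   byte[] wire = slot.toByteArray();
//   ShortCircuitShmSlotProto roundTripped = ShortCircuitShmSlotProto.parseFrom(wire);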
public interface OpRequestShortCircuitAccessProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpRequestShortCircuitAccessProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
* @return Whether the header field is set.
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
* @return The header.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
* @return Whether the maxVersion field is set.
*/
boolean hasMaxVersion();
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
* @return The maxVersion.
*/
int getMaxVersion();
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
* @return Whether the slotId field is set.
*/
boolean hasSlotId();
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
* @return The slotId.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId();
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder();
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
* @return Whether the supportsReceiptVerification field is set.
*/
boolean hasSupportsReceiptVerification();
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
* @return The supportsReceiptVerification.
*/
boolean getSupportsReceiptVerification();
}
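// A hedged sketch of populating this request, following the field comments above:
// maxVersion is the highest block-data version the client understands (currently 1),
// slotId is attached only when a shared-memory slot is in use, and
// supportsReceiptVerification defaults to false. The header and slot values are
// assumed to be built elsewhere; setSlotId and setSupportsReceiptVerification are the
// generated setters for the optional fields declared above.
//
//   OpRequestShortCircuitAccessProto request = OpRequestShortCircuitAccessProto.newBuilder()
//       .setHeader(baseHeader)                 // required BaseHeaderProto
//       .setMaxVersion(1)                      // required uint32
//       .setSlotId(slot)                       // optional ShortCircuitShmSlotProto
//       .setSupportsReceiptVerification(true)  // optional bool [default = false]
//       .build();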
/**
* Protobuf type {@code hadoop.hdfs.OpRequestShortCircuitAccessProto}
*/
public static final class OpRequestShortCircuitAccessProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpRequestShortCircuitAccessProto)
OpRequestShortCircuitAccessProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpRequestShortCircuitAccessProto.newBuilder() to construct.
private OpRequestShortCircuitAccessProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpRequestShortCircuitAccessProto() {
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new OpRequestShortCircuitAccessProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.Builder.class);
}
private int bitField0_;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
* @return Whether the header field is set.
*/
@java.lang.Override
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
* @return The header.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
public static final int MAXVERSION_FIELD_NUMBER = 2;
private int maxVersion_ = 0;
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
* @return Whether the maxVersion field is set.
*/
@java.lang.Override
public boolean hasMaxVersion() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
* @return The maxVersion.
*/
@java.lang.Override
public int getMaxVersion() {
return maxVersion_;
}
public static final int SLOTID_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_;
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
* @return Whether the slotId field is set.
*/
@java.lang.Override
public boolean hasSlotId() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
* @return The slotId.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
}
/**
*
**
* The shared memory slot to use, if we are using one.
*
*
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
}
public static final int SUPPORTSRECEIPTVERIFICATION_FIELD_NUMBER = 4;
private boolean supportsReceiptVerification_ = false;
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
* @return Whether the supportsReceiptVerification field is set.
*/
@java.lang.Override
public boolean hasSupportsReceiptVerification() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*
* optional bool supportsReceiptVerification = 4 [default = false];
* @return The supportsReceiptVerification.
*/
@java.lang.Override
public boolean getSupportsReceiptVerification() {
return supportsReceiptVerification_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMaxVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (hasSlotId()) {
if (!getSlotId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
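// isInitialized() mirrors the .proto declaration: header and maxVersion are required,
// slotId is validated only when present, and supportsReceiptVerification (an optional
// bool) needs no validation.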
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt32(2, maxVersion_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeMessage(3, getSlotId());
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeBool(4, supportsReceiptVerification_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getHeader());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(2, maxVersion_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(3, getSlotId());
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(4, supportsReceiptVerification_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto) obj;
if (hasHeader() != other.hasHeader()) return false;
if (hasHeader()) {
if (!getHeader()
.equals(other.getHeader())) return false;
}
if (hasMaxVersion() != other.hasMaxVersion()) return false;
if (hasMaxVersion()) {
if (getMaxVersion()
!= other.getMaxVersion()) return false;
}
if (hasSlotId() != other.hasSlotId()) return false;
if (hasSlotId()) {
if (!getSlotId()
.equals(other.getSlotId())) return false;
}
if (hasSupportsReceiptVerification() != other.hasSupportsReceiptVerification()) return false;
if (hasSupportsReceiptVerification()) {
if (getSupportsReceiptVerification()
!= other.getSupportsReceiptVerification()) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (hasMaxVersion()) {
hash = (37 * hash) + MAXVERSION_FIELD_NUMBER;
hash = (53 * hash) + getMaxVersion();
}
if (hasSlotId()) {
hash = (37 * hash) + SLOTID_FIELD_NUMBER;
hash = (53 * hash) + getSlotId().hashCode();
}
if (hasSupportsReceiptVerification()) {
hash = (37 * hash) + SUPPORTSRECEIPTVERIFICATION_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getSupportsReceiptVerification());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpRequestShortCircuitAccessProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpRequestShortCircuitAccessProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getSlotIdFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
header_ = null;
if (headerBuilder_ != null) {
headerBuilder_.dispose();
headerBuilder_ = null;
}
maxVersion_ = 0;
slotId_ = null;
if (slotIdBuilder_ != null) {
slotIdBuilder_.dispose();
slotIdBuilder_ = null;
}
supportsReceiptVerification_ = false;
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.header_ = headerBuilder_ == null
? header_
: headerBuilder_.build();
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.maxVersion_ = maxVersion_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.slotId_ = slotIdBuilder_ == null
? slotId_
: slotIdBuilder_.build();
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.supportsReceiptVerification_ = supportsReceiptVerification_;
to_bitField0_ |= 0x00000008;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (other.hasMaxVersion()) {
setMaxVersion(other.getMaxVersion());
}
if (other.hasSlotId()) {
mergeSlotId(other.getSlotId());
}
if (other.hasSupportsReceiptVerification()) {
setSupportsReceiptVerification(other.getSupportsReceiptVerification());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasMaxVersion()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
if (hasSlotId()) {
if (!getSlotId().isInitialized()) {
return false;
}
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
input.readMessage(
getHeaderFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000001;
break;
} // case 10
case 16: {
maxVersion_ = input.readUInt32();
bitField0_ |= 0x00000002;
break;
} // case 16
case 26: {
input.readMessage(
getSlotIdFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000004;
break;
} // case 26
case 32: {
supportsReceiptVerification_ = input.readBool();
bitField0_ |= 0x00000008;
break;
} // case 32
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
* @return Whether the header field is set.
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
* @return The header.
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
header_ != null &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
getHeaderBuilder().mergeFrom(value);
} else {
header_ = value;
}
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder clearHeader() {
bitField0_ = (bitField0_ & ~0x00000001);
header_ = null;
if (headerBuilder_ != null) {
headerBuilder_.dispose();
headerBuilder_ = null;
}
onChanged();
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
getHeader(),
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
private int maxVersion_ ;
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
* @return Whether the maxVersion field is set.
*/
@java.lang.Override
public boolean hasMaxVersion() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
* @return The maxVersion.
*/
@java.lang.Override
public int getMaxVersion() {
return maxVersion_;
}
/**
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*
* required uint32 maxVersion = 2;
* @param value The maxVersion to set.
* @return This builder for chaining.
*/
public Builder setMaxVersion(int value) {
maxVersion_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
*
**
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*
* required string clientName = 1;
* @param value The bytes for clientName to set.
* @return This builder for chaining.
*/
public Builder setClientNameBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
clientName_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> traceInfoBuilder_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
* @return Whether the traceInfo field is set.
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
* @return The traceInfo.
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
if (traceInfoBuilder_ == null) {
return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
} else {
return traceInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder setTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
traceInfo_ = value;
} else {
traceInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder setTraceInfo(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder builderForValue) {
if (traceInfoBuilder_ == null) {
traceInfo_ = builderForValue.build();
} else {
traceInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder mergeTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (((bitField0_ & 0x00000002) != 0) &&
traceInfo_ != null &&
traceInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) {
getTraceInfoBuilder().mergeFrom(value);
} else {
traceInfo_ = value;
}
} else {
traceInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder clearTraceInfo() {
bitField0_ = (bitField0_ & ~0x00000002);
traceInfo_ = null;
if (traceInfoBuilder_ != null) {
traceInfoBuilder_.dispose();
traceInfoBuilder_ = null;
}
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder getTraceInfoBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTraceInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
if (traceInfoBuilder_ != null) {
return traceInfoBuilder_.getMessageOrBuilder();
} else {
return traceInfo_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>
getTraceInfoFieldBuilder() {
if (traceInfoBuilder_ == null) {
traceInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>(
getTraceInfo(),
getParentForChildren(),
isClean());
traceInfo_ = null;
}
return traceInfoBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmRequestProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmRequestProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmRequestProto>() {
@java.lang.Override
public ShortCircuitShmRequestProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
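// Illustrative sketch for ShortCircuitShmRequestProto (clientName and the optional
// traceInfo field are visible in the builder above; setClientName(String) is assumed
// to exist in the full generated class alongside setClientNameBytes):
//
//   ShortCircuitShmRequestProto shmRequest = ShortCircuitShmRequestProto.newBuilder()
//       .setClientName("DFSClient_example")  // required string, for logging/debugging only
//       .build();                            // traceInfo is optional and omitted here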
public interface ShortCircuitShmResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShortCircuitShmResponseProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
/**
* optional string error = 2;
* @return Whether the error field is set.
*/
boolean hasError();
/**
* optional string error = 2;
* @return The error.
*/
java.lang.String getError();
/**
* optional string error = 2;
* @return The bytes for error.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getErrorBytes();
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
* @return Whether the id field is set.
*/
boolean hasId();
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
* @return The id.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId();
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmResponseProto}
*/
public static final class ShortCircuitShmResponseProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ShortCircuitShmResponseProto)
ShortCircuitShmResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ShortCircuitShmResponseProto.newBuilder() to construct.
private ShortCircuitShmResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ShortCircuitShmResponseProto() {
status_ = 0;
error_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new ShortCircuitShmResponseProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.Builder.class);
}
private int bitField0_;
public static final int STATUS_FIELD_NUMBER = 1;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
@java.lang.Override public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
@java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
public static final int ERROR_FIELD_NUMBER = 2;
@SuppressWarnings("serial")
private volatile java.lang.Object error_ = "";
/**
* optional string error = 2;
* @return Whether the error field is set.
*/
@java.lang.Override
public boolean hasError() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string error = 2;
* @return The error.
*/
@java.lang.Override
public java.lang.String getError() {
java.lang.Object ref = error_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
error_ = s;
}
return s;
}
}
/**
* optional string error = 2;
* @return The bytes for error.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString
getErrorBytes() {
java.lang.Object ref = error_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
error_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int ID_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto id_;
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
* @return Whether the id field is set.
*/
@java.lang.Override
public boolean hasId() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
* @return The id.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId() {
return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder() {
return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
if (hasId()) {
if (!getId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, error_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeMessage(3, getId());
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, error_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(3, getId());
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto) obj;
if (hasStatus() != other.hasStatus()) return false;
if (hasStatus()) {
if (status_ != other.status_) return false;
}
if (hasError() != other.hasError()) return false;
if (hasError()) {
if (!getError()
.equals(other.getError())) return false;
}
if (hasId() != other.hasId()) return false;
if (hasId()) {
if (!getId()
.equals(other.getId())) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + status_;
}
if (hasError()) {
hash = (37 * hash) + ERROR_FIELD_NUMBER;
hash = (53 * hash) + getError().hashCode();
}
if (hasId()) {
hash = (37 * hash) + ID_FIELD_NUMBER;
hash = (53 * hash) + getId().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShortCircuitShmResponseProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getIdFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
status_ = 0;
error_ = "";
id_ = null;
if (idBuilder_ != null) {
idBuilder_.dispose();
idBuilder_ = null;
}
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.status_ = status_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.error_ = error_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.id_ = idBuilder_ == null
? id_
: idBuilder_.build();
to_bitField0_ |= 0x00000004;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasError()) {
error_ = other.error_;
bitField0_ |= 0x00000002;
onChanged();
}
if (other.hasId()) {
mergeId(other.getId());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
if (hasId()) {
if (!getId().isInitialized()) {
return false;
}
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int tmpRaw = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(1, tmpRaw);
} else {
status_ = tmpRaw;
bitField0_ |= 0x00000001;
}
break;
} // case 8
case 18: {
error_ = input.readBytes();
bitField0_ |= 0x00000002;
break;
} // case 18
case 26: {
input.readMessage(
getIdFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000004;
break;
} // case 26
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
@java.lang.Override public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
/**
* required .hadoop.hdfs.Status status = 1;
* @param value The status to set.
* @return This builder for chaining.
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return This builder for chaining.
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = 0;
onChanged();
return this;
}
private java.lang.Object error_ = "";
/**
* optional string error = 2;
* @return Whether the error field is set.
*/
public boolean hasError() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string error = 2;
* @return The error.
*/
public java.lang.String getError() {
java.lang.Object ref = error_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
error_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string error = 2;
* @return The bytes for error.
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getErrorBytes() {
java.lang.Object ref = error_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
error_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string error = 2;
* @param value The error to set.
* @return This builder for chaining.
*/
public Builder setError(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
error_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* optional string error = 2;
* @return This builder for chaining.
*/
public Builder clearError() {
error_ = getDefaultInstance().getError();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
/**
* optional string error = 2;
* @param value The bytes for error to set.
* @return This builder for chaining.
*/
public Builder setErrorBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
error_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto id_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder> idBuilder_;
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
* @return Whether the id field is set.
*/
public boolean hasId() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
* @return The id.
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId() {
if (idBuilder_ == null) {
return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
} else {
return idBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder setId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (idBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
id_ = value;
} else {
idBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder setId(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder builderForValue) {
if (idBuilder_ == null) {
id_ = builderForValue.build();
} else {
idBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (idBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0) &&
id_ != null &&
id_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) {
getIdBuilder().mergeFrom(value);
} else {
id_ = value;
}
} else {
idBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder clearId() {
bitField0_ = (bitField0_ & ~0x00000004);
id_ = null;
if (idBuilder_ != null) {
idBuilder_.dispose();
idBuilder_ = null;
}
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder getIdBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getIdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder() {
if (idBuilder_ != null) {
return idBuilder_.getMessageOrBuilder();
} else {
return id_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
}
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>
getIdFieldBuilder() {
if (idBuilder_ == null) {
idBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>(
getId(),
getParentForChildren(),
isClean());
id_ = null;
}
return idBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmResponseProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmResponseProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmResponseProto>() {
@java.lang.Override
public ShortCircuitShmResponseProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
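// Illustrative sketch (not generated code): building responses with the builder API
// defined above. Names are relative to DataTransferProtos; the ShortCircuitShmIdProto
// value "shmId" is an assumed, previously constructed identifier.
//
//   ShortCircuitShmResponseProto ok = ShortCircuitShmResponseProto.newBuilder()
//       .setStatus(Status.SUCCESS)   // required field; build() enforces its presence
//       .setId(shmId)                // optional id of the granted shared-memory segment
//       .build();
//   ShortCircuitShmResponseProto err = ShortCircuitShmResponseProto.newBuilder()
//       .setStatus(Status.ERROR)
//       .setError("could not create shared memory segment")
//       .build();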
public interface PacketHeaderProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.PacketHeaderProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
* @return Whether the offsetInBlock field is set.
*/
boolean hasOffsetInBlock();
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
* @return The offsetInBlock.
*/
long getOffsetInBlock();
/**
* required sfixed64 seqno = 2;
* @return Whether the seqno field is set.
*/
boolean hasSeqno();
/**
* required sfixed64 seqno = 2;
* @return The seqno.
*/
long getSeqno();
/**
* required bool lastPacketInBlock = 3;
* @return Whether the lastPacketInBlock field is set.
*/
boolean hasLastPacketInBlock();
/**
* required bool lastPacketInBlock = 3;
* @return The lastPacketInBlock.
*/
boolean getLastPacketInBlock();
/**
* required sfixed32 dataLen = 4;
* @return Whether the dataLen field is set.
*/
boolean hasDataLen();
/**
* required sfixed32 dataLen = 4;
* @return The dataLen.
*/
int getDataLen();
/**
* optional bool syncBlock = 5 [default = false];
* @return Whether the syncBlock field is set.
*/
boolean hasSyncBlock();
/**
* optional bool syncBlock = 5 [default = false];
* @return The syncBlock.
*/
boolean getSyncBlock();
}
/**
* Protobuf type {@code hadoop.hdfs.PacketHeaderProto}
*/
public static final class PacketHeaderProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.PacketHeaderProto)
PacketHeaderProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use PacketHeaderProto.newBuilder() to construct.
private PacketHeaderProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private PacketHeaderProto() {
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new PacketHeaderProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.Builder.class);
}
private int bitField0_;
public static final int OFFSETINBLOCK_FIELD_NUMBER = 1;
private long offsetInBlock_ = 0L;
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
* @return Whether the offsetInBlock field is set.
*/
@java.lang.Override
public boolean hasOffsetInBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
* @return The offsetInBlock.
*/
@java.lang.Override
public long getOffsetInBlock() {
return offsetInBlock_;
}
public static final int SEQNO_FIELD_NUMBER = 2;
private long seqno_ = 0L;
/**
* required sfixed64 seqno = 2;
* @return Whether the seqno field is set.
*/
@java.lang.Override
public boolean hasSeqno() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required sfixed64 seqno = 2;
* @return The seqno.
*/
@java.lang.Override
public long getSeqno() {
return seqno_;
}
public static final int LASTPACKETINBLOCK_FIELD_NUMBER = 3;
private boolean lastPacketInBlock_ = false;
/**
* required bool lastPacketInBlock = 3;
* @return Whether the lastPacketInBlock field is set.
*/
@java.lang.Override
public boolean hasLastPacketInBlock() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required bool lastPacketInBlock = 3;
* @return The lastPacketInBlock.
*/
@java.lang.Override
public boolean getLastPacketInBlock() {
return lastPacketInBlock_;
}
public static final int DATALEN_FIELD_NUMBER = 4;
private int dataLen_ = 0;
/**
* required sfixed32 dataLen = 4;
* @return Whether the dataLen field is set.
*/
@java.lang.Override
public boolean hasDataLen() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* required sfixed32 dataLen = 4;
* @return The dataLen.
*/
@java.lang.Override
public int getDataLen() {
return dataLen_;
}
public static final int SYNCBLOCK_FIELD_NUMBER = 5;
private boolean syncBlock_ = false;
/**
* optional bool syncBlock = 5 [default = false];
* @return Whether the syncBlock field is set.
*/
@java.lang.Override
public boolean hasSyncBlock() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional bool syncBlock = 5 [default = false];
* @return The syncBlock.
*/
@java.lang.Override
public boolean getSyncBlock() {
return syncBlock_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasOffsetInBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSeqno()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLastPacketInBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDataLen()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeSFixed64(1, offsetInBlock_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeSFixed64(2, seqno_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeBool(3, lastPacketInBlock_);
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeSFixed32(4, dataLen_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeBool(5, syncBlock_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeSFixed64Size(1, offsetInBlock_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeSFixed64Size(2, seqno_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(3, lastPacketInBlock_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeSFixed32Size(4, dataLen_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(5, syncBlock_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) obj;
if (hasOffsetInBlock() != other.hasOffsetInBlock()) return false;
if (hasOffsetInBlock()) {
if (getOffsetInBlock()
!= other.getOffsetInBlock()) return false;
}
if (hasSeqno() != other.hasSeqno()) return false;
if (hasSeqno()) {
if (getSeqno()
!= other.getSeqno()) return false;
}
if (hasLastPacketInBlock() != other.hasLastPacketInBlock()) return false;
if (hasLastPacketInBlock()) {
if (getLastPacketInBlock()
!= other.getLastPacketInBlock()) return false;
}
if (hasDataLen() != other.hasDataLen()) return false;
if (hasDataLen()) {
if (getDataLen()
!= other.getDataLen()) return false;
}
if (hasSyncBlock() != other.hasSyncBlock()) return false;
if (hasSyncBlock()) {
if (getSyncBlock()
!= other.getSyncBlock()) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasOffsetInBlock()) {
hash = (37 * hash) + OFFSETINBLOCK_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getOffsetInBlock());
}
if (hasSeqno()) {
hash = (37 * hash) + SEQNO_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getSeqno());
}
if (hasLastPacketInBlock()) {
hash = (37 * hash) + LASTPACKETINBLOCK_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getLastPacketInBlock());
}
if (hasDataLen()) {
hash = (37 * hash) + DATALEN_FIELD_NUMBER;
hash = (53 * hash) + getDataLen();
}
if (hasSyncBlock()) {
hash = (37 * hash) + SYNCBLOCK_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getSyncBlock());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.PacketHeaderProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.PacketHeaderProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.newBuilder()
private Builder() {
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
offsetInBlock_ = 0L;
seqno_ = 0L;
lastPacketInBlock_ = false;
dataLen_ = 0;
syncBlock_ = false;
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.offsetInBlock_ = offsetInBlock_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.seqno_ = seqno_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.lastPacketInBlock_ = lastPacketInBlock_;
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.dataLen_ = dataLen_;
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
result.syncBlock_ = syncBlock_;
to_bitField0_ |= 0x00000010;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance()) return this;
if (other.hasOffsetInBlock()) {
setOffsetInBlock(other.getOffsetInBlock());
}
if (other.hasSeqno()) {
setSeqno(other.getSeqno());
}
if (other.hasLastPacketInBlock()) {
setLastPacketInBlock(other.getLastPacketInBlock());
}
if (other.hasDataLen()) {
setDataLen(other.getDataLen());
}
if (other.hasSyncBlock()) {
setSyncBlock(other.getSyncBlock());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasOffsetInBlock()) {
return false;
}
if (!hasSeqno()) {
return false;
}
if (!hasLastPacketInBlock()) {
return false;
}
if (!hasDataLen()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 9: {
offsetInBlock_ = input.readSFixed64();
bitField0_ |= 0x00000001;
break;
} // case 9
case 17: {
seqno_ = input.readSFixed64();
bitField0_ |= 0x00000002;
break;
} // case 17
case 24: {
lastPacketInBlock_ = input.readBool();
bitField0_ |= 0x00000004;
break;
} // case 24
case 37: {
dataLen_ = input.readSFixed32();
bitField0_ |= 0x00000008;
break;
} // case 37
case 40: {
syncBlock_ = input.readBool();
bitField0_ |= 0x00000010;
break;
} // case 40
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private long offsetInBlock_ ;
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
* @return Whether the offsetInBlock field is set.
*/
@java.lang.Override
public boolean hasOffsetInBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
* @return The offsetInBlock.
*/
@java.lang.Override
public long getOffsetInBlock() {
return offsetInBlock_;
}
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
* @param value The offsetInBlock to set.
* @return This builder for chaining.
*/
public Builder setOffsetInBlock(long value) {
offsetInBlock_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
*
* All fields must be fixed-length!
*
*
* required sfixed64 offsetInBlock = 1;
* @return This builder for chaining.
*/
public Builder clearOffsetInBlock() {
bitField0_ = (bitField0_ & ~0x00000001);
offsetInBlock_ = 0L;
onChanged();
return this;
}
private long seqno_ ;
/**
* required sfixed64 seqno = 2;
* @return Whether the seqno field is set.
*/
@java.lang.Override
public boolean hasSeqno() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required sfixed64 seqno = 2;
* @return The seqno.
*/
@java.lang.Override
public long getSeqno() {
return seqno_;
}
/**
* required sfixed64 seqno = 2;
* @param value The seqno to set.
* @return This builder for chaining.
*/
public Builder setSeqno(long value) {
seqno_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* required sfixed64 seqno = 2;
* @return This builder for chaining.
*/
public Builder clearSeqno() {
bitField0_ = (bitField0_ & ~0x00000002);
seqno_ = 0L;
onChanged();
return this;
}
private boolean lastPacketInBlock_ ;
/**
* required bool lastPacketInBlock = 3;
* @return Whether the lastPacketInBlock field is set.
*/
@java.lang.Override
public boolean hasLastPacketInBlock() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required bool lastPacketInBlock = 3;
* @return The lastPacketInBlock.
*/
@java.lang.Override
public boolean getLastPacketInBlock() {
return lastPacketInBlock_;
}
/**
* required bool lastPacketInBlock = 3;
* @param value The lastPacketInBlock to set.
* @return This builder for chaining.
*/
public Builder setLastPacketInBlock(boolean value) {
lastPacketInBlock_ = value;
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* required bool lastPacketInBlock = 3;
* @return This builder for chaining.
*/
public Builder clearLastPacketInBlock() {
bitField0_ = (bitField0_ & ~0x00000004);
lastPacketInBlock_ = false;
onChanged();
return this;
}
private int dataLen_ ;
/**
* required sfixed32 dataLen = 4;
* @return Whether the dataLen field is set.
*/
@java.lang.Override
public boolean hasDataLen() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* required sfixed32 dataLen = 4;
* @return The dataLen.
*/
@java.lang.Override
public int getDataLen() {
return dataLen_;
}
/**
* required sfixed32 dataLen = 4;
* @param value The dataLen to set.
* @return This builder for chaining.
*/
public Builder setDataLen(int value) {
dataLen_ = value;
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
* required sfixed32 dataLen = 4;
* @return This builder for chaining.
*/
public Builder clearDataLen() {
bitField0_ = (bitField0_ & ~0x00000008);
dataLen_ = 0;
onChanged();
return this;
}
private boolean syncBlock_ ;
/**
* optional bool syncBlock = 5 [default = false];
* @return Whether the syncBlock field is set.
*/
@java.lang.Override
public boolean hasSyncBlock() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional bool syncBlock = 5 [default = false];
* @return The syncBlock.
*/
@java.lang.Override
public boolean getSyncBlock() {
return syncBlock_;
}
/**
* optional bool syncBlock = 5 [default = false];
* @param value The syncBlock to set.
* @return This builder for chaining.
*/
public Builder setSyncBlock(boolean value) {
syncBlock_ = value;
bitField0_ |= 0x00000010;
onChanged();
return this;
}
/**
* optional bool syncBlock = 5 [default = false];
* @return This builder for chaining.
*/
public Builder clearSyncBlock() {
bitField0_ = (bitField0_ & ~0x00000010);
syncBlock_ = false;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.PacketHeaderProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.PacketHeaderProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<PacketHeaderProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<PacketHeaderProto>() {
@java.lang.Override
public PacketHeaderProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<PacketHeaderProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<PacketHeaderProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
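// Illustrative sketch (not generated code): PacketHeaderProto describes one data packet
// in the block read/write stream. Every field uses a fixed-width wire type (sfixed64,
// sfixed32, bool), which is what the "All fields must be fixed-length!" comment above
// refers to: the encoded size does not depend on the field values, only on which
// optional fields are present, so the sender can reserve header space up front.
// A minimal construction with made-up values might look like:
//
//   PacketHeaderProto header = PacketHeaderProto.newBuilder()
//       .setOffsetInBlock(0L)          // byte offset of this packet within the block
//       .setSeqno(42L)                 // increasing packet sequence number
//       .setLastPacketInBlock(false)   // true only for the trailing packet of the block
//       .setDataLen(64 * 1024)         // payload bytes carried by this packet
//       .setSyncBlock(false)           // roughly: ask the DataNode to sync to disk
//       .build();
//   int encodedLen = header.getSerializedSize();  // constant for a given set of fields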
public interface PipelineAckProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.PipelineAckProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required sint64 seqno = 1;
* @return Whether the seqno field is set.
*/
boolean hasSeqno();
/**
* required sint64 seqno = 1;
* @return The seqno.
*/
long getSeqno();
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @return A list containing the reply.
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList();
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @return The count of reply.
*/
int getReplyCount();
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @param index The index of the element to return.
* @return The reply at the given index.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index);
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
* @return Whether the downstreamAckTimeNanos field is set.
*/
boolean hasDownstreamAckTimeNanos();
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
* @return The downstreamAckTimeNanos.
*/
long getDownstreamAckTimeNanos();
/**
* repeated uint32 flag = 4 [packed = true];
* @return A list containing the flag.
*/
java.util.List<java.lang.Integer> getFlagList();
/**
* repeated uint32 flag = 4 [packed = true];
* @return The count of flag.
*/
int getFlagCount();
/**
* repeated uint32 flag = 4 [packed = true];
* @param index The index of the element to return.
* @return The flag at the given index.
*/
int getFlag(int index);
}
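// Illustrative sketch, assuming 'ack' is any PipelineAckProtoOrBuilder (a parsed message or an
// in-progress Builder): the read-only view declared above is sufficient to inspect an ack.
//
//   long seqno = ack.hasSeqno() ? ack.getSeqno() : -1L;
//   for (int i = 0; i < ack.getReplyCount(); i++) {
//     Status s = ack.getReply(i);                 // typically one Status per datanode in the write pipeline
//   }
//   long nanos = ack.getDownstreamAckTimeNanos(); // 0 when unset (the proto default)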
/**
* Protobuf type {@code hadoop.hdfs.PipelineAckProto}
*/
public static final class PipelineAckProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.PipelineAckProto)
PipelineAckProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use PipelineAckProto.newBuilder() to construct.
private PipelineAckProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private PipelineAckProto() {
reply_ = java.util.Collections.emptyList();
flag_ = emptyIntList();
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new PipelineAckProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.Builder.class);
}
private int bitField0_;
public static final int SEQNO_FIELD_NUMBER = 1;
private long seqno_ = 0L;
/**
* required sint64 seqno = 1;
* @return Whether the seqno field is set.
*/
@java.lang.Override
public boolean hasSeqno() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required sint64 seqno = 1;
* @return The seqno.
*/
@java.lang.Override
public long getSeqno() {
return seqno_;
}
public static final int REPLY_FIELD_NUMBER = 2;
@SuppressWarnings("serial")
private java.util.List<java.lang.Integer> reply_;
private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> reply_converter_ =
new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>() {
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status convert(java.lang.Integer from) {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(from);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
};
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @return A list containing the reply.
*/
@java.lang.Override
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList() {
return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>(reply_, reply_converter_);
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @return The count of reply.
*/
@java.lang.Override
public int getReplyCount() {
return reply_.size();
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @param index The index of the element to return.
* @return The reply at the given index.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index) {
return reply_converter_.convert(reply_.get(index));
}
public static final int DOWNSTREAMACKTIMENANOS_FIELD_NUMBER = 3;
private long downstreamAckTimeNanos_ = 0L;
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
* @return Whether the downstreamAckTimeNanos field is set.
*/
@java.lang.Override
public boolean hasDownstreamAckTimeNanos() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
* @return The downstreamAckTimeNanos.
*/
@java.lang.Override
public long getDownstreamAckTimeNanos() {
return downstreamAckTimeNanos_;
}
public static final int FLAG_FIELD_NUMBER = 4;
@SuppressWarnings("serial")
private org.apache.hadoop.thirdparty.protobuf.Internal.IntList flag_;
/**
* repeated uint32 flag = 4 [packed = true];
* @return A list containing the flag.
*/
@java.lang.Override
public java.util.List<java.lang.Integer>
getFlagList() {
return flag_;
}
/**
* repeated uint32 flag = 4 [packed = true];
* @return The count of flag.
*/
public int getFlagCount() {
return flag_.size();
}
/**
* repeated uint32 flag = 4 [packed = true];
* @param index The index of the element to return.
* @return The flag at the given index.
*/
public int getFlag(int index) {
return flag_.getInt(index);
}
private int flagMemoizedSerializedSize = -1;
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasSeqno()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) != 0)) {
output.writeSInt64(1, seqno_);
}
for (int i = 0; i < reply_.size(); i++) {
output.writeEnum(2, reply_.get(i));
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt64(3, downstreamAckTimeNanos_);
}
if (getFlagList().size() > 0) {
output.writeUInt32NoTag(34);
output.writeUInt32NoTag(flagMemoizedSerializedSize);
}
for (int i = 0; i < flag_.size(); i++) {
output.writeUInt32NoTag(flag_.getInt(i));
}
getUnknownFields().writeTo(output);
}
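// Note on the packed 'flag' field written above (illustrative): tag 34 is (field 4 << 3) | wire
// type 2 (length-delimited), so the values are emitted as one length-prefixed run of varints
// rather than one tag per element. For example, flag values [1, 300] would serialize as
// 0x22 (tag), 0x03 (payload length), 0x01, 0xAC 0x02.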
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeSInt64Size(1, seqno_);
}
{
int dataSize = 0;
for (int i = 0; i < reply_.size(); i++) {
dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSizeNoTag(reply_.get(i));
}
size += dataSize;
size += 1 * reply_.size();
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(3, downstreamAckTimeNanos_);
}
{
int dataSize = 0;
for (int i = 0; i < flag_.size(); i++) {
dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32SizeNoTag(flag_.getInt(i));
}
size += dataSize;
if (!getFlagList().isEmpty()) {
size += 1;
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
flagMemoizedSerializedSize = dataSize;
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) obj;
if (hasSeqno() != other.hasSeqno()) return false;
if (hasSeqno()) {
if (getSeqno()
!= other.getSeqno()) return false;
}
if (!reply_.equals(other.reply_)) return false;
if (hasDownstreamAckTimeNanos() != other.hasDownstreamAckTimeNanos()) return false;
if (hasDownstreamAckTimeNanos()) {
if (getDownstreamAckTimeNanos()
!= other.getDownstreamAckTimeNanos()) return false;
}
if (!getFlagList()
.equals(other.getFlagList())) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasSeqno()) {
hash = (37 * hash) + SEQNO_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getSeqno());
}
if (getReplyCount() > 0) {
hash = (37 * hash) + REPLY_FIELD_NUMBER;
hash = (53 * hash) + reply_.hashCode();
}
if (hasDownstreamAckTimeNanos()) {
hash = (37 * hash) + DOWNSTREAMACKTIMENANOS_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getDownstreamAckTimeNanos());
}
if (getFlagCount() > 0) {
hash = (37 * hash) + FLAG_FIELD_NUMBER;
hash = (53 * hash) + getFlagList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.PipelineAckProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.PipelineAckProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.newBuilder()
private Builder() {
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
seqno_ = 0L;
reply_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
downstreamAckTimeNanos_ = 0L;
flag_ = emptyIntList();
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto(this);
buildPartialRepeatedFields(result);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result) {
if (((bitField0_ & 0x00000002) != 0)) {
reply_ = java.util.Collections.unmodifiableList(reply_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.reply_ = reply_;
if (((bitField0_ & 0x00000008) != 0)) {
flag_.makeImmutable();
bitField0_ = (bitField0_ & ~0x00000008);
}
result.flag_ = flag_;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.seqno_ = seqno_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.downstreamAckTimeNanos_ = downstreamAckTimeNanos_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance()) return this;
if (other.hasSeqno()) {
setSeqno(other.getSeqno());
}
if (!other.reply_.isEmpty()) {
if (reply_.isEmpty()) {
reply_ = other.reply_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureReplyIsMutable();
reply_.addAll(other.reply_);
}
onChanged();
}
if (other.hasDownstreamAckTimeNanos()) {
setDownstreamAckTimeNanos(other.getDownstreamAckTimeNanos());
}
if (!other.flag_.isEmpty()) {
if (flag_.isEmpty()) {
flag_ = other.flag_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureFlagIsMutable();
flag_.addAll(other.flag_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasSeqno()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
seqno_ = input.readSInt64();
bitField0_ |= 0x00000001;
break;
} // case 8
case 16: {
int tmpRaw = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(2, tmpRaw);
} else {
ensureReplyIsMutable();
reply_.add(tmpRaw);
}
break;
} // case 16
case 18: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int tmpRaw = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(2, tmpRaw);
} else {
ensureReplyIsMutable();
reply_.add(tmpRaw);
}
}
input.popLimit(oldLimit);
break;
} // case 18
case 24: {
downstreamAckTimeNanos_ = input.readUInt64();
bitField0_ |= 0x00000004;
break;
} // case 24
case 32: {
int v = input.readUInt32();
ensureFlagIsMutable();
flag_.addInt(v);
break;
} // case 32
case 34: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
ensureFlagIsMutable();
while (input.getBytesUntilLimit() > 0) {
flag_.addInt(input.readUInt32());
}
input.popLimit(limit);
break;
} // case 34
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
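// Note (illustrative): the switch above accepts both encodings of 'flag'. Case 32 handles the
// unpacked form (one tag per value) and case 34 the packed form (a single length-delimited run),
// so the parser remains wire-compatible with senders using either encoding, as the protobuf spec
// requires for repeated scalar fields.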
private int bitField0_;
private long seqno_ ;
/**
* required sint64 seqno = 1;
* @return Whether the seqno field is set.
*/
@java.lang.Override
public boolean hasSeqno() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required sint64 seqno = 1;
* @return The seqno.
*/
@java.lang.Override
public long getSeqno() {
return seqno_;
}
/**
* required sint64 seqno = 1;
* @param value The seqno to set.
* @return This builder for chaining.
*/
public Builder setSeqno(long value) {
seqno_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required sint64 seqno = 1;
* @return This builder for chaining.
*/
public Builder clearSeqno() {
bitField0_ = (bitField0_ & ~0x00000001);
seqno_ = 0L;
onChanged();
return this;
}
private java.util.List<java.lang.Integer> reply_ =
java.util.Collections.emptyList();
private void ensureReplyIsMutable() {
if (!((bitField0_ & 0x00000002) != 0)) {
reply_ = new java.util.ArrayList<java.lang.Integer>(reply_);
bitField0_ |= 0x00000002;
}
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @return A list containing the reply.
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList() {
return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>(reply_, reply_converter_);
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @return The count of reply.
*/
public int getReplyCount() {
return reply_.size();
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @param index The index of the element to return.
* @return The reply at the given index.
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index) {
return reply_converter_.convert(reply_.get(index));
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @param index The index to set the value at.
* @param value The reply to set.
* @return This builder for chaining.
*/
public Builder setReply(
int index, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
ensureReplyIsMutable();
reply_.set(index, value.getNumber());
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @param value The reply to add.
* @return This builder for chaining.
*/
public Builder addReply(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
ensureReplyIsMutable();
reply_.add(value.getNumber());
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @param values The reply to add.
* @return This builder for chaining.
*/
public Builder addAllReply(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> values) {
ensureReplyIsMutable();
for (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value : values) {
reply_.add(value.getNumber());
}
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
* @return This builder for chaining.
*/
public Builder clearReply() {
reply_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
private long downstreamAckTimeNanos_ ;
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
* @return Whether the downstreamAckTimeNanos field is set.
*/
@java.lang.Override
public boolean hasDownstreamAckTimeNanos() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
* @return The downstreamAckTimeNanos.
*/
@java.lang.Override
public long getDownstreamAckTimeNanos() {
return downstreamAckTimeNanos_;
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
* @param value The downstreamAckTimeNanos to set.
* @return This builder for chaining.
*/
public Builder setDownstreamAckTimeNanos(long value) {
downstreamAckTimeNanos_ = value;
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
* @return This builder for chaining.
*/
public Builder clearDownstreamAckTimeNanos() {
bitField0_ = (bitField0_ & ~0x00000004);
downstreamAckTimeNanos_ = 0L;
onChanged();
return this;
}
private org.apache.hadoop.thirdparty.protobuf.Internal.IntList flag_ = emptyIntList();
private void ensureFlagIsMutable() {
if (!((bitField0_ & 0x00000008) != 0)) {
flag_ = mutableCopy(flag_);
bitField0_ |= 0x00000008;
}
}
/**
* repeated uint32 flag = 4 [packed = true];
* @return A list containing the flag.
*/
public java.util.List<java.lang.Integer>
getFlagList() {
return ((bitField0_ & 0x00000008) != 0) ?
java.util.Collections.unmodifiableList(flag_) : flag_;
}
/**
* repeated uint32 flag = 4 [packed = true];
* @return The count of flag.
*/
public int getFlagCount() {
return flag_.size();
}
/**
* repeated uint32 flag = 4 [packed = true];
* @param index The index of the element to return.
* @return The flag at the given index.
*/
public int getFlag(int index) {
return flag_.getInt(index);
}
/**
* repeated uint32 flag = 4 [packed = true];
* @param index The index to set the value at.
* @param value The flag to set.
* @return This builder for chaining.
*/
public Builder setFlag(
int index, int value) {
ensureFlagIsMutable();
flag_.setInt(index, value);
onChanged();
return this;
}
/**
* repeated uint32 flag = 4 [packed = true];
* @param value The flag to add.
* @return This builder for chaining.
*/
public Builder addFlag(int value) {
ensureFlagIsMutable();
flag_.addInt(value);
onChanged();
return this;
}
/**
* repeated uint32 flag = 4 [packed = true];
* @param values The flag to add.
* @return This builder for chaining.
*/
public Builder addAllFlag(
java.lang.Iterable<? extends java.lang.Integer> values) {
ensureFlagIsMutable();
org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, flag_);
onChanged();
return this;
}
/**
* repeated uint32 flag = 4 [packed = true];
* @return This builder for chaining.
*/
public Builder clearFlag() {
flag_ = emptyIntList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.PipelineAckProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.PipelineAckProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<PipelineAckProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<PipelineAckProto>() {
@java.lang.Override
public PipelineAckProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<PipelineAckProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<PipelineAckProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
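// Illustrative sketch, not part of the generated code: building and re-parsing an ack with the
// API shown above. seqno is the only required field (see isInitialized()), so build() succeeds.
//
//   PipelineAckProto ack = PipelineAckProto.newBuilder()
//       .setSeqno(42L)
//       .addReply(Status.SUCCESS)
//       .setDownstreamAckTimeNanos(1000L)
//       .build();
//   byte[] wire = ack.toByteArray();
//   PipelineAckProto parsed = PipelineAckProto.parseFrom(wire);
//   // parsed.getSeqno() == 42L, parsed.getReply(0) == Status.SUCCESS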
public interface ReadOpChecksumInfoProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ReadOpChecksumInfoProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
* @return Whether the checksum field is set.
*/
boolean hasChecksum();
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
* @return The checksum.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum();
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder();
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
* @return Whether the chunkOffset field is set.
*/
boolean hasChunkOffset();
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
* @return The chunkOffset.
*/
long getChunkOffset();
}
/**
*
**
* Sent as part of the BlockOpResponseProto
* for READ_BLOCK and COPY_BLOCK operations.
*
*
* Protobuf type {@code hadoop.hdfs.ReadOpChecksumInfoProto}
*/
public static final class ReadOpChecksumInfoProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ReadOpChecksumInfoProto)
ReadOpChecksumInfoProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ReadOpChecksumInfoProto.newBuilder() to construct.
private ReadOpChecksumInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ReadOpChecksumInfoProto() {
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new ReadOpChecksumInfoProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder.class);
}
private int bitField0_;
public static final int CHECKSUM_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_;
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
* @return Whether the checksum field is set.
*/
@java.lang.Override
public boolean hasChecksum() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
* @return The checksum.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum() {
return checksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder() {
return checksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
}
public static final int CHUNKOFFSET_FIELD_NUMBER = 2;
private long chunkOffset_ = 0L;
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
* @return Whether the chunkOffset field is set.
*/
@java.lang.Override
public boolean hasChunkOffset() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
* @return The chunkOffset.
*/
@java.lang.Override
public long getChunkOffset() {
return chunkOffset_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasChecksum()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasChunkOffset()) {
memoizedIsInitialized = 0;
return false;
}
if (!getChecksum().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getChecksum());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt64(2, chunkOffset_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getChecksum());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(2, chunkOffset_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) obj;
if (hasChecksum() != other.hasChecksum()) return false;
if (hasChecksum()) {
if (!getChecksum()
.equals(other.getChecksum())) return false;
}
if (hasChunkOffset() != other.hasChunkOffset()) return false;
if (hasChunkOffset()) {
if (getChunkOffset()
!= other.getChunkOffset()) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasChecksum()) {
hash = (37 * hash) + CHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getChecksum().hashCode();
}
if (hasChunkOffset()) {
hash = (37 * hash) + CHUNKOFFSET_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getChunkOffset());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* Sent as part of the BlockOpResponseProto
* for READ_BLOCK and COPY_BLOCK operations.
*
*
* Protobuf type {@code hadoop.hdfs.ReadOpChecksumInfoProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ReadOpChecksumInfoProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getChecksumFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
checksum_ = null;
if (checksumBuilder_ != null) {
checksumBuilder_.dispose();
checksumBuilder_ = null;
}
chunkOffset_ = 0L;
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.checksum_ = checksumBuilder_ == null
? checksum_
: checksumBuilder_.build();
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.chunkOffset_ = chunkOffset_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) return this;
if (other.hasChecksum()) {
mergeChecksum(other.getChecksum());
}
if (other.hasChunkOffset()) {
setChunkOffset(other.getChunkOffset());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasChecksum()) {
return false;
}
if (!hasChunkOffset()) {
return false;
}
if (!getChecksum().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
input.readMessage(
getChecksumFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000001;
break;
} // case 10
case 16: {
chunkOffset_ = input.readUInt64();
bitField0_ |= 0x00000002;
break;
} // case 16
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> checksumBuilder_;
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
* @return Whether the checksum field is set.
*/
public boolean hasChecksum() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
* @return The checksum.
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum() {
if (checksumBuilder_ == null) {
return checksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
} else {
return checksumBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder setChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
if (checksumBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
checksum_ = value;
} else {
checksumBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder setChecksum(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder builderForValue) {
if (checksumBuilder_ == null) {
checksum_ = builderForValue.build();
} else {
checksumBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder mergeChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
if (checksumBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
checksum_ != null &&
checksum_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) {
getChecksumBuilder().mergeFrom(value);
} else {
checksum_ = value;
}
} else {
checksumBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder clearChecksum() {
bitField0_ = (bitField0_ & ~0x00000001);
checksum_ = null;
if (checksumBuilder_ != null) {
checksumBuilder_.dispose();
checksumBuilder_ = null;
}
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder getChecksumBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getChecksumFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder() {
if (checksumBuilder_ != null) {
return checksumBuilder_.getMessageOrBuilder();
} else {
return checksum_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
}
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>
getChecksumFieldBuilder() {
if (checksumBuilder_ == null) {
checksumBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>(
getChecksum(),
getParentForChildren(),
isClean());
checksum_ = null;
}
return checksumBuilder_;
}
private long chunkOffset_ ;
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
* @return Whether the chunkOffset field is set.
*/
@java.lang.Override
public boolean hasChunkOffset() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
* @return The chunkOffset.
*/
@java.lang.Override
public long getChunkOffset() {
return chunkOffset_;
}
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
* @param value The chunkOffset to set.
* @return This builder for chaining.
*/
public Builder setChunkOffset(long value) {
chunkOffset_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*
* required uint64 chunkOffset = 2;
* @return This builder for chaining.
*/
public Builder clearChunkOffset() {
bitField0_ = (bitField0_ & ~0x00000002);
chunkOffset_ = 0L;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReadOpChecksumInfoProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ReadOpChecksumInfoProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ReadOpChecksumInfoProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ReadOpChecksumInfoProto>() {
@java.lang.Override
public ReadOpChecksumInfoProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ReadOpChecksumInfoProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ReadOpChecksumInfoProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
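// Illustrative sketch, not part of the generated code: ReadOpChecksumInfoProto requires both
// 'checksum' and 'chunkOffset', so a complete message needs a fully populated ChecksumProto.
// buildPartial() is used below because ChecksumProto's own fields fall outside this excerpt.
//
//   ReadOpChecksumInfoProto info = ReadOpChecksumInfoProto.newBuilder()
//       .setChecksum(ChecksumProto.getDefaultInstance())  // placeholder; set real checksum fields in practice
//       .setChunkOffset(0L)                               // reads align back to a checksum chunk boundary
//       .buildPartial();
//   long alignedOffset = info.getChunkOffset();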
public interface BlockOpResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockOpResponseProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
/**
* optional string firstBadLink = 2;
* @return Whether the firstBadLink field is set.
*/
boolean hasFirstBadLink();
/**
* optional string firstBadLink = 2;
* @return The firstBadLink.
*/
java.lang.String getFirstBadLink();
/**
* optional string firstBadLink = 2;
* @return The bytes for firstBadLink.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getFirstBadLinkBytes();
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
* @return Whether the checksumResponse field is set.
*/
boolean hasChecksumResponse();
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
* @return The checksumResponse.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse();
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder();
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
* @return Whether the readOpChecksumInfo field is set.
*/
boolean hasReadOpChecksumInfo();
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
* @return The readOpChecksumInfo.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo();
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder();
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
* @return Whether the message field is set.
*/
boolean hasMessage();
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
* @return The message.
*/
java.lang.String getMessage();
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
* @return The bytes for message.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getMessageBytes();
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
* @return Whether the shortCircuitAccessVersion field is set.
*/
boolean hasShortCircuitAccessVersion();
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
* @return The shortCircuitAccessVersion.
*/
int getShortCircuitAccessVersion();
}
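// Editorial note (not generated code): for the optional sub-messages declared
// above, the generated get*() accessors return the field's default instance
// rather than null when the field is unset, so callers should gate on the
// corresponding has*() method. A minimal sketch:
//
//   DataTransferProtos.BlockOpResponseProto resp = ...;   // obtained from a DataNode
//   if (resp.hasChecksumResponse()) {
//     DataTransferProtos.OpBlockChecksumResponseProto cs = resp.getChecksumResponse();
//     // use cs
//   } else {
//     // getChecksumResponse() would still return a (default, empty) instance here
//   }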
/**
* Protobuf type {@code hadoop.hdfs.BlockOpResponseProto}
*/
public static final class BlockOpResponseProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockOpResponseProto)
BlockOpResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use BlockOpResponseProto.newBuilder() to construct.
private BlockOpResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private BlockOpResponseProto() {
status_ = 0;
firstBadLink_ = "";
message_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new BlockOpResponseProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder.class);
}
private int bitField0_;
public static final int STATUS_FIELD_NUMBER = 1;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
@java.lang.Override public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
@java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
public static final int FIRSTBADLINK_FIELD_NUMBER = 2;
@SuppressWarnings("serial")
private volatile java.lang.Object firstBadLink_ = "";
/**
* optional string firstBadLink = 2;
* @return Whether the firstBadLink field is set.
*/
@java.lang.Override
public boolean hasFirstBadLink() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string firstBadLink = 2;
* @return The firstBadLink.
*/
@java.lang.Override
public java.lang.String getFirstBadLink() {
java.lang.Object ref = firstBadLink_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
firstBadLink_ = s;
}
return s;
}
}
/**
* optional string firstBadLink = 2;
* @return The bytes for firstBadLink.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString
getFirstBadLinkBytes() {
java.lang.Object ref = firstBadLink_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
firstBadLink_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int CHECKSUMRESPONSE_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto checksumResponse_;
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
* @return Whether the checksumResponse field is set.
*/
@java.lang.Override
public boolean hasChecksumResponse() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
* @return The checksumResponse.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse() {
return checksumResponse_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder() {
return checksumResponse_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
}
public static final int READOPCHECKSUMINFO_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto readOpChecksumInfo_;
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
* @return Whether the readOpChecksumInfo field is set.
*/
@java.lang.Override
public boolean hasReadOpChecksumInfo() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
* @return The readOpChecksumInfo.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo() {
return readOpChecksumInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder() {
return readOpChecksumInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
}
public static final int MESSAGE_FIELD_NUMBER = 5;
@SuppressWarnings("serial")
private volatile java.lang.Object message_ = "";
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
* @return Whether the message field is set.
*/
@java.lang.Override
public boolean hasMessage() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
* @return The message.
*/
@java.lang.Override
public java.lang.String getMessage() {
java.lang.Object ref = message_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
message_ = s;
}
return s;
}
}
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
* @return The bytes for message.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString
getMessageBytes() {
java.lang.Object ref = message_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
message_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int SHORTCIRCUITACCESSVERSION_FIELD_NUMBER = 6;
private int shortCircuitAccessVersion_ = 0;
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
* @return Whether the shortCircuitAccessVersion field is set.
*/
@java.lang.Override
public boolean hasShortCircuitAccessVersion() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
* @return The shortCircuitAccessVersion.
*/
@java.lang.Override
public int getShortCircuitAccessVersion() {
return shortCircuitAccessVersion_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
if (hasChecksumResponse()) {
if (!getChecksumResponse().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasReadOpChecksumInfo()) {
if (!getReadOpChecksumInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, firstBadLink_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeMessage(3, getChecksumResponse());
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeMessage(4, getReadOpChecksumInfo());
}
if (((bitField0_ & 0x00000010) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, message_);
}
if (((bitField0_ & 0x00000020) != 0)) {
output.writeUInt32(6, shortCircuitAccessVersion_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, status_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, firstBadLink_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(3, getChecksumResponse());
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(4, getReadOpChecksumInfo());
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, message_);
}
if (((bitField0_ & 0x00000020) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(6, shortCircuitAccessVersion_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) obj;
if (hasStatus() != other.hasStatus()) return false;
if (hasStatus()) {
if (status_ != other.status_) return false;
}
if (hasFirstBadLink() != other.hasFirstBadLink()) return false;
if (hasFirstBadLink()) {
if (!getFirstBadLink()
.equals(other.getFirstBadLink())) return false;
}
if (hasChecksumResponse() != other.hasChecksumResponse()) return false;
if (hasChecksumResponse()) {
if (!getChecksumResponse()
.equals(other.getChecksumResponse())) return false;
}
if (hasReadOpChecksumInfo() != other.hasReadOpChecksumInfo()) return false;
if (hasReadOpChecksumInfo()) {
if (!getReadOpChecksumInfo()
.equals(other.getReadOpChecksumInfo())) return false;
}
if (hasMessage() != other.hasMessage()) return false;
if (hasMessage()) {
if (!getMessage()
.equals(other.getMessage())) return false;
}
if (hasShortCircuitAccessVersion() != other.hasShortCircuitAccessVersion()) return false;
if (hasShortCircuitAccessVersion()) {
if (getShortCircuitAccessVersion()
!= other.getShortCircuitAccessVersion()) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + status_;
}
if (hasFirstBadLink()) {
hash = (37 * hash) + FIRSTBADLINK_FIELD_NUMBER;
hash = (53 * hash) + getFirstBadLink().hashCode();
}
if (hasChecksumResponse()) {
hash = (37 * hash) + CHECKSUMRESPONSE_FIELD_NUMBER;
hash = (53 * hash) + getChecksumResponse().hashCode();
}
if (hasReadOpChecksumInfo()) {
hash = (37 * hash) + READOPCHECKSUMINFO_FIELD_NUMBER;
hash = (53 * hash) + getReadOpChecksumInfo().hashCode();
}
if (hasMessage()) {
hash = (37 * hash) + MESSAGE_FIELD_NUMBER;
hash = (53 * hash) + getMessage().hashCode();
}
if (hasShortCircuitAccessVersion()) {
hash = (37 * hash) + SHORTCIRCUITACCESSVERSION_FIELD_NUMBER;
hash = (53 * hash) + getShortCircuitAccessVersion();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.BlockOpResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockOpResponseProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getChecksumResponseFieldBuilder();
getReadOpChecksumInfoFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
status_ = 0;
firstBadLink_ = "";
checksumResponse_ = null;
if (checksumResponseBuilder_ != null) {
checksumResponseBuilder_.dispose();
checksumResponseBuilder_ = null;
}
readOpChecksumInfo_ = null;
if (readOpChecksumInfoBuilder_ != null) {
readOpChecksumInfoBuilder_.dispose();
readOpChecksumInfoBuilder_ = null;
}
message_ = "";
shortCircuitAccessVersion_ = 0;
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.status_ = status_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.firstBadLink_ = firstBadLink_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.checksumResponse_ = checksumResponseBuilder_ == null
? checksumResponse_
: checksumResponseBuilder_.build();
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.readOpChecksumInfo_ = readOpChecksumInfoBuilder_ == null
? readOpChecksumInfo_
: readOpChecksumInfoBuilder_.build();
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
result.message_ = message_;
to_bitField0_ |= 0x00000010;
}
if (((from_bitField0_ & 0x00000020) != 0)) {
result.shortCircuitAccessVersion_ = shortCircuitAccessVersion_;
to_bitField0_ |= 0x00000020;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasFirstBadLink()) {
firstBadLink_ = other.firstBadLink_;
bitField0_ |= 0x00000002;
onChanged();
}
if (other.hasChecksumResponse()) {
mergeChecksumResponse(other.getChecksumResponse());
}
if (other.hasReadOpChecksumInfo()) {
mergeReadOpChecksumInfo(other.getReadOpChecksumInfo());
}
if (other.hasMessage()) {
message_ = other.message_;
bitField0_ |= 0x00000010;
onChanged();
}
if (other.hasShortCircuitAccessVersion()) {
setShortCircuitAccessVersion(other.getShortCircuitAccessVersion());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
if (hasChecksumResponse()) {
if (!getChecksumResponse().isInitialized()) {
return false;
}
}
if (hasReadOpChecksumInfo()) {
if (!getReadOpChecksumInfo().isInitialized()) {
return false;
}
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int tmpRaw = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(1, tmpRaw);
} else {
status_ = tmpRaw;
bitField0_ |= 0x00000001;
}
break;
} // case 8
case 18: {
firstBadLink_ = input.readBytes();
bitField0_ |= 0x00000002;
break;
} // case 18
case 26: {
input.readMessage(
getChecksumResponseFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000004;
break;
} // case 26
case 34: {
input.readMessage(
getReadOpChecksumInfoFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000008;
break;
} // case 34
case 42: {
message_ = input.readBytes();
bitField0_ |= 0x00000010;
break;
} // case 42
case 48: {
shortCircuitAccessVersion_ = input.readUInt32();
bitField0_ |= 0x00000020;
break;
} // case 48
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
@java.lang.Override public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
/**
* required .hadoop.hdfs.Status status = 1;
* @param value The status to set.
* @return This builder for chaining.
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return This builder for chaining.
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = 0;
onChanged();
return this;
}
private java.lang.Object firstBadLink_ = "";
/**
* optional string firstBadLink = 2;
* @return Whether the firstBadLink field is set.
*/
public boolean hasFirstBadLink() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string firstBadLink = 2;
* @return The firstBadLink.
*/
public java.lang.String getFirstBadLink() {
java.lang.Object ref = firstBadLink_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
firstBadLink_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string firstBadLink = 2;
* @return The bytes for firstBadLink.
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getFirstBadLinkBytes() {
java.lang.Object ref = firstBadLink_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
firstBadLink_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string firstBadLink = 2;
* @param value The firstBadLink to set.
* @return This builder for chaining.
*/
public Builder setFirstBadLink(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
firstBadLink_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* optional string firstBadLink = 2;
* @return This builder for chaining.
*/
public Builder clearFirstBadLink() {
firstBadLink_ = getDefaultInstance().getFirstBadLink();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
/**
* optional string firstBadLink = 2;
* @param value The bytes for firstBadLink to set.
* @return This builder for chaining.
*/
public Builder setFirstBadLinkBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
firstBadLink_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto checksumResponse_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder> checksumResponseBuilder_;
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
* @return Whether the checksumResponse field is set.
*/
public boolean hasChecksumResponse() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
* @return The checksumResponse.
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse() {
if (checksumResponseBuilder_ == null) {
return checksumResponse_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
} else {
return checksumResponseBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder setChecksumResponse(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto value) {
if (checksumResponseBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
checksumResponse_ = value;
} else {
checksumResponseBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder setChecksumResponse(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder builderForValue) {
if (checksumResponseBuilder_ == null) {
checksumResponse_ = builderForValue.build();
} else {
checksumResponseBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder mergeChecksumResponse(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto value) {
if (checksumResponseBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0) &&
checksumResponse_ != null &&
checksumResponse_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance()) {
getChecksumResponseBuilder().mergeFrom(value);
} else {
checksumResponse_ = value;
}
} else {
checksumResponseBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder clearChecksumResponse() {
bitField0_ = (bitField0_ & ~0x00000004);
checksumResponse_ = null;
if (checksumResponseBuilder_ != null) {
checksumResponseBuilder_.dispose();
checksumResponseBuilder_ = null;
}
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder getChecksumResponseBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getChecksumResponseFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder() {
if (checksumResponseBuilder_ != null) {
return checksumResponseBuilder_.getMessageOrBuilder();
} else {
return checksumResponse_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
}
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder>
getChecksumResponseFieldBuilder() {
if (checksumResponseBuilder_ == null) {
checksumResponseBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder>(
getChecksumResponse(),
getParentForChildren(),
isClean());
checksumResponse_ = null;
}
return checksumResponseBuilder_;
}
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto readOpChecksumInfo_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder> readOpChecksumInfoBuilder_;
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
* @return Whether the readOpChecksumInfo field is set.
*/
public boolean hasReadOpChecksumInfo() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
* @return The readOpChecksumInfo.
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo() {
if (readOpChecksumInfoBuilder_ == null) {
return readOpChecksumInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
} else {
return readOpChecksumInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder setReadOpChecksumInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto value) {
if (readOpChecksumInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
readOpChecksumInfo_ = value;
} else {
readOpChecksumInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder setReadOpChecksumInfo(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder builderForValue) {
if (readOpChecksumInfoBuilder_ == null) {
readOpChecksumInfo_ = builderForValue.build();
} else {
readOpChecksumInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder mergeReadOpChecksumInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto value) {
if (readOpChecksumInfoBuilder_ == null) {
if (((bitField0_ & 0x00000008) != 0) &&
readOpChecksumInfo_ != null &&
readOpChecksumInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) {
getReadOpChecksumInfoBuilder().mergeFrom(value);
} else {
readOpChecksumInfo_ = value;
}
} else {
readOpChecksumInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder clearReadOpChecksumInfo() {
bitField0_ = (bitField0_ & ~0x00000008);
readOpChecksumInfo_ = null;
if (readOpChecksumInfoBuilder_ != null) {
readOpChecksumInfoBuilder_.dispose();
readOpChecksumInfoBuilder_ = null;
}
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder getReadOpChecksumInfoBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getReadOpChecksumInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder() {
if (readOpChecksumInfoBuilder_ != null) {
return readOpChecksumInfoBuilder_.getMessageOrBuilder();
} else {
return readOpChecksumInfo_ == null ?
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
}
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder>
getReadOpChecksumInfoFieldBuilder() {
if (readOpChecksumInfoBuilder_ == null) {
readOpChecksumInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder>(
getReadOpChecksumInfo(),
getParentForChildren(),
isClean());
readOpChecksumInfo_ = null;
}
return readOpChecksumInfoBuilder_;
}
private java.lang.Object message_ = "";
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
* @return Whether the message field is set.
*/
public boolean hasMessage() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional string message = 5;
* @return The message.
*/
public java.lang.String getMessage() {
java.lang.Object ref = message_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs = (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
message_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string message = 5;
* @return The bytes for message.
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString getMessageBytes() {
java.lang.Object ref = message_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b = org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
message_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
*
** explanatory text which may be useful to log on the client side
*
*
* optional string message = 5;
* @param value The bytes for message to set.
* @return This builder for chaining.
*/
public Builder setMessageBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
message_ = value;
bitField0_ |= 0x00000010;
onChanged();
return this;
}
private int shortCircuitAccessVersion_ ;
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
* @return Whether the shortCircuitAccessVersion field is set.
*/
@java.lang.Override
public boolean hasShortCircuitAccessVersion() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
* @return The shortCircuitAccessVersion.
*/
@java.lang.Override
public int getShortCircuitAccessVersion() {
return shortCircuitAccessVersion_;
}
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
* @param value The shortCircuitAccessVersion to set.
* @return This builder for chaining.
*/
public Builder setShortCircuitAccessVersion(int value) {
shortCircuitAccessVersion_ = value;
bitField0_ |= 0x00000020;
onChanged();
return this;
}
/**
*
** If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*
* optional uint32 shortCircuitAccessVersion = 6;
* @return This builder for chaining.
*/
public Builder clearShortCircuitAccessVersion() {
bitField0_ = (bitField0_ & ~0x00000020);
shortCircuitAccessVersion_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockOpResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockOpResponseProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockOpResponseProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockOpResponseProto>() {
@java.lang.Override
public BlockOpResponseProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockOpResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<BlockOpResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
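// Editorial usage sketch (not generated code): a client-side reader could
// consume a DataNode's block-operation response with the parse methods above
// and branch on the result. The stream variable `in` and the delimited framing
// are assumptions kept only to make the sketch self-contained; the real HDFS
// client uses its own framing helpers before handing bytes to the parser.
//
//   java.io.InputStream in = ...;   // response stream from the DataNode (assumed)
//   DataTransferProtos.BlockOpResponseProto resp =
//       DataTransferProtos.BlockOpResponseProto.parseDelimitedFrom(in);
//   if (resp.getStatus() != DataTransferProtos.Status.SUCCESS) {
//     String detail = resp.hasMessage() ? resp.getMessage() : "";
//     String badLink = resp.hasFirstBadLink() ? resp.getFirstBadLink() : "";
//     throw new java.io.IOException("Block op failed: " + resp.getStatus()
//         + " firstBadLink=" + badLink + " " + detail);
//   }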
public interface ClientReadStatusProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ClientReadStatusProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
}
/**
*
**
* Message sent from the client to the DN after reading the entire
* read request.
*
*
* Protobuf type {@code hadoop.hdfs.ClientReadStatusProto}
*/
public static final class ClientReadStatusProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ClientReadStatusProto)
ClientReadStatusProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ClientReadStatusProto.newBuilder() to construct.
private ClientReadStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ClientReadStatusProto() {
status_ = 0;
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new ClientReadStatusProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.Builder.class);
}
private int bitField0_;
public static final int STATUS_FIELD_NUMBER = 1;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
@java.lang.Override public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
@java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
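// Editorial usage sketch (not generated code): per the class Javadoc, the client
// sends this status message back to the DataNode once it has consumed the whole
// read. The builder's setStatus(...) setter is generated further down in this
// file (outside this excerpt); the output stream and delimited framing are
// assumptions kept only to make the sketch self-contained.
//
//   java.io.OutputStream out = ...;   // stream back to the DataNode (assumed)
//   DataTransferProtos.ClientReadStatusProto.newBuilder()
//       .setStatus(DataTransferProtos.Status.SUCCESS)   // other Status values exist
//       .build()
//       .writeDelimitedTo(out);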
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, status_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, status_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) obj;
if (hasStatus() != other.hasStatus()) return false;
if (hasStatus()) {
if (status_ != other.status_) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + status_;
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* Message sent from the client to the DN after reading the entire
* read request.
*
*
* Protobuf type {@code hadoop.hdfs.ClientReadStatusProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ClientReadStatusProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.newBuilder()
private Builder() {
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
status_ = 0;
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.status_ = status_;
to_bitField0_ |= 0x00000001;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int tmpRaw = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(1, tmpRaw);
} else {
status_ = tmpRaw;
bitField0_ |= 0x00000001;
}
break;
} // case 8
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
@java.lang.Override public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
/**
* required .hadoop.hdfs.Status status = 1;
* @param value The status to set.
* @return This builder for chaining.
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return This builder for chaining.
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ClientReadStatusProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ClientReadStatusProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ClientReadStatusProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ClientReadStatusProto>() {
@java.lang.Override
public ClientReadStatusProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<ClientReadStatusProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<ClientReadStatusProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
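// Editor's note (not generated code): a minimal usage sketch for ClientReadStatusProto, the
// single-field ack a client sends to the DataNode after consuming a read response. It assumes
// the enclosing org.apache.hadoop.hdfs.protocol.proto package is imported and uses only the
// builder and parse methods generated in this class.
//
//   ClientReadStatusProto ack = ClientReadStatusProto.newBuilder()
//       .setStatus(Status.SUCCESS)          // required field; build() throws if it is missing
//       .build();
//   byte[] wire = ack.toByteArray();        // serialize to the wire format
//   ClientReadStatusProto parsed = ClientReadStatusProto.parseFrom(wire);
//   assert parsed.getStatus() == Status.SUCCESS;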
public interface DNTransferAckProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.DNTransferAckProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
}
/**
* Protobuf type {@code hadoop.hdfs.DNTransferAckProto}
*/
public static final class DNTransferAckProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.DNTransferAckProto)
DNTransferAckProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use DNTransferAckProto.newBuilder() to construct.
private DNTransferAckProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DNTransferAckProto() {
status_ = 0;
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new DNTransferAckProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.Builder.class);
}
private int bitField0_;
public static final int STATUS_FIELD_NUMBER = 1;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
@java.lang.Override public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
@java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, status_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, status_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) obj;
if (hasStatus() != other.hasStatus()) return false;
if (hasStatus()) {
if (status_ != other.status_) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + status_;
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DNTransferAckProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.DNTransferAckProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.newBuilder()
private Builder() {
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
status_ = 0;
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.status_ = status_;
to_bitField0_ |= 0x00000001;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int tmpRaw = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(1, tmpRaw);
} else {
status_ = tmpRaw;
bitField0_ |= 0x00000001;
}
break;
} // case 8
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private int status_ = 0;
/**
* required .hadoop.hdfs.Status status = 1;
* @return Whether the status field is set.
*/
@java.lang.Override public boolean hasStatus() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return The status.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
}
/**
* required .hadoop.hdfs.Status status = 1;
* @param value The status to set.
* @return This builder for chaining.
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
* @return This builder for chaining.
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DNTransferAckProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DNTransferAckProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DNTransferAckProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DNTransferAckProto>() {
@java.lang.Override
public DNTransferAckProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<DNTransferAckProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<DNTransferAckProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
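// Editor's note (not generated code): an illustrative stream round trip for DNTransferAckProto,
// assuming java.io.ByteArrayInputStream/ByteArrayOutputStream and the shaded protobuf runtime
// are available. writeDelimitedTo() is inherited from the protobuf MessageLite base class;
// parseDelimitedFrom() is the static method generated above and returns null at end of stream.
//
//   ByteArrayOutputStream out = new ByteArrayOutputStream();
//   DNTransferAckProto.newBuilder()
//       .setStatus(Status.SUCCESS)
//       .build()
//       .writeDelimitedTo(out);             // length-prefixed frame
//   DNTransferAckProto ack = DNTransferAckProto.parseDelimitedFrom(
//       new ByteArrayInputStream(out.toByteArray()));
//   if (ack != null && ack.hasStatus()) { /* inspect ack.getStatus() */ }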
public interface OpBlockChecksumResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpBlockChecksumResponseProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required uint32 bytesPerCrc = 1;
* @return Whether the bytesPerCrc field is set.
*/
boolean hasBytesPerCrc();
/**
* required uint32 bytesPerCrc = 1;
* @return The bytesPerCrc.
*/
int getBytesPerCrc();
/**
* required uint64 crcPerBlock = 2;
* @return Whether the crcPerBlock field is set.
*/
boolean hasCrcPerBlock();
/**
* required uint64 crcPerBlock = 2;
* @return The crcPerBlock.
*/
long getCrcPerBlock();
/**
* required bytes blockChecksum = 3;
* @return Whether the blockChecksum field is set.
*/
boolean hasBlockChecksum();
/**
* required bytes blockChecksum = 3;
* @return The blockChecksum.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString getBlockChecksum();
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
* @return Whether the crcType field is set.
*/
boolean hasCrcType();
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
* @return The crcType.
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
* @return Whether the blockChecksumOptions field is set.
*/
boolean hasBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
* @return The blockChecksumOptions.
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder();
}
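// Editor's note (not generated code): sketch of populating the message defined below. The three
// required fields (bytesPerCrc, crcPerBlock, blockChecksum) must be set before build(); crcType
// and blockChecksumOptions are optional. ByteString.copyFrom and CHECKSUM_NULL come from the
// shaded protobuf runtime and HdfsProtos respectively; the numeric values and checksum bytes
// here are placeholders.
//
//   OpBlockChecksumResponseProto resp = OpBlockChecksumResponseProto.newBuilder()
//       .setBytesPerCrc(512)                                        // placeholder value
//       .setCrcPerBlock(256L)                                       // placeholder value
//       .setBlockChecksum(ByteString.copyFrom(new byte[] {0x01, 0x02}))
//       .setCrcType(HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL)     // optional
//       .build();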
/**
* Protobuf type {@code hadoop.hdfs.OpBlockChecksumResponseProto}
*/
public static final class OpBlockChecksumResponseProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpBlockChecksumResponseProto)
OpBlockChecksumResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpBlockChecksumResponseProto.newBuilder() to construct.
private OpBlockChecksumResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpBlockChecksumResponseProto() {
blockChecksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
crcType_ = 0;
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new OpBlockChecksumResponseProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder.class);
}
private int bitField0_;
public static final int BYTESPERCRC_FIELD_NUMBER = 1;
private int bytesPerCrc_ = 0;
/**
* required uint32 bytesPerCrc = 1;
* @return Whether the bytesPerCrc field is set.
*/
@java.lang.Override
public boolean hasBytesPerCrc() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required uint32 bytesPerCrc = 1;
* @return The bytesPerCrc.
*/
@java.lang.Override
public int getBytesPerCrc() {
return bytesPerCrc_;
}
public static final int CRCPERBLOCK_FIELD_NUMBER = 2;
private long crcPerBlock_ = 0L;
/**
* required uint64 crcPerBlock = 2;
* @return Whether the crcPerBlock field is set.
*/
@java.lang.Override
public boolean hasCrcPerBlock() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required uint64 crcPerBlock = 2;
* @return The crcPerBlock.
*/
@java.lang.Override
public long getCrcPerBlock() {
return crcPerBlock_;
}
public static final int BLOCKCHECKSUM_FIELD_NUMBER = 3;
private org.apache.hadoop.thirdparty.protobuf.ByteString blockChecksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
* required bytes blockChecksum = 3;
* @return Whether the blockChecksum field is set.
*/
@java.lang.Override
public boolean hasBlockChecksum() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required bytes blockChecksum = 3;
* @return The blockChecksum.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockChecksum() {
return blockChecksum_;
}
public static final int CRCTYPE_FIELD_NUMBER = 4;
private int crcType_ = 0;
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
* @return Whether the crcType field is set.
*/
@java.lang.Override public boolean hasCrcType() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
* @return The crcType.
*/
@java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(crcType_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL : result;
}
public static final int BLOCKCHECKSUMOPTIONS_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
* @return Whether the blockChecksumOptions field is set.
*/
@java.lang.Override
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
* @return The blockChecksumOptions.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasBytesPerCrc()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCrcPerBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockChecksum()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeUInt32(1, bytesPerCrc_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeUInt64(2, crcPerBlock_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeBytes(3, blockChecksum_);
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeEnum(4, crcType_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeMessage(5, getBlockChecksumOptions());
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(1, bytesPerCrc_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(2, crcPerBlock_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBytesSize(3, blockChecksum_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(4, crcType_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(5, getBlockChecksumOptions());
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) obj;
if (hasBytesPerCrc() != other.hasBytesPerCrc()) return false;
if (hasBytesPerCrc()) {
if (getBytesPerCrc()
!= other.getBytesPerCrc()) return false;
}
if (hasCrcPerBlock() != other.hasCrcPerBlock()) return false;
if (hasCrcPerBlock()) {
if (getCrcPerBlock()
!= other.getCrcPerBlock()) return false;
}
if (hasBlockChecksum() != other.hasBlockChecksum()) return false;
if (hasBlockChecksum()) {
if (!getBlockChecksum()
.equals(other.getBlockChecksum())) return false;
}
if (hasCrcType() != other.hasCrcType()) return false;
if (hasCrcType()) {
if (crcType_ != other.crcType_) return false;
}
if (hasBlockChecksumOptions() != other.hasBlockChecksumOptions()) return false;
if (hasBlockChecksumOptions()) {
if (!getBlockChecksumOptions()
.equals(other.getBlockChecksumOptions())) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasBytesPerCrc()) {
hash = (37 * hash) + BYTESPERCRC_FIELD_NUMBER;
hash = (53 * hash) + getBytesPerCrc();
}
if (hasCrcPerBlock()) {
hash = (37 * hash) + CRCPERBLOCK_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getCrcPerBlock());
}
if (hasBlockChecksum()) {
hash = (37 * hash) + BLOCKCHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getBlockChecksum().hashCode();
}
if (hasCrcType()) {
hash = (37 * hash) + CRCTYPE_FIELD_NUMBER;
hash = (53 * hash) + crcType_;
}
if (hasBlockChecksumOptions()) {
hash = (37 * hash) + BLOCKCHECKSUMOPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getBlockChecksumOptions().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpBlockChecksumResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpBlockChecksumResponseProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getBlockChecksumOptionsFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
bytesPerCrc_ = 0;
crcPerBlock_ = 0L;
blockChecksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
crcType_ = 0;
blockChecksumOptions_ = null;
if (blockChecksumOptionsBuilder_ != null) {
blockChecksumOptionsBuilder_.dispose();
blockChecksumOptionsBuilder_ = null;
}
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.bytesPerCrc_ = bytesPerCrc_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.crcPerBlock_ = crcPerBlock_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.blockChecksum_ = blockChecksum_;
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.crcType_ = crcType_;
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
result.blockChecksumOptions_ = blockChecksumOptionsBuilder_ == null
? blockChecksumOptions_
: blockChecksumOptionsBuilder_.build();
to_bitField0_ |= 0x00000010;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance()) return this;
if (other.hasBytesPerCrc()) {
setBytesPerCrc(other.getBytesPerCrc());
}
if (other.hasCrcPerBlock()) {
setCrcPerBlock(other.getCrcPerBlock());
}
if (other.hasBlockChecksum()) {
setBlockChecksum(other.getBlockChecksum());
}
if (other.hasCrcType()) {
setCrcType(other.getCrcType());
}
if (other.hasBlockChecksumOptions()) {
mergeBlockChecksumOptions(other.getBlockChecksumOptions());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasBytesPerCrc()) {
return false;
}
if (!hasCrcPerBlock()) {
return false;
}
if (!hasBlockChecksum()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bytesPerCrc_ = input.readUInt32();
bitField0_ |= 0x00000001;
break;
} // case 8
case 16: {
crcPerBlock_ = input.readUInt64();
bitField0_ |= 0x00000002;
break;
} // case 16
case 26: {
blockChecksum_ = input.readBytes();
bitField0_ |= 0x00000004;
break;
} // case 26
case 32: {
int tmpRaw = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto tmpValue =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(4, tmpRaw);
} else {
crcType_ = tmpRaw;
bitField0_ |= 0x00000008;
}
break;
} // case 32
case 42: {
input.readMessage(
getBlockChecksumOptionsFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000010;
break;
} // case 42
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private int bytesPerCrc_ ;
/**
* required uint32 bytesPerCrc = 1;
* @return Whether the bytesPerCrc field is set.
*/
@java.lang.Override
public boolean hasBytesPerCrc() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required uint32 bytesPerCrc = 1;
* @return The bytesPerCrc.
*/
@java.lang.Override
public int getBytesPerCrc() {
return bytesPerCrc_;
}
/**
* required uint32 bytesPerCrc = 1;
* @param value The bytesPerCrc to set.
* @return This builder for chaining.
*/
public Builder setBytesPerCrc(int value) {
bytesPerCrc_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required uint32 bytesPerCrc = 1;
* @return This builder for chaining.
*/
public Builder clearBytesPerCrc() {
bitField0_ = (bitField0_ & ~0x00000001);
bytesPerCrc_ = 0;
onChanged();
return this;
}
private long crcPerBlock_ ;
/**
* required uint64 crcPerBlock = 2;
* @return Whether the crcPerBlock field is set.
*/
@java.lang.Override
public boolean hasCrcPerBlock() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required uint64 crcPerBlock = 2;
* @return The crcPerBlock.
*/
@java.lang.Override
public long getCrcPerBlock() {
return crcPerBlock_;
}
/**
* required uint64 crcPerBlock = 2;
* @param value The crcPerBlock to set.
* @return This builder for chaining.
*/
public Builder setCrcPerBlock(long value) {
crcPerBlock_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* required uint64 crcPerBlock = 2;
* @return This builder for chaining.
*/
public Builder clearCrcPerBlock() {
bitField0_ = (bitField0_ & ~0x00000002);
crcPerBlock_ = 0L;
onChanged();
return this;
}
private org.apache.hadoop.thirdparty.protobuf.ByteString blockChecksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
* required bytes blockChecksum = 3;
* @return Whether the blockChecksum field is set.
*/
@java.lang.Override
public boolean hasBlockChecksum() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required bytes blockChecksum = 3;
* @return The blockChecksum.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockChecksum() {
return blockChecksum_;
}
/**
* required bytes blockChecksum = 3;
* @param value The blockChecksum to set.
* @return This builder for chaining.
*/
public Builder setBlockChecksum(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
blockChecksum_ = value;
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* required bytes blockChecksum = 3;
* @return This builder for chaining.
*/
public Builder clearBlockChecksum() {
bitField0_ = (bitField0_ & ~0x00000004);
blockChecksum_ = getDefaultInstance().getBlockChecksum();
onChanged();
return this;
}
private int crcType_ = 0;
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
* @return Whether the crcType field is set.
*/
@java.lang.Override public boolean hasCrcType() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
* @return The crcType.
*/
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(crcType_);
return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL : result;
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
* @param value The crcType to set.
* @return This builder for chaining.
*/
public Builder setCrcType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
crcType_ = value.getNumber();
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
* @return This builder for chaining.
*/
public Builder clearCrcType() {
bitField0_ = (bitField0_ & ~0x00000008);
crcType_ = 0;
onChanged();
return this;
}
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> blockChecksumOptionsBuilder_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
* @return Whether the blockChecksumOptions field is set.
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
* @return The blockChecksumOptions.
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
} else {
return blockChecksumOptionsBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder setBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blockChecksumOptions_ = value;
} else {
blockChecksumOptionsBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder setBlockChecksumOptions(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder builderForValue) {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = builderForValue.build();
} else {
blockChecksumOptionsBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder mergeBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (((bitField0_ & 0x00000010) != 0) &&
blockChecksumOptions_ != null &&
blockChecksumOptions_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) {
getBlockChecksumOptionsBuilder().mergeFrom(value);
} else {
blockChecksumOptions_ = value;
}
} else {
blockChecksumOptionsBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder clearBlockChecksumOptions() {
bitField0_ = (bitField0_ & ~0x00000010);
blockChecksumOptions_ = null;
if (blockChecksumOptionsBuilder_ != null) {
blockChecksumOptionsBuilder_.dispose();
blockChecksumOptionsBuilder_ = null;
}
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder getBlockChecksumOptionsBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getBlockChecksumOptionsFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
if (blockChecksumOptionsBuilder_ != null) {
return blockChecksumOptionsBuilder_.getMessageOrBuilder();
} else {
return blockChecksumOptions_ == null ?
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>
getBlockChecksumOptionsFieldBuilder() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>(
getBlockChecksumOptions(),
getParentForChildren(),
isClean());
blockChecksumOptions_ = null;
}
return blockChecksumOptionsBuilder_;
}
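// Illustrative sketch (not generated code): the two usual ways a caller populates the optional
// blockChecksumOptions field on this builder. "responseBuilder" and "partialOptions" are
// assumed local variables, not names defined in this file.
//
//   // supply a fully built sub-message
//   responseBuilder.setBlockChecksumOptions(blockChecksumOptions);
//
//   // or edit the nested message in place via the lazily created field builder
//   responseBuilder.getBlockChecksumOptionsBuilder()
//       .mergeFrom(partialOptions);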
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpBlockChecksumResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpBlockChecksumResponseProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumResponseProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpBlockChecksumResponseProto>() {
@java.lang.Override
public OpBlockChecksumResponseProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
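// Usage sketch (illustrative only, not part of the generated API surface): building and
// re-parsing an OpBlockChecksumResponseProto. setBytesPerCrc, setCrcPerBlock and
// setBlockChecksum are the setters protoc generates for the required scalar fields listed in
// the descriptor at the bottom of this file; they are assumed here rather than shown in this
// excerpt, and md5Bytes is a made-up placeholder.
//
//   OpBlockChecksumResponseProto resp = OpBlockChecksumResponseProto.newBuilder()
//       .setBytesPerCrc(512)
//       .setCrcPerBlock(256L)
//       .setBlockChecksum(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(md5Bytes))
//       .setCrcType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)
//       .build();                                       // build() fails if a required field is unset
//   OpBlockChecksumResponseProto parsed =
//       OpBlockChecksumResponseProto.parseFrom(resp.toByteArray());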
public interface OpCustomProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpCustomProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required string customId = 1;
* @return Whether the customId field is set.
*/
boolean hasCustomId();
/**
* required string customId = 1;
* @return The customId.
*/
java.lang.String getCustomId();
/**
* required string customId = 1;
* @return The bytes for customId.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getCustomIdBytes();
}
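// Sketch (illustrative, hypothetical helper): the OrBuilder interface lets a method read
// customId from either a finished OpCustomProto or an OpCustomProto.Builder still being
// filled in, without forcing a build() call.
//
//   static java.lang.String describe(OpCustomProtoOrBuilder op) {   // hypothetical helper
//     return op.hasCustomId() ? op.getCustomId() : "<unset>";
//   }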
/**
* Protobuf type {@code hadoop.hdfs.OpCustomProto}
*/
public static final class OpCustomProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.OpCustomProto)
OpCustomProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use OpCustomProto.newBuilder() to construct.
private OpCustomProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private OpCustomProto() {
customId_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new OpCustomProto();
}
@java.lang.Override
public final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.Builder.class);
}
private int bitField0_;
public static final int CUSTOMID_FIELD_NUMBER = 1;
@SuppressWarnings("serial")
private volatile java.lang.Object customId_ = "";
/**
* required string customId = 1;
* @return Whether the customId field is set.
*/
@java.lang.Override
public boolean hasCustomId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string customId = 1;
* @return The customId.
*/
@java.lang.Override
public java.lang.String getCustomId() {
java.lang.Object ref = customId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
customId_ = s;
}
return s;
}
}
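// Note: customId_ holds either a String or a ByteString. The first call decodes the bytes as
// UTF-8 and, if the bytes are valid UTF-8, caches the decoded String back into customId_ so
// later calls return it without re-decoding.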
/**
* required string customId = 1;
* @return The bytes for customId.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString
getCustomIdBytes() {
java.lang.Object ref = customId_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
customId_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasCustomId()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, customId_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, customId_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto) obj;
if (hasCustomId() != other.hasCustomId()) return false;
if (hasCustomId()) {
if (!getCustomId()
.equals(other.getCustomId())) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasCustomId()) {
hash = (37 * hash) + CUSTOMID_FIELD_NUMBER;
hash = (53 * hash) + getCustomId().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpCustomProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpCustomProto)
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.newBuilder()
private Builder() {
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
customId_ = "";
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.customId_ = customId_;
to_bitField0_ |= 0x00000001;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.getDefaultInstance()) return this;
if (other.hasCustomId()) {
customId_ = other.customId_;
bitField0_ |= 0x00000001;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasCustomId()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
customId_ = input.readBytes();
bitField0_ |= 0x00000001;
break;
} // case 10
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private java.lang.Object customId_ = "";
/**
* required string customId = 1;
* @return Whether the customId field is set.
*/
public boolean hasCustomId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string customId = 1;
* @return The customId.
*/
public java.lang.String getCustomId() {
java.lang.Object ref = customId_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
customId_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string customId = 1;
* @return The bytes for customId.
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getCustomIdBytes() {
java.lang.Object ref = customId_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
customId_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string customId = 1;
* @param value The customId to set.
* @return This builder for chaining.
*/
public Builder setCustomId(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
customId_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* required string customId = 1;
* @return This builder for chaining.
*/
public Builder clearCustomId() {
customId_ = getDefaultInstance().getCustomId();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
/**
* required string customId = 1;
* @param value The bytes for customId to set.
* @return This builder for chaining.
*/
public Builder setCustomIdBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
customId_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpCustomProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpCustomProto)
private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto();
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpCustomProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpCustomProto>() {
@java.lang.Override
public OpCustomProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<OpCustomProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<OpCustomProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
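// Usage sketch (illustrative only): round-tripping an OpCustomProto through its wire form.
// The id string below is a made-up placeholder.
//
//   OpCustomProto op = OpCustomProto.newBuilder()
//       .setCustomId("example-op")      // required field; build() throws if it is unset
//       .build();
//   byte[] wire = op.toByteArray();
//   OpCustomProto back = OpCustomProto.parseFrom(wire);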
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_HandshakeSecretProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ChecksumProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpCustomProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable;
public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\022datatransfer.proto\022\013hadoop.hdfs\032\016Secur" +
"ity.proto\032\nhdfs.proto\"\373\002\n!DataTransferEn" +
"cryptorMessageProto\022Z\n\006status\030\001 \002(\0162J.ha" +
"doop.hdfs.DataTransferEncryptorMessagePr" +
"oto.DataTransferEncryptorStatus\022\017\n\007paylo" +
"ad\030\002 \001(\014\022\017\n\007message\030\003 \001(\t\0224\n\014cipherOptio" +
"n\030\004 \003(\0132\036.hadoop.hdfs.CipherOptionProto\022" +
":\n\017handshakeSecret\030\005 \001(\0132!.hadoop.hdfs.H" +
"andshakeSecretProto\022\030\n\020accessTokenError\030" +
"\006 \001(\010\"L\n\033DataTransferEncryptorStatus\022\013\n\007" +
"SUCCESS\020\000\022\025\n\021ERROR_UNKNOWN_KEY\020\001\022\t\n\005ERRO" +
"R\020\002\"4\n\024HandshakeSecretProto\022\016\n\006secret\030\001 " +
"\002(\014\022\014\n\004bpid\030\002 \002(\t\"\247\001\n\017BaseHeaderProto\022.\n" +
"\005block\030\001 \002(\0132\037.hadoop.hdfs.ExtendedBlock" +
"Proto\022(\n\005token\030\002 \001(\0132\031.hadoop.common.Tok" +
"enProto\022:\n\ttraceInfo\030\003 \001(\0132\'.hadoop.hdfs" +
".DataTransferTraceInfoProto\"T\n\032DataTrans" +
"ferTraceInfoProto\022\017\n\007traceId\030\001 \001(\004\022\020\n\010pa" +
"rentId\030\002 \001(\004\022\023\n\013spanContext\030\003 \001(\014\"b\n\032Cli" +
"entOperationHeaderProto\0220\n\nbaseHeader\030\001 " +
"\002(\0132\034.hadoop.hdfs.BaseHeaderProto\022\022\n\ncli" +
"entName\030\002 \002(\t\"=\n\024CachingStrategyProto\022\022\n" +
"\ndropBehind\030\001 \001(\010\022\021\n\treadahead\030\002 \001(\003\"\301\001\n" +
"\020OpReadBlockProto\0227\n\006header\030\001 \002(\0132\'.hado" +
"op.hdfs.ClientOperationHeaderProto\022\016\n\006of" +
"fset\030\002 \002(\004\022\013\n\003len\030\003 \002(\004\022\033\n\rsendChecksums" +
"\030\004 \001(\010:\004true\022:\n\017cachingStrategy\030\005 \001(\0132!." +
"hadoop.hdfs.CachingStrategyProto\"W\n\rChec" +
"ksumProto\022,\n\004type\030\001 \002(\0162\036.hadoop.hdfs.Ch" +
"ecksumTypeProto\022\030\n\020bytesPerChecksum\030\002 \002(" +
"\r\"\305\007\n\021OpWriteBlockProto\0227\n\006header\030\001 \002(\0132" +
"\'.hadoop.hdfs.ClientOperationHeaderProto" +
"\022/\n\007targets\030\002 \003(\0132\036.hadoop.hdfs.Datanode" +
"InfoProto\022.\n\006source\030\003 \001(\0132\036.hadoop.hdfs." +
"DatanodeInfoProto\022D\n\005stage\030\004 \002(\01625.hadoo" +
"p.hdfs.OpWriteBlockProto.BlockConstructi" +
"onStage\022\024\n\014pipelineSize\030\005 \002(\r\022\024\n\014minByte" +
"sRcvd\030\006 \002(\004\022\024\n\014maxBytesRcvd\030\007 \002(\004\022\035\n\025lat" +
"estGenerationStamp\030\010 \002(\004\0225\n\021requestedChe" +
"cksum\030\t \002(\0132\032.hadoop.hdfs.ChecksumProto\022" +
":\n\017cachingStrategy\030\n \001(\0132!.hadoop.hdfs.C" +
"achingStrategyProto\0228\n\013storageType\030\013 \001(\016" +
"2\035.hadoop.hdfs.StorageTypeProto:\004DISK\0229\n" +
"\022targetStorageTypes\030\014 \003(\0162\035.hadoop.hdfs." +
"StorageTypeProto\022\037\n\020allowLazyPersist\030\r \001" +
"(\010:\005false\022\026\n\007pinning\030\016 \001(\010:\005false\022\026\n\016tar" +
"getPinnings\030\017 \003(\010\022\021\n\tstorageId\030\020 \001(\t\022\030\n\020" +
"targetStorageIds\030\021 \003(\t\"\210\002\n\026BlockConstruc" +
"tionStage\022\031\n\025PIPELINE_SETUP_APPEND\020\000\022\"\n\036" +
"PIPELINE_SETUP_APPEND_RECOVERY\020\001\022\022\n\016DATA" +
"_STREAMING\020\002\022%\n!PIPELINE_SETUP_STREAMING" +
"_RECOVERY\020\003\022\022\n\016PIPELINE_CLOSE\020\004\022\033\n\027PIPEL" +
"INE_CLOSE_RECOVERY\020\005\022\031\n\025PIPELINE_SETUP_C" +
"REATE\020\006\022\020\n\014TRANSFER_RBW\020\007\022\026\n\022TRANSFER_FI" +
"NALIZED\020\010\"\325\001\n\024OpTransferBlockProto\0227\n\006he" +
"ader\030\001 \002(\0132\'.hadoop.hdfs.ClientOperation" +
"HeaderProto\022/\n\007targets\030\002 \003(\0132\036.hadoop.hd" +
"fs.DatanodeInfoProto\0229\n\022targetStorageTyp" +
"es\030\003 \003(\0162\035.hadoop.hdfs.StorageTypeProto\022" +
"\030\n\020targetStorageIds\030\004 \003(\t\"\321\001\n\023OpReplaceB" +
"lockProto\022,\n\006header\030\001 \002(\0132\034.hadoop.hdfs." +
"BaseHeaderProto\022\017\n\007delHint\030\002 \002(\t\022.\n\006sour" +
"ce\030\003 \002(\0132\036.hadoop.hdfs.DatanodeInfoProto" +
"\0228\n\013storageType\030\004 \001(\0162\035.hadoop.hdfs.Stor" +
"ageTypeProto:\004DISK\022\021\n\tstorageId\030\005 \001(\t\"@\n" +
"\020OpCopyBlockProto\022,\n\006header\030\001 \002(\0132\034.hado" +
"op.hdfs.BaseHeaderProto\"\212\001\n\024OpBlockCheck" +
"sumProto\022,\n\006header\030\001 \002(\0132\034.hadoop.hdfs.B" +
"aseHeaderProto\022D\n\024blockChecksumOptions\030\002" +
" \001(\0132&.hadoop.hdfs.BlockChecksumOptionsP" +
"roto\"\335\002\n\031OpBlockGroupChecksumProto\022,\n\006he" +
"ader\030\001 \002(\0132\034.hadoop.hdfs.BaseHeaderProto" +
"\0222\n\tdatanodes\030\002 \002(\0132\037.hadoop.hdfs.Datano" +
"deInfosProto\022.\n\013blockTokens\030\003 \003(\0132\031.hado" +
"op.common.TokenProto\0227\n\010ecPolicy\030\004 \002(\0132%" +
".hadoop.hdfs.ErasureCodingPolicyProto\022\024\n" +
"\014blockIndices\030\005 \003(\r\022\031\n\021requestedNumBytes" +
"\030\006 \002(\004\022D\n\024blockChecksumOptions\030\007 \001(\0132&.h" +
"adoop.hdfs.BlockChecksumOptionsProto\"0\n\026" +
"ShortCircuitShmIdProto\022\n\n\002hi\030\001 \002(\003\022\n\n\002lo" +
"\030\002 \002(\003\"_\n\030ShortCircuitShmSlotProto\0222\n\005sh" +
"mId\030\001 \002(\0132#.hadoop.hdfs.ShortCircuitShmI" +
"dProto\022\017\n\007slotIdx\030\002 \002(\005\"\307\001\n OpRequestSho" +
"rtCircuitAccessProto\022,\n\006header\030\001 \002(\0132\034.h" +
"adoop.hdfs.BaseHeaderProto\022\022\n\nmaxVersion" +
"\030\002 \002(\r\0225\n\006slotId\030\003 \001(\0132%.hadoop.hdfs.Sho" +
"rtCircuitShmSlotProto\022*\n\033supportsReceipt" +
"Verification\030\004 \001(\010:\005false\"\232\001\n%ReleaseSho" +
"rtCircuitAccessRequestProto\0225\n\006slotId\030\001 " +
"\002(\0132%.hadoop.hdfs.ShortCircuitShmSlotPro" +
"to\022:\n\ttraceInfo\030\002 \001(\0132\'.hadoop.hdfs.Data" +
"TransferTraceInfoProto\"\\\n&ReleaseShortCi" +
"rcuitAccessResponseProto\022#\n\006status\030\001 \002(\016" +
"2\023.hadoop.hdfs.Status\022\r\n\005error\030\002 \001(\t\"m\n\033" +
"ShortCircuitShmRequestProto\022\022\n\nclientNam" +
"e\030\001 \002(\t\022:\n\ttraceInfo\030\002 \001(\0132\'.hadoop.hdfs" +
".DataTransferTraceInfoProto\"\203\001\n\034ShortCir" +
"cuitShmResponseProto\022#\n\006status\030\001 \002(\0162\023.h" +
"adoop.hdfs.Status\022\r\n\005error\030\002 \001(\t\022/\n\002id\030\003" +
" \001(\0132#.hadoop.hdfs.ShortCircuitShmIdProt" +
"o\"\177\n\021PacketHeaderProto\022\025\n\roffsetInBlock\030" +
"\001 \002(\020\022\r\n\005seqno\030\002 \002(\020\022\031\n\021lastPacketInBloc" +
"k\030\003 \002(\010\022\017\n\007dataLen\030\004 \002(\017\022\030\n\tsyncBlock\030\005 " +
"\001(\010:\005false\"z\n\020PipelineAckProto\022\r\n\005seqno\030" +
"\001 \002(\022\022\"\n\005reply\030\002 \003(\0162\023.hadoop.hdfs.Statu" +
"s\022!\n\026downstreamAckTimeNanos\030\003 \001(\004:\0010\022\020\n\004" +
"flag\030\004 \003(\rB\002\020\001\"\\\n\027ReadOpChecksumInfoProt" +
"o\022,\n\010checksum\030\001 \002(\0132\032.hadoop.hdfs.Checks" +
"umProto\022\023\n\013chunkOffset\030\002 \002(\004\"\214\002\n\024BlockOp" +
"ResponseProto\022#\n\006status\030\001 \002(\0162\023.hadoop.h" +
"dfs.Status\022\024\n\014firstBadLink\030\002 \001(\t\022C\n\020chec" +
"ksumResponse\030\003 \001(\0132).hadoop.hdfs.OpBlock" +
"ChecksumResponseProto\022@\n\022readOpChecksumI" +
"nfo\030\004 \001(\0132$.hadoop.hdfs.ReadOpChecksumIn" +
"foProto\022\017\n\007message\030\005 \001(\t\022!\n\031shortCircuit" +
"AccessVersion\030\006 \001(\r\"<\n\025ClientReadStatusP" +
"roto\022#\n\006status\030\001 \002(\0162\023.hadoop.hdfs.Statu" +
"s\"9\n\022DNTransferAckProto\022#\n\006status\030\001 \002(\0162" +
"\023.hadoop.hdfs.Status\"\326\001\n\034OpBlockChecksum" +
"ResponseProto\022\023\n\013bytesPerCrc\030\001 \002(\r\022\023\n\013cr" +
"cPerBlock\030\002 \002(\004\022\025\n\rblockChecksum\030\003 \002(\014\022/" +
"\n\007crcType\030\004 \001(\0162\036.hadoop.hdfs.ChecksumTy" +
"peProto\022D\n\024blockChecksumOptions\030\005 \001(\0132&." +
"hadoop.hdfs.BlockChecksumOptionsProto\"!\n" +
"\rOpCustomProto\022\020\n\010customId\030\001 \002(\t*\214\002\n\006Sta" +
"tus\022\013\n\007SUCCESS\020\000\022\t\n\005ERROR\020\001\022\022\n\016ERROR_CHE" +
"CKSUM\020\002\022\021\n\rERROR_INVALID\020\003\022\020\n\014ERROR_EXIS" +
"TS\020\004\022\026\n\022ERROR_ACCESS_TOKEN\020\005\022\017\n\013CHECKSUM" +
"_OK\020\006\022\025\n\021ERROR_UNSUPPORTED\020\007\022\017\n\013OOB_REST" +
"ART\020\010\022\021\n\rOOB_RESERVED1\020\t\022\021\n\rOOB_RESERVED" +
"2\020\n\022\021\n\rOOB_RESERVED3\020\013\022\017\n\013IN_PROGRESS\020\014\022" +
"\026\n\022ERROR_BLOCK_PINNED\020\r*[\n\026ShortCircuitF" +
"dResponse\022#\n\037DO_NOT_USE_RECEIPT_VERIFICA" +
"TION\020\000\022\034\n\030USE_RECEIPT_VERIFICATION\020\001B>\n%" +
"org.apache.hadoop.hdfs.protocol.protoB\022D" +
"ataTransferProtos\240\001\001"
};
descriptor = org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(),
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
});
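// The string literal above is the serialized FileDescriptorProto for datatransfer.proto,
// built against Security.proto and hdfs.proto. Message descriptors can also be looked up by
// name at runtime; a small illustrative sketch (the name matches the .proto declaration):
//
//   org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor d =
//       DataTransferProtos.getDescriptor().findMessageTypeByName("OpCustomProto");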
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor,
new java.lang.String[] { "Status", "Payload", "Message", "CipherOption", "HandshakeSecret", "AccessTokenError", });
internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_hadoop_hdfs_HandshakeSecretProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor,
new java.lang.String[] { "Secret", "Bpid", });
internal_static_hadoop_hdfs_BaseHeaderProto_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_BaseHeaderProto_descriptor,
new java.lang.String[] { "Block", "Token", "TraceInfo", });
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor,
new java.lang.String[] { "TraceId", "ParentId", "SpanContext", });
internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor,
new java.lang.String[] { "BaseHeader", "ClientName", });
internal_static_hadoop_hdfs_CachingStrategyProto_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_CachingStrategyProto_descriptor,
new java.lang.String[] { "DropBehind", "Readahead", });
internal_static_hadoop_hdfs_OpReadBlockProto_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpReadBlockProto_descriptor,
new java.lang.String[] { "Header", "Offset", "Len", "SendChecksums", "CachingStrategy", });
internal_static_hadoop_hdfs_ChecksumProto_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ChecksumProto_descriptor,
new java.lang.String[] { "Type", "BytesPerChecksum", });
internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor,
new java.lang.String[] { "Header", "Targets", "Source", "Stage", "PipelineSize", "MinBytesRcvd", "MaxBytesRcvd", "LatestGenerationStamp", "RequestedChecksum", "CachingStrategy", "StorageType", "TargetStorageTypes", "AllowLazyPersist", "Pinning", "TargetPinnings", "StorageId", "TargetStorageIds", });
internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor,
new java.lang.String[] { "Header", "Targets", "TargetStorageTypes", "TargetStorageIds", });
internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor =
getDescriptor().getMessageTypes().get(10);
internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor,
new java.lang.String[] { "Header", "DelHint", "Source", "StorageType", "StorageId", });
internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor =
getDescriptor().getMessageTypes().get(11);
internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor,
new java.lang.String[] { "Header", });
internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor =
getDescriptor().getMessageTypes().get(12);
internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor,
new java.lang.String[] { "Header", "BlockChecksumOptions", });
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor =
getDescriptor().getMessageTypes().get(13);
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor,
new java.lang.String[] { "Header", "Datanodes", "BlockTokens", "EcPolicy", "BlockIndices", "RequestedNumBytes", "BlockChecksumOptions", });
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor =
getDescriptor().getMessageTypes().get(14);
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor,
new java.lang.String[] { "Hi", "Lo", });
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor =
getDescriptor().getMessageTypes().get(15);
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor,
new java.lang.String[] { "ShmId", "SlotIdx", });
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor =
getDescriptor().getMessageTypes().get(16);
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor,
new java.lang.String[] { "Header", "MaxVersion", "SlotId", "SupportsReceiptVerification", });
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor =
getDescriptor().getMessageTypes().get(17);
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor,
new java.lang.String[] { "SlotId", "TraceInfo", });
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor =
getDescriptor().getMessageTypes().get(18);
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor,
new java.lang.String[] { "Status", "Error", });
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor =
getDescriptor().getMessageTypes().get(19);
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor,
new java.lang.String[] { "ClientName", "TraceInfo", });
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor =
getDescriptor().getMessageTypes().get(20);
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor,
new java.lang.String[] { "Status", "Error", "Id", });
internal_static_hadoop_hdfs_PacketHeaderProto_descriptor =
getDescriptor().getMessageTypes().get(21);
internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_PacketHeaderProto_descriptor,
new java.lang.String[] { "OffsetInBlock", "Seqno", "LastPacketInBlock", "DataLen", "SyncBlock", });
internal_static_hadoop_hdfs_PipelineAckProto_descriptor =
getDescriptor().getMessageTypes().get(22);
internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_PipelineAckProto_descriptor,
new java.lang.String[] { "Seqno", "Reply", "DownstreamAckTimeNanos", "Flag", });
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor =
getDescriptor().getMessageTypes().get(23);
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor,
new java.lang.String[] { "Checksum", "ChunkOffset", });
internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor =
getDescriptor().getMessageTypes().get(24);
internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor,
new java.lang.String[] { "Status", "FirstBadLink", "ChecksumResponse", "ReadOpChecksumInfo", "Message", "ShortCircuitAccessVersion", });
internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor =
getDescriptor().getMessageTypes().get(25);
internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor,
new java.lang.String[] { "Status", });
internal_static_hadoop_hdfs_DNTransferAckProto_descriptor =
getDescriptor().getMessageTypes().get(26);
internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_DNTransferAckProto_descriptor,
new java.lang.String[] { "Status", });
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor =
getDescriptor().getMessageTypes().get(27);
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor,
new java.lang.String[] { "BytesPerCrc", "CrcPerBlock", "BlockChecksum", "CrcType", "BlockChecksumOptions", });
internal_static_hadoop_hdfs_OpCustomProto_descriptor =
getDescriptor().getMessageTypes().get(28);
internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_OpCustomProto_descriptor,
new java.lang.String[] { "CustomId", });
org.apache.hadoop.security.proto.SecurityProtos.getDescriptor();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)
}