/**
 * Protobuf type {@code hadoop.fs.FileStatusProto}
 *
*
* FileStatus encoding. Field IDs match those from HdfsFileStatusProto, but
* cross-serialization is not an explicitly supported use case. Unlike HDFS,
* most fields are optional and do not define defaults.
*
*/
public static final class FileStatusProto extends
com.google.protobuf.GeneratedMessage
implements FileStatusProtoOrBuilder {
// Use FileStatusProto.newBuilder() to construct.
// Use FileStatusProto.newBuilder() to construct.
// NOTE(review): the builder parameter was a raw GeneratedMessage.Builder —
// the type argument was most likely stripped during extraction; restored to
// the wildcard form used by protobuf 2.5 generated code.
private FileStatusProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  // Carry over any unknown fields collected while the builder was populated.
  this.unknownFields = builder.getUnknownFields();
}
// Constructor taking a dummy flag, used to build the shared default instance
// without reading any input; installs an empty unknown-field set.
private FileStatusProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
// Shared default instance, returned by both the static and the per-instance
// accessors below. (Its initialization happens elsewhere in this file.)
private static final FileStatusProto defaultInstance;
public static FileStatusProto getDefaultInstance() {
return defaultInstance;
}
public FileStatusProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields that arrived on the wire with tags this message does not recognize;
// kept so that re-serialization does not silently drop them.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor: reads tag/value pairs from the stream
// until tag 0 (end of input), fills in the matching field for each known tag,
// and sets the corresponding presence bit in bitField0_. Tags this message
// does not recognize are preserved via parseUnknownField.
private FileStatusProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
// Reset every field to its default before parsing.
initFields();
// Unused by any case below (no repeated fields are parsed here).
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
// Each tag is (field number << 3) | wire type, e.g. 8 = field 1 varint,
// 18 = field 2 length-delimited. The default label appearing before the
// value cases is legal Java: switch dispatch is by value, not by the
// lexical order of the labels.
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
// Field 1 (fileType): enum varint. Unknown enum numbers are preserved
// as unknown fields instead of being dropped.
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType value = org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
fileType_ = value;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
path_ = input.readBytes();
break;
}
case 24: {
bitField0_ |= 0x00000004;
length_ = input.readUInt64();
break;
}
// Field 4 (permission): if a permission sub-message was already seen,
// merge the new one into it rather than overwriting.
case 34: {
org.apache.hadoop.fs.FSProtos.FsPermissionProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = permission_.toBuilder();
}
permission_ = input.readMessage(org.apache.hadoop.fs.FSProtos.FsPermissionProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(permission_);
permission_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 42: {
bitField0_ |= 0x00000010;
owner_ = input.readBytes();
break;
}
case 50: {
bitField0_ |= 0x00000020;
group_ = input.readBytes();
break;
}
case 56: {
bitField0_ |= 0x00000040;
modificationTime_ = input.readUInt64();
break;
}
case 64: {
bitField0_ |= 0x00000080;
accessTime_ = input.readUInt64();
break;
}
case 74: {
bitField0_ |= 0x00000100;
symlink_ = input.readBytes();
break;
}
case 80: {
bitField0_ |= 0x00000200;
blockReplication_ = input.readUInt32();
break;
}
case 88: {
bitField0_ |= 0x00000400;
blockSize_ = input.readUInt64();
break;
}
// Fields 15, 17 and 18 — note the deliberate gap in field numbers
// (12-14 and 16 are not parsed here).
case 122: {
bitField0_ |= 0x00000800;
encryptionData_ = input.readBytes();
break;
}
case 138: {
bitField0_ |= 0x00001000;
ecData_ = input.readBytes();
break;
}
case 144: {
bitField0_ |= 0x00002000;
flags_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
// Attach the partially-parsed message so callers can inspect it.
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always install whatever unknown fields were collected, even when
// parsing terminated with an exception.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor for hadoop.fs.FileStatusProto, held by the enclosing FSProtos
// outer class.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FileStatusProto_descriptor;
}
// Reflection table mapping descriptor fields to this class's accessors.
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FileStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.fs.FSProtos.FileStatusProto.class, org.apache.hadoop.fs.FSProtos.FileStatusProto.Builder.class);
}
// Parser used by the static parseFrom()/parseDelimitedFrom() helpers below.
// NOTE(review): the declarations were raw Parser/AbstractParser — the type
// arguments were most likely stripped during extraction; restored to match
// protobuf 2.5 generated code.
public static com.google.protobuf.Parser<FileStatusProto> PARSER =
    new com.google.protobuf.AbstractParser<FileStatusProto>() {
  public FileStatusProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    // Delegate to the wire-format parsing constructor.
    return new FileStatusProto(input, extensionRegistry);
  }
};
@java.lang.Override
public com.google.protobuf.Parser<FileStatusProto> getParserForType() {
  return PARSER;
}
/**
 * Protobuf enum {@code hadoop.fs.FileStatusProto.FileType}
 *
 * <p>Each constant carries its descriptor index and its wire value.
 * NOTE(review): the {@code Internal.EnumLiteMap} declarations were raw types —
 * the type arguments were most likely stripped during extraction; restored to
 * {@code EnumLiteMap<FileType>} as emitted by protobuf 2.5.
 */
public enum FileType
    implements com.google.protobuf.ProtocolMessageEnum {
  /**
   * FT_DIR = 1;
   */
  FT_DIR(0, 1),
  /**
   * FT_FILE = 2;
   */
  FT_FILE(1, 2),
  /**
   * FT_SYMLINK = 3;
   */
  FT_SYMLINK(2, 3),
  ;
  /**
   * FT_DIR = 1;
   */
  public static final int FT_DIR_VALUE = 1;
  /**
   * FT_FILE = 2;
   */
  public static final int FT_FILE_VALUE = 2;
  /**
   * FT_SYMLINK = 3;
   */
  public static final int FT_SYMLINK_VALUE = 3;

  /** Returns the wire value of this enum constant. */
  public final int getNumber() { return value; }

  /** Maps a wire value back to its constant; returns null for unknown values. */
  public static FileType valueOf(int value) {
    switch (value) {
      case 1: return FT_DIR;
      case 2: return FT_FILE;
      case 3: return FT_SYMLINK;
      default: return null;
    }
  }

  public static com.google.protobuf.Internal.EnumLiteMap<FileType>
      internalGetValueMap() {
    return internalValueMap;
  }
  private static com.google.protobuf.Internal.EnumLiteMap<FileType>
      internalValueMap =
        new com.google.protobuf.Internal.EnumLiteMap<FileType>() {
          public FileType findValueByNumber(int number) {
            return FileType.valueOf(number);
          }
        };

  public final com.google.protobuf.Descriptors.EnumValueDescriptor
      getValueDescriptor() {
    return getDescriptor().getValues().get(index);
  }
  public final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptorForType() {
    return getDescriptor();
  }
  public static final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptor() {
    return org.apache.hadoop.fs.FSProtos.FileStatusProto.getDescriptor().getEnumTypes().get(0);
  }

  private static final FileType[] VALUES = values();

  /** Resolves a constant from its descriptor; rejects descriptors of other enums. */
  public static FileType valueOf(
      com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
    if (desc.getType() != getDescriptor()) {
      throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
    }
    return VALUES[desc.getIndex()];
  }

  // Descriptor index (declaration order) and protobuf wire value.
  private final int index;
  private final int value;

  private FileType(int index, int value) {
    this.index = index;
    this.value = value;
  }

  // @@protoc_insertion_point(enum_scope:hadoop.fs.FileStatusProto.FileType)
}
/**
 * Protobuf enum {@code hadoop.fs.FileStatusProto.Flags}
 */
// NOTE(review): the file appears to be corrupted/truncated here. The Flags
// enum below is opened but its constants and closing brace are nowhere in the
// visible text, and the content jumps straight into message-level accessors
// (getEcData and the flags field). The declarations of ecData_,
// encryptionData_, bitField0_ and the other per-field members referenced
// throughout this class are likewise absent. This region will not compile as
// written — recover the original generated FSProtos.java rather than
// hand-patching it.
public enum Flags
implements com.google.protobuf.ProtocolMessageEnum {
/**
 * HAS_ACL = 1;
 *
 *
 */
// NOTE(review): everything from here on looks like message-level code that
// was spliced into the enum by the same truncation.
public com.google.protobuf.ByteString getEcData() {
return ecData_;
}
// optional uint32 flags = 18 [default = 0];
public static final int FLAGS_FIELD_NUMBER = 18;
private int flags_;
/**
 * optional uint32 flags = 18 [default = 0];
 */
public boolean hasFlags() {
return ((bitField0_ & 0x00002000) == 0x00002000);
}
/**
 * optional uint32 flags = 18 [default = 0];
 */
public int getFlags() {
return flags_;
}
// Resets every field to its proto default; invoked by the parsing
// constructor before any input is read.
private void initFields() {
fileType_ = org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType.FT_DIR;
path_ = "";
length_ = 0L;
permission_ = org.apache.hadoop.fs.FSProtos.FsPermissionProto.getDefaultInstance();
owner_ = "";
group_ = "";
modificationTime_ = 0L;
accessTime_ = 0L;
symlink_ = "";
blockReplication_ = 0;
blockSize_ = 0L;
encryptionData_ = com.google.protobuf.ByteString.EMPTY;
ecData_ = com.google.protobuf.ByteString.EMPTY;
flags_ = 0;
}
// Cached result of isInitialized(): -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;

/**
 * A FileStatusProto is initialized when both required fields (fileType and
 * path) are present and, if a permission sub-message is set, that sub-message
 * is itself initialized. The answer is memoized after the first call.
 */
public final boolean isInitialized() {
  final byte cached = memoizedIsInitialized;
  if (cached != -1) {
    return cached == 1;
  }
  final boolean ok =
      hasFileType()
      && hasPath()
      && (!hasPermission() || getPermission().isInitialized());
  memoizedIsInitialized = ok ? (byte) 1 : (byte) 0;
  return ok;
}
// Serializes every present field (per its bitField0_ presence bit) in
// ascending field-number order, then appends any preserved unknown fields.
// getSerializedSize() is called first to populate the memoized size.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, fileType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getPathBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, length_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, permission_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, getOwnerBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBytes(6, getGroupBytes());
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt64(7, modificationTime_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeUInt64(8, accessTime_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeBytes(9, getSymlinkBytes());
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeUInt32(10, blockReplication_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeUInt64(11, blockSize_);
}
// Field numbers jump to 15, 17 and 18 here, mirroring the parser.
if (((bitField0_ & 0x00000800) == 0x00000800)) {
output.writeBytes(15, encryptionData_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
output.writeBytes(17, ecData_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
output.writeUInt32(18, flags_);
}
getUnknownFields().writeTo(output);
}
// Cached serialized size; -1 means not yet computed.
private int memoizedSerializedSize = -1;
// Computes (and memoizes) the exact number of bytes writeTo() will emit:
// the sum of each present field's encoded size plus the unknown fields.
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, fileType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getPathBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, length_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, permission_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(5, getOwnerBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(6, getGroupBytes());
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(7, modificationTime_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(8, accessTime_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(9, getSymlinkBytes());
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(10, blockReplication_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(11, blockSize_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(15, encryptionData_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(17, ecData_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(18, flags_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Java serialization hook; defers entirely to the GeneratedMessage superclass.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
/**
 * Two FileStatusProto messages are equal when every field has the same
 * presence bit and, where present, the same value — and their unknown
 * field sets are equal. Non-FileStatusProto arguments fall back to the
 * superclass comparison.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.fs.FSProtos.FileStatusProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.fs.FSProtos.FileStatusProto that =
      (org.apache.hadoop.fs.FSProtos.FileStatusProto) obj;
  // Guard-clause form: bail out on the first mismatching field.
  if (hasFileType() != that.hasFileType()
      || (hasFileType() && getFileType() != that.getFileType())) {
    return false;
  }
  if (hasPath() != that.hasPath()
      || (hasPath() && !getPath().equals(that.getPath()))) {
    return false;
  }
  if (hasLength() != that.hasLength()
      || (hasLength() && getLength() != that.getLength())) {
    return false;
  }
  if (hasPermission() != that.hasPermission()
      || (hasPermission() && !getPermission().equals(that.getPermission()))) {
    return false;
  }
  if (hasOwner() != that.hasOwner()
      || (hasOwner() && !getOwner().equals(that.getOwner()))) {
    return false;
  }
  if (hasGroup() != that.hasGroup()
      || (hasGroup() && !getGroup().equals(that.getGroup()))) {
    return false;
  }
  if (hasModificationTime() != that.hasModificationTime()
      || (hasModificationTime()
          && getModificationTime() != that.getModificationTime())) {
    return false;
  }
  if (hasAccessTime() != that.hasAccessTime()
      || (hasAccessTime() && getAccessTime() != that.getAccessTime())) {
    return false;
  }
  if (hasSymlink() != that.hasSymlink()
      || (hasSymlink() && !getSymlink().equals(that.getSymlink()))) {
    return false;
  }
  if (hasBlockReplication() != that.hasBlockReplication()
      || (hasBlockReplication()
          && getBlockReplication() != that.getBlockReplication())) {
    return false;
  }
  if (hasBlockSize() != that.hasBlockSize()
      || (hasBlockSize() && getBlockSize() != that.getBlockSize())) {
    return false;
  }
  if (hasEncryptionData() != that.hasEncryptionData()
      || (hasEncryptionData()
          && !getEncryptionData().equals(that.getEncryptionData()))) {
    return false;
  }
  if (hasEcData() != that.hasEcData()
      || (hasEcData() && !getEcData().equals(that.getEcData()))) {
    return false;
  }
  if (hasFlags() != that.hasFlags()
      || (hasFlags() && getFlags() != that.getFlags())) {
    return false;
  }
  return getUnknownFields().equals(that.getUnknownFields());
}
// Cached hash; 0 means not yet computed (a genuinely-zero hash is recomputed
// on every call).
private int memoizedHashCode = 0;
// Folds the descriptor, each present field (tagged with its field number),
// and the unknown fields into the hash using the generated-code multipliers.
// Must stay consistent with equals(): fields absent there are absent here.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasFileType()) {
hash = (37 * hash) + FILETYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getFileType());
}
if (hasPath()) {
hash = (37 * hash) + PATH_FIELD_NUMBER;
hash = (53 * hash) + getPath().hashCode();
}
if (hasLength()) {
hash = (37 * hash) + LENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLength());
}
if (hasPermission()) {
hash = (37 * hash) + PERMISSION_FIELD_NUMBER;
hash = (53 * hash) + getPermission().hashCode();
}
if (hasOwner()) {
hash = (37 * hash) + OWNER_FIELD_NUMBER;
hash = (53 * hash) + getOwner().hashCode();
}
if (hasGroup()) {
hash = (37 * hash) + GROUP_FIELD_NUMBER;
hash = (53 * hash) + getGroup().hashCode();
}
if (hasModificationTime()) {
hash = (37 * hash) + MODIFICATION_TIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getModificationTime());
}
if (hasAccessTime()) {
hash = (37 * hash) + ACCESS_TIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getAccessTime());
}
if (hasSymlink()) {
hash = (37 * hash) + SYMLINK_FIELD_NUMBER;
hash = (53 * hash) + getSymlink().hashCode();
}
if (hasBlockReplication()) {
hash = (37 * hash) + BLOCK_REPLICATION_FIELD_NUMBER;
hash = (53 * hash) + getBlockReplication();
}
if (hasBlockSize()) {
hash = (37 * hash) + BLOCK_SIZE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlockSize());
}
if (hasEncryptionData()) {
hash = (37 * hash) + ENCRYPTION_DATA_FIELD_NUMBER;
hash = (53 * hash) + getEncryptionData().hashCode();
}
if (hasEcData()) {
hash = (37 * hash) + EC_DATA_FIELD_NUMBER;
hash = (53 * hash) + getEcData().hashCode();
}
if (hasFlags()) {
hash = (37 * hash) + FLAGS_FIELD_NUMBER;
hash = (53 * hash) + getFlags();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parsing entry points for the common input sources (ByteString,
// byte[], InputStream, CodedInputStream), with and without an extension
// registry. All of them delegate to PARSER.
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Delimited variants read a varint length prefix before the message body.
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factories: a fresh builder, a builder pre-populated from an
// existing message, and the framework hook used when this message is nested
// inside a parent builder.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.fs.FSProtos.FileStatusProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.fs.FileStatusProto}
*
*
*
* FileStatus encoding. Field IDs match those from HdfsFileStatusProto, but
* cross-serialization is not an explicitly supported use case. Unlike HDFS,
* most fields are optional and do not define defaults.
*