// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: FSProtos.proto
package org.apache.hadoop.fs;
public final class FSProtos {
private FSProtos() {}
public static void registerAllExtensions(
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
}
public static void registerAllExtensions(
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
registerAllExtensions(
(org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
}
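// Note: FSProtos.proto defines no protobuf extensions, so both overloads of
// registerAllExtensions above are intentionally empty; calling them with any
// registry is a no-op.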
public interface FsPermissionProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.fs.FsPermissionProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* UNIX-style mode bits
*
* required uint32 perm = 1;
* @return Whether the perm field is set.
*/
boolean hasPerm();
/**
* UNIX-style mode bits
*
* required uint32 perm = 1;
* @return The perm.
*/
int getPerm();
}
/**
* Protobuf type {@code hadoop.fs.FsPermissionProto}
*/
public static final class FsPermissionProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.fs.FsPermissionProto)
FsPermissionProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use FsPermissionProto.newBuilder() to construct.
private FsPermissionProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private FsPermissionProto() {
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new FsPermissionProto();
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FsPermissionProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FsPermissionProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.fs.FSProtos.FsPermissionProto.class, org.apache.hadoop.fs.FSProtos.FsPermissionProto.Builder.class);
}
private int bitField0_;
public static final int PERM_FIELD_NUMBER = 1;
private int perm_ = 0;
/**
* UNIX-style mode bits
*
* required uint32 perm = 1;
* @return Whether the perm field is set.
*/
@java.lang.Override
public boolean hasPerm() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* UNIX-style mode bits
*
* required uint32 perm = 1;
* @return The perm.
*/
@java.lang.Override
public int getPerm() {
return perm_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasPerm()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
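// Note: memoizedIsInitialized is a cached tri-state: -1 means the required-
// field check has not run yet, 0 means the message is known to be missing its
// required perm field, and 1 means it is known to be fully initialized.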
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeUInt32(1, perm_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(1, perm_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.fs.FSProtos.FsPermissionProto)) {
return super.equals(obj);
}
org.apache.hadoop.fs.FSProtos.FsPermissionProto other = (org.apache.hadoop.fs.FSProtos.FsPermissionProto) obj;
if (hasPerm() != other.hasPerm()) return false;
if (hasPerm()) {
if (getPerm()
!= other.getPerm()) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasPerm()) {
hash = (37 * hash) + PERM_FIELD_NUMBER;
hash = (53 * hash) + getPerm();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.fs.FSProtos.FsPermissionProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.fs.FsPermissionProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.fs.FsPermissionProto)
org.apache.hadoop.fs.FSProtos.FsPermissionProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FsPermissionProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FsPermissionProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.fs.FSProtos.FsPermissionProto.class, org.apache.hadoop.fs.FSProtos.FsPermissionProto.Builder.class);
}
// Construct using org.apache.hadoop.fs.FSProtos.FsPermissionProto.newBuilder()
private Builder() {
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
perm_ = 0;
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FsPermissionProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.FsPermissionProto getDefaultInstanceForType() {
return org.apache.hadoop.fs.FSProtos.FsPermissionProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.FsPermissionProto build() {
org.apache.hadoop.fs.FSProtos.FsPermissionProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.FsPermissionProto buildPartial() {
org.apache.hadoop.fs.FSProtos.FsPermissionProto result = new org.apache.hadoop.fs.FSProtos.FsPermissionProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.fs.FSProtos.FsPermissionProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.perm_ = perm_;
to_bitField0_ |= 0x00000001;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.fs.FSProtos.FsPermissionProto) {
return mergeFrom((org.apache.hadoop.fs.FSProtos.FsPermissionProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.fs.FSProtos.FsPermissionProto other) {
if (other == org.apache.hadoop.fs.FSProtos.FsPermissionProto.getDefaultInstance()) return this;
if (other.hasPerm()) {
setPerm(other.getPerm());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasPerm()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
perm_ = input.readUInt32();
bitField0_ |= 0x00000001;
break;
} // case 8
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private int perm_ ;
/**
* UNIX-style mode bits
*
* required uint32 perm = 1;
* @return Whether the perm field is set.
*/
@java.lang.Override
public boolean hasPerm() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* UNIX-style mode bits
*
* required uint32 perm = 1;
* @return The perm.
*/
@java.lang.Override
public int getPerm() {
return perm_;
}
/**
* UNIX-style mode bits
*
* required uint32 perm = 1;
* @param value The perm to set.
* @return This builder for chaining.
*/
public Builder setPerm(int value) {
perm_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* UNIX-style mode bits
*
* required uint32 perm = 1;
* @return This builder for chaining.
*/
public Builder clearPerm() {
bitField0_ = (bitField0_ & ~0x00000001);
perm_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.fs.FsPermissionProto)
}
// @@protoc_insertion_point(class_scope:hadoop.fs.FsPermissionProto)
private static final org.apache.hadoop.fs.FSProtos.FsPermissionProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.fs.FSProtos.FsPermissionProto();
}
public static org.apache.hadoop.fs.FSProtos.FsPermissionProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FsPermissionProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FsPermissionProto>() {
@java.lang.Override
public FsPermissionProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
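// The public PARSER field is deprecated in generated protobuf code; new
// callers should prefer the parser() accessor below, or getParserForType()
// on an instance, both of which return the same singleton.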
public static org.apache.hadoop.thirdparty.protobuf.Parser<FsPermissionProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<FsPermissionProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.FsPermissionProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
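// ---------------------------------------------------------------------------
// Illustrative usage (not part of the generated file): a minimal sketch of a
// build/serialize/parse round trip for FsPermissionProto, assuming the Hadoop
// shaded protobuf runtime is on the classpath. The octal literal 0644 stands
// in for arbitrary UNIX mode bits.
//
//   FSProtos.FsPermissionProto perm = FSProtos.FsPermissionProto.newBuilder()
//       .setPerm(0644)  // required field; build() would throw if it is unset
//       .build();
//   byte[] bytes = perm.toByteArray();
//   FSProtos.FsPermissionProto copy = FSProtos.FsPermissionProto.parseFrom(bytes);
//   assert copy.hasPerm() && copy.getPerm() == 0644;
// ---------------------------------------------------------------------------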
public interface FileStatusProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.fs.FileStatusProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.fs.FileStatusProto.FileType fileType = 1;
* @return Whether the fileType field is set.
*/
boolean hasFileType();
/**
* required .hadoop.fs.FileStatusProto.FileType fileType = 1;
* @return The fileType.
*/
org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType getFileType();
/**
* required string path = 2;
* @return Whether the path field is set.
*/
boolean hasPath();
/**
* required string path = 2;
* @return The path.
*/
java.lang.String getPath();
/**
* required string path = 2;
* @return The bytes for path.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getPathBytes();
/**
* optional uint64 length = 3;
* @return Whether the length field is set.
*/
boolean hasLength();
/**
* optional uint64 length = 3;
* @return The length.
*/
long getLength();
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
* @return Whether the permission field is set.
*/
boolean hasPermission();
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
* @return The permission.
*/
org.apache.hadoop.fs.FSProtos.FsPermissionProto getPermission();
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
*/
org.apache.hadoop.fs.FSProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder();
/**
* optional string owner = 5;
* @return Whether the owner field is set.
*/
boolean hasOwner();
/**
* optional string owner = 5;
* @return The owner.
*/
java.lang.String getOwner();
/**
* optional string owner = 5;
* @return The bytes for owner.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getOwnerBytes();
/**
* optional string group = 6;
* @return Whether the group field is set.
*/
boolean hasGroup();
/**
* optional string group = 6;
* @return The group.
*/
java.lang.String getGroup();
/**
* optional string group = 6;
* @return The bytes for group.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getGroupBytes();
/**
* optional uint64 modification_time = 7;
* @return Whether the modificationTime field is set.
*/
boolean hasModificationTime();
/**
* optional uint64 modification_time = 7;
* @return The modificationTime.
*/
long getModificationTime();
/**
* optional uint64 access_time = 8;
* @return Whether the accessTime field is set.
*/
boolean hasAccessTime();
/**
* optional uint64 access_time = 8;
* @return The accessTime.
*/
long getAccessTime();
/**
* optional string symlink = 9;
* @return Whether the symlink field is set.
*/
boolean hasSymlink();
/**
* optional string symlink = 9;
* @return The symlink.
*/
java.lang.String getSymlink();
/**
* optional string symlink = 9;
* @return The bytes for symlink.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getSymlinkBytes();
/**
* optional uint32 block_replication = 10;
* @return Whether the blockReplication field is set.
*/
boolean hasBlockReplication();
/**
* optional uint32 block_replication = 10;
* @return The blockReplication.
*/
int getBlockReplication();
/**
* optional uint64 block_size = 11;
* @return Whether the blockSize field is set.
*/
boolean hasBlockSize();
/**
* optional uint64 block_size = 11;
* @return The blockSize.
*/
long getBlockSize();
/**
* locations = 12
* alias = 13
* childrenNum = 14
*
* optional bytes encryption_data = 15;
* @return Whether the encryptionData field is set.
*/
boolean hasEncryptionData();
/**
* locations = 12
* alias = 13
* childrenNum = 14
*
* optional bytes encryption_data = 15;
* @return The encryptionData.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionData();
/**
* storagePolicy = 16
*
* optional bytes ec_data = 17;
* @return Whether the ecData field is set.
*/
boolean hasEcData();
/**
* storagePolicy = 16
*
* optional bytes ec_data = 17;
* @return The ecData.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString getEcData();
/**
* optional uint32 flags = 18 [default = 0];
* @return Whether the flags field is set.
*/
boolean hasFlags();
/**
* optional uint32 flags = 18 [default = 0];
* @return The flags.
*/
int getFlags();
}
/**
* FileStatus encoding. Field IDs match those from HdfsFileStatusProto, but
* cross-serialization is not an explicitly supported use case. Unlike HDFS,
* most fields are optional and do not define defaults.
*
* Protobuf type {@code hadoop.fs.FileStatusProto}
*/
public static final class FileStatusProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.fs.FileStatusProto)
FileStatusProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use FileStatusProto.newBuilder() to construct.
private FileStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private FileStatusProto() {
fileType_ = 1;
path_ = "";
owner_ = "";
group_ = "";
symlink_ = "";
encryptionData_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
ecData_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new FileStatusProto();
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FileStatusProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FileStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.fs.FSProtos.FileStatusProto.class, org.apache.hadoop.fs.FSProtos.FileStatusProto.Builder.class);
}
/**
* Protobuf enum {@code hadoop.fs.FileStatusProto.FileType}
*/
public enum FileType
implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
/**
* FT_DIR = 1;
*/
FT_DIR(1),
/**
* FT_FILE = 2;
*/
FT_FILE(2),
/**
* FT_SYMLINK = 3;
*/
FT_SYMLINK(3),
;
/**
* FT_DIR = 1;
*/
public static final int FT_DIR_VALUE = 1;
/**
* FT_FILE = 2;
*/
public static final int FT_FILE_VALUE = 2;
/**
* FT_SYMLINK = 3;
*/
public static final int FT_SYMLINK_VALUE = 3;
public final int getNumber() {
return value;
}
/**
* @param value The numeric wire value of the corresponding enum entry.
* @return The enum associated with the given numeric wire value.
* @deprecated Use {@link #forNumber(int)} instead.
*/
@java.lang.Deprecated
public static FileType valueOf(int value) {
return forNumber(value);
}
/**
* @param value The numeric wire value of the corresponding enum entry.
* @return The enum associated with the given numeric wire value.
*/
public static FileType forNumber(int value) {
switch (value) {
case 1: return FT_DIR;
case 2: return FT_FILE;
case 3: return FT_SYMLINK;
default: return null;
}
}
public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<FileType>
internalGetValueMap() {
return internalValueMap;
}
private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
FileType> internalValueMap =
new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<FileType>() {
public FileType findValueByNumber(int number) {
return FileType.forNumber(number);
}
};
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(ordinal());
}
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.fs.FSProtos.FileStatusProto.getDescriptor().getEnumTypes().get(0);
}
private static final FileType[] VALUES = values();
public static FileType valueOf(
org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int value;
private FileType(int value) {
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.fs.FileStatusProto.FileType)
}
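// Illustrative note (not part of the generated file): FileType.forNumber(...)
// returns null for any wire value outside 1..3; the message parser uses that
// null to divert unrecognized enum numbers into the unknown-field set instead
// of failing. A minimal sketch:
//
//   FSProtos.FileStatusProto.FileType t =
//       FSProtos.FileStatusProto.FileType.forNumber(7);   // -> null
//   if (t == null) { /* value is preserved as an unknown varint field */ }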
/**
* Protobuf enum {@code hadoop.fs.FileStatusProto.Flags}
*/
public enum Flags
implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
/**
* has ACLs
*
* HAS_ACL = 1;
*/
HAS_ACL(1),
/**
* encrypted
*
* HAS_CRYPT = 2;
*/
HAS_CRYPT(2),
/**
* erasure coded
*
* HAS_EC = 4;
*/
HAS_EC(4),
/**
* snapshot enabled
*
* SNAPSHOT_ENABLED = 8;
*/
SNAPSHOT_ENABLED(8),
;
/**
* has ACLs
*
* HAS_ACL = 1;
*/
public static final int HAS_ACL_VALUE = 1;
/**
* encrypted
*
* HAS_CRYPT = 2;
*/
public static final int HAS_CRYPT_VALUE = 2;
/**
* erasure coded
*
* HAS_EC = 4;
*/
public static final int HAS_EC_VALUE = 4;
/**
* snapshot enabled
*
* SNAPSHOT_ENABLED = 8;
*/
public static final int SNAPSHOT_ENABLED_VALUE = 8;
public final int getNumber() {
return value;
}
/**
* @param value The numeric wire value of the corresponding enum entry.
* @return The enum associated with the given numeric wire value.
* @deprecated Use {@link #forNumber(int)} instead.
*/
@java.lang.Deprecated
public static Flags valueOf(int value) {
return forNumber(value);
}
/**
* @param value The numeric wire value of the corresponding enum entry.
* @return The enum associated with the given numeric wire value.
*/
public static Flags forNumber(int value) {
switch (value) {
case 1: return HAS_ACL;
case 2: return HAS_CRYPT;
case 4: return HAS_EC;
case 8: return SNAPSHOT_ENABLED;
default: return null;
}
}
public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Flags>
internalGetValueMap() {
return internalValueMap;
}
private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
Flags> internalValueMap =
new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Flags>() {
public Flags findValueByNumber(int number) {
return Flags.forNumber(number);
}
};
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(ordinal());
}
public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.fs.FSProtos.FileStatusProto.getDescriptor().getEnumTypes().get(1);
}
private static final Flags[] VALUES = values();
public static Flags valueOf(
org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int value;
private Flags(int value) {
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.fs.FileStatusProto.Flags)
}
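// Illustrative usage (not part of the generated file): the Flags values are
// distinct powers of two, so they are presumably meant to be OR-ed into the
// uint32 `flags` field (field 18) as a bitmask. A minimal sketch:
//
//   int flags = FSProtos.FileStatusProto.Flags.HAS_ACL_VALUE
//       | FSProtos.FileStatusProto.Flags.HAS_EC_VALUE;           // 1 | 4 == 5
//   boolean hasAcl =
//       (flags & FSProtos.FileStatusProto.Flags.HAS_ACL_VALUE) != 0;  // true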
private int bitField0_;
public static final int FILETYPE_FIELD_NUMBER = 1;
private int fileType_ = 1;
/**
* required .hadoop.fs.FileStatusProto.FileType fileType = 1;
* @return Whether the fileType field is set.
*/
@java.lang.Override public boolean hasFileType() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.fs.FileStatusProto.FileType fileType = 1;
* @return The fileType.
*/
@java.lang.Override public org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType getFileType() {
org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType result = org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType.forNumber(fileType_);
return result == null ? org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType.FT_DIR : result;
}
public static final int PATH_FIELD_NUMBER = 2;
@SuppressWarnings("serial")
private volatile java.lang.Object path_ = "";
/**
* required string path = 2;
* @return Whether the path field is set.
*/
@java.lang.Override
public boolean hasPath() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required string path = 2;
* @return The path.
*/
@java.lang.Override
public java.lang.String getPath() {
java.lang.Object ref = path_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
path_ = s;
}
return s;
}
}
/**
* required string path = 2;
* @return The bytes for path.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString
getPathBytes() {
java.lang.Object ref = path_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
path_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int LENGTH_FIELD_NUMBER = 3;
private long length_ = 0L;
/**
* optional uint64 length = 3;
* @return Whether the length field is set.
*/
@java.lang.Override
public boolean hasLength() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional uint64 length = 3;
* @return The length.
*/
@java.lang.Override
public long getLength() {
return length_;
}
public static final int PERMISSION_FIELD_NUMBER = 4;
private org.apache.hadoop.fs.FSProtos.FsPermissionProto permission_;
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
* @return Whether the permission field is set.
*/
@java.lang.Override
public boolean hasPermission() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
* @return The permission.
*/
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.FsPermissionProto getPermission() {
return permission_ == null ? org.apache.hadoop.fs.FSProtos.FsPermissionProto.getDefaultInstance() : permission_;
}
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
*/
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
return permission_ == null ? org.apache.hadoop.fs.FSProtos.FsPermissionProto.getDefaultInstance() : permission_;
}
public static final int OWNER_FIELD_NUMBER = 5;
@SuppressWarnings("serial")
private volatile java.lang.Object owner_ = "";
/**
* optional string owner = 5;
* @return Whether the owner field is set.
*/
@java.lang.Override
public boolean hasOwner() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional string owner = 5;
* @return The owner.
*/
@java.lang.Override
public java.lang.String getOwner() {
java.lang.Object ref = owner_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
owner_ = s;
}
return s;
}
}
/**
* optional string owner = 5;
* @return The bytes for owner.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString
getOwnerBytes() {
java.lang.Object ref = owner_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
owner_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int GROUP_FIELD_NUMBER = 6;
@SuppressWarnings("serial")
private volatile java.lang.Object group_ = "";
/**
* optional string group = 6;
* @return Whether the group field is set.
*/
@java.lang.Override
public boolean hasGroup() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
* optional string group = 6;
* @return The group.
*/
@java.lang.Override
public java.lang.String getGroup() {
java.lang.Object ref = group_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
group_ = s;
}
return s;
}
}
/**
* optional string group = 6;
* @return The bytes for group.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString
getGroupBytes() {
java.lang.Object ref = group_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
group_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int MODIFICATION_TIME_FIELD_NUMBER = 7;
private long modificationTime_ = 0L;
/**
* optional uint64 modification_time = 7;
* @return Whether the modificationTime field is set.
*/
@java.lang.Override
public boolean hasModificationTime() {
return ((bitField0_ & 0x00000040) != 0);
}
/**
* optional uint64 modification_time = 7;
* @return The modificationTime.
*/
@java.lang.Override
public long getModificationTime() {
return modificationTime_;
}
public static final int ACCESS_TIME_FIELD_NUMBER = 8;
private long accessTime_ = 0L;
/**
* optional uint64 access_time = 8;
* @return Whether the accessTime field is set.
*/
@java.lang.Override
public boolean hasAccessTime() {
return ((bitField0_ & 0x00000080) != 0);
}
/**
* optional uint64 access_time = 8;
* @return The accessTime.
*/
@java.lang.Override
public long getAccessTime() {
return accessTime_;
}
public static final int SYMLINK_FIELD_NUMBER = 9;
@SuppressWarnings("serial")
private volatile java.lang.Object symlink_ = "";
/**
* optional string symlink = 9;
* @return Whether the symlink field is set.
*/
@java.lang.Override
public boolean hasSymlink() {
return ((bitField0_ & 0x00000100) != 0);
}
/**
* optional string symlink = 9;
* @return The symlink.
*/
@java.lang.Override
public java.lang.String getSymlink() {
java.lang.Object ref = symlink_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
symlink_ = s;
}
return s;
}
}
/**
* optional string symlink = 9;
* @return The bytes for symlink.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString
getSymlinkBytes() {
java.lang.Object ref = symlink_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
symlink_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int BLOCK_REPLICATION_FIELD_NUMBER = 10;
private int blockReplication_ = 0;
/**
* optional uint32 block_replication = 10;
* @return Whether the blockReplication field is set.
*/
@java.lang.Override
public boolean hasBlockReplication() {
return ((bitField0_ & 0x00000200) != 0);
}
/**
* optional uint32 block_replication = 10;
* @return The blockReplication.
*/
@java.lang.Override
public int getBlockReplication() {
return blockReplication_;
}
public static final int BLOCK_SIZE_FIELD_NUMBER = 11;
private long blockSize_ = 0L;
/**
* optional uint64 block_size = 11;
* @return Whether the blockSize field is set.
*/
@java.lang.Override
public boolean hasBlockSize() {
return ((bitField0_ & 0x00000400) != 0);
}
/**
* optional uint64 block_size = 11;
* @return The blockSize.
*/
@java.lang.Override
public long getBlockSize() {
return blockSize_;
}
public static final int ENCRYPTION_DATA_FIELD_NUMBER = 15;
private org.apache.hadoop.thirdparty.protobuf.ByteString encryptionData_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
* locations = 12
* alias = 13
* childrenNum = 14
*
* optional bytes encryption_data = 15;
* @return Whether the encryptionData field is set.
*/
@java.lang.Override
public boolean hasEncryptionData() {
return ((bitField0_ & 0x00000800) != 0);
}
/**
* locations = 12
* alias = 13
* childrenNum = 14
*
* optional bytes encryption_data = 15;
* @return The encryptionData.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionData() {
return encryptionData_;
}
public static final int EC_DATA_FIELD_NUMBER = 17;
private org.apache.hadoop.thirdparty.protobuf.ByteString ecData_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
* storagePolicy = 16
*
* optional bytes ec_data = 17;
* @return Whether the ecData field is set.
*/
@java.lang.Override
public boolean hasEcData() {
return ((bitField0_ & 0x00001000) != 0);
}
/**
* storagePolicy = 16
*
* optional bytes ec_data = 17;
* @return The ecData.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString getEcData() {
return ecData_;
}
public static final int FLAGS_FIELD_NUMBER = 18;
private int flags_ = 0;
/**
* optional uint32 flags = 18 [default = 0];
* @return Whether the flags field is set.
*/
@java.lang.Override
public boolean hasFlags() {
return ((bitField0_ & 0x00002000) != 0);
}
/**
* optional uint32 flags = 18 [default = 0];
* @return The flags.
*/
@java.lang.Override
public int getFlags() {
return flags_;
}
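// Illustrative usage (not part of the generated file): a minimal sketch of
// populating a FileStatusProto through its builder. The setters shown are the
// standard protoc-generated ones (they appear in the Builder further down,
// beyond this excerpt); only fileType and path are required.
//
//   FSProtos.FileStatusProto status = FSProtos.FileStatusProto.newBuilder()
//       .setFileType(FSProtos.FileStatusProto.FileType.FT_FILE)  // required
//       .setPath("/user/example/data.txt")                       // required
//       .setLength(1024L)
//       .setPermission(FSProtos.FsPermissionProto.newBuilder().setPerm(0644))
//       .build();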
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasFileType()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPath()) {
memoizedIsInitialized = 0;
return false;
}
if (hasPermission()) {
if (!getPermission().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeEnum(1, fileType_);
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, path_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeUInt64(3, length_);
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeMessage(4, getPermission());
}
if (((bitField0_ & 0x00000010) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, owner_);
}
if (((bitField0_ & 0x00000020) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, group_);
}
if (((bitField0_ & 0x00000040) != 0)) {
output.writeUInt64(7, modificationTime_);
}
if (((bitField0_ & 0x00000080) != 0)) {
output.writeUInt64(8, accessTime_);
}
if (((bitField0_ & 0x00000100) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 9, symlink_);
}
if (((bitField0_ & 0x00000200) != 0)) {
output.writeUInt32(10, blockReplication_);
}
if (((bitField0_ & 0x00000400) != 0)) {
output.writeUInt64(11, blockSize_);
}
if (((bitField0_ & 0x00000800) != 0)) {
output.writeBytes(15, encryptionData_);
}
if (((bitField0_ & 0x00001000) != 0)) {
output.writeBytes(17, ecData_);
}
if (((bitField0_ & 0x00002000) != 0)) {
output.writeUInt32(18, flags_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeEnumSize(1, fileType_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, path_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(3, length_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(4, getPermission());
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, owner_);
}
if (((bitField0_ & 0x00000020) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, group_);
}
if (((bitField0_ & 0x00000040) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(7, modificationTime_);
}
if (((bitField0_ & 0x00000080) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(8, accessTime_);
}
if (((bitField0_ & 0x00000100) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(9, symlink_);
}
if (((bitField0_ & 0x00000200) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(10, blockReplication_);
}
if (((bitField0_ & 0x00000400) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(11, blockSize_);
}
if (((bitField0_ & 0x00000800) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBytesSize(15, encryptionData_);
}
if (((bitField0_ & 0x00001000) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBytesSize(17, ecData_);
}
if (((bitField0_ & 0x00002000) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(18, flags_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.fs.FSProtos.FileStatusProto)) {
return super.equals(obj);
}
org.apache.hadoop.fs.FSProtos.FileStatusProto other = (org.apache.hadoop.fs.FSProtos.FileStatusProto) obj;
if (hasFileType() != other.hasFileType()) return false;
if (hasFileType()) {
if (fileType_ != other.fileType_) return false;
}
if (hasPath() != other.hasPath()) return false;
if (hasPath()) {
if (!getPath()
.equals(other.getPath())) return false;
}
if (hasLength() != other.hasLength()) return false;
if (hasLength()) {
if (getLength()
!= other.getLength()) return false;
}
if (hasPermission() != other.hasPermission()) return false;
if (hasPermission()) {
if (!getPermission()
.equals(other.getPermission())) return false;
}
if (hasOwner() != other.hasOwner()) return false;
if (hasOwner()) {
if (!getOwner()
.equals(other.getOwner())) return false;
}
if (hasGroup() != other.hasGroup()) return false;
if (hasGroup()) {
if (!getGroup()
.equals(other.getGroup())) return false;
}
if (hasModificationTime() != other.hasModificationTime()) return false;
if (hasModificationTime()) {
if (getModificationTime()
!= other.getModificationTime()) return false;
}
if (hasAccessTime() != other.hasAccessTime()) return false;
if (hasAccessTime()) {
if (getAccessTime()
!= other.getAccessTime()) return false;
}
if (hasSymlink() != other.hasSymlink()) return false;
if (hasSymlink()) {
if (!getSymlink()
.equals(other.getSymlink())) return false;
}
if (hasBlockReplication() != other.hasBlockReplication()) return false;
if (hasBlockReplication()) {
if (getBlockReplication()
!= other.getBlockReplication()) return false;
}
if (hasBlockSize() != other.hasBlockSize()) return false;
if (hasBlockSize()) {
if (getBlockSize()
!= other.getBlockSize()) return false;
}
if (hasEncryptionData() != other.hasEncryptionData()) return false;
if (hasEncryptionData()) {
if (!getEncryptionData()
.equals(other.getEncryptionData())) return false;
}
if (hasEcData() != other.hasEcData()) return false;
if (hasEcData()) {
if (!getEcData()
.equals(other.getEcData())) return false;
}
if (hasFlags() != other.hasFlags()) return false;
if (hasFlags()) {
if (getFlags()
!= other.getFlags()) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasFileType()) {
hash = (37 * hash) + FILETYPE_FIELD_NUMBER;
hash = (53 * hash) + fileType_;
}
if (hasPath()) {
hash = (37 * hash) + PATH_FIELD_NUMBER;
hash = (53 * hash) + getPath().hashCode();
}
if (hasLength()) {
hash = (37 * hash) + LENGTH_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getLength());
}
if (hasPermission()) {
hash = (37 * hash) + PERMISSION_FIELD_NUMBER;
hash = (53 * hash) + getPermission().hashCode();
}
if (hasOwner()) {
hash = (37 * hash) + OWNER_FIELD_NUMBER;
hash = (53 * hash) + getOwner().hashCode();
}
if (hasGroup()) {
hash = (37 * hash) + GROUP_FIELD_NUMBER;
hash = (53 * hash) + getGroup().hashCode();
}
if (hasModificationTime()) {
hash = (37 * hash) + MODIFICATION_TIME_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getModificationTime());
}
if (hasAccessTime()) {
hash = (37 * hash) + ACCESS_TIME_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getAccessTime());
}
if (hasSymlink()) {
hash = (37 * hash) + SYMLINK_FIELD_NUMBER;
hash = (53 * hash) + getSymlink().hashCode();
}
if (hasBlockReplication()) {
hash = (37 * hash) + BLOCK_REPLICATION_FIELD_NUMBER;
hash = (53 * hash) + getBlockReplication();
}
if (hasBlockSize()) {
hash = (37 * hash) + BLOCK_SIZE_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getBlockSize());
}
if (hasEncryptionData()) {
hash = (37 * hash) + ENCRYPTION_DATA_FIELD_NUMBER;
hash = (53 * hash) + getEncryptionData().hashCode();
}
if (hasEcData()) {
hash = (37 * hash) + EC_DATA_FIELD_NUMBER;
hash = (53 * hash) + getEcData().hashCode();
}
if (hasFlags()) {
hash = (37 * hash) + FLAGS_FIELD_NUMBER;
hash = (53 * hash) + getFlags();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.fs.FSProtos.FileStatusProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* FileStatus encoding. Field IDs match those from HdfsFileStatusProto, but
* cross-serialization is not an explicitly supported use case. Unlike HDFS,
* most fields are optional and do not define defaults.
*
* Protobuf type {@code hadoop.fs.FileStatusProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.fs.FileStatusProto)
org.apache.hadoop.fs.FSProtos.FileStatusProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FileStatusProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FileStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.fs.FSProtos.FileStatusProto.class, org.apache.hadoop.fs.FSProtos.FileStatusProto.Builder.class);
}
// Construct using org.apache.hadoop.fs.FSProtos.FileStatusProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getPermissionFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
fileType_ = 1;
path_ = "";
length_ = 0L;
permission_ = null;
if (permissionBuilder_ != null) {
permissionBuilder_.dispose();
permissionBuilder_ = null;
}
owner_ = "";
group_ = "";
modificationTime_ = 0L;
accessTime_ = 0L;
symlink_ = "";
blockReplication_ = 0;
blockSize_ = 0L;
encryptionData_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
ecData_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
flags_ = 0;
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_FileStatusProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.FileStatusProto getDefaultInstanceForType() {
return org.apache.hadoop.fs.FSProtos.FileStatusProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.FileStatusProto build() {
org.apache.hadoop.fs.FSProtos.FileStatusProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.FileStatusProto buildPartial() {
org.apache.hadoop.fs.FSProtos.FileStatusProto result = new org.apache.hadoop.fs.FSProtos.FileStatusProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.fs.FSProtos.FileStatusProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.fileType_ = fileType_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.path_ = path_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.length_ = length_;
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.permission_ = permissionBuilder_ == null
? permission_
: permissionBuilder_.build();
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
result.owner_ = owner_;
to_bitField0_ |= 0x00000010;
}
if (((from_bitField0_ & 0x00000020) != 0)) {
result.group_ = group_;
to_bitField0_ |= 0x00000020;
}
if (((from_bitField0_ & 0x00000040) != 0)) {
result.modificationTime_ = modificationTime_;
to_bitField0_ |= 0x00000040;
}
if (((from_bitField0_ & 0x00000080) != 0)) {
result.accessTime_ = accessTime_;
to_bitField0_ |= 0x00000080;
}
if (((from_bitField0_ & 0x00000100) != 0)) {
result.symlink_ = symlink_;
to_bitField0_ |= 0x00000100;
}
if (((from_bitField0_ & 0x00000200) != 0)) {
result.blockReplication_ = blockReplication_;
to_bitField0_ |= 0x00000200;
}
if (((from_bitField0_ & 0x00000400) != 0)) {
result.blockSize_ = blockSize_;
to_bitField0_ |= 0x00000400;
}
if (((from_bitField0_ & 0x00000800) != 0)) {
result.encryptionData_ = encryptionData_;
to_bitField0_ |= 0x00000800;
}
if (((from_bitField0_ & 0x00001000) != 0)) {
result.ecData_ = ecData_;
to_bitField0_ |= 0x00001000;
}
if (((from_bitField0_ & 0x00002000) != 0)) {
result.flags_ = flags_;
to_bitField0_ |= 0x00002000;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.fs.FSProtos.FileStatusProto) {
return mergeFrom((org.apache.hadoop.fs.FSProtos.FileStatusProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.fs.FSProtos.FileStatusProto other) {
if (other == org.apache.hadoop.fs.FSProtos.FileStatusProto.getDefaultInstance()) return this;
if (other.hasFileType()) {
setFileType(other.getFileType());
}
if (other.hasPath()) {
path_ = other.path_;
bitField0_ |= 0x00000002;
onChanged();
}
if (other.hasLength()) {
setLength(other.getLength());
}
if (other.hasPermission()) {
mergePermission(other.getPermission());
}
if (other.hasOwner()) {
owner_ = other.owner_;
bitField0_ |= 0x00000010;
onChanged();
}
if (other.hasGroup()) {
group_ = other.group_;
bitField0_ |= 0x00000020;
onChanged();
}
if (other.hasModificationTime()) {
setModificationTime(other.getModificationTime());
}
if (other.hasAccessTime()) {
setAccessTime(other.getAccessTime());
}
if (other.hasSymlink()) {
symlink_ = other.symlink_;
bitField0_ |= 0x00000100;
onChanged();
}
if (other.hasBlockReplication()) {
setBlockReplication(other.getBlockReplication());
}
if (other.hasBlockSize()) {
setBlockSize(other.getBlockSize());
}
if (other.hasEncryptionData()) {
setEncryptionData(other.getEncryptionData());
}
if (other.hasEcData()) {
setEcData(other.getEcData());
}
if (other.hasFlags()) {
setFlags(other.getFlags());
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasFileType()) {
return false;
}
if (!hasPath()) {
return false;
}
if (hasPermission()) {
if (!getPermission().isInitialized()) {
return false;
}
}
return true;
}
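// Note: FileStatusProto is a proto2 message, so the builder's isInitialized()
// above enforces its two required fields (fileType and path); permission is
// optional, but when present its own required perm field must be set, hence
// the nested getPermission().isInitialized() check.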
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
int tmpRaw = input.readEnum();
org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType tmpValue =
org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(1, tmpRaw);
} else {
fileType_ = tmpRaw;
bitField0_ |= 0x00000001;
}
break;
} // case 8
case 18: {
path_ = input.readBytes();
bitField0_ |= 0x00000002;
break;
} // case 18
case 24: {
length_ = input.readUInt64();
bitField0_ |= 0x00000004;
break;
} // case 24
case 34: {
input.readMessage(
getPermissionFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000008;
break;
} // case 34
case 42: {
owner_ = input.readBytes();
bitField0_ |= 0x00000010;
break;
} // case 42
case 50: {
group_ = input.readBytes();
bitField0_ |= 0x00000020;
break;
} // case 50
case 56: {
modificationTime_ = input.readUInt64();
bitField0_ |= 0x00000040;
break;
} // case 56
case 64: {
accessTime_ = input.readUInt64();
bitField0_ |= 0x00000080;
break;
} // case 64
case 74: {
symlink_ = input.readBytes();
bitField0_ |= 0x00000100;
break;
} // case 74
case 80: {
blockReplication_ = input.readUInt32();
bitField0_ |= 0x00000200;
break;
} // case 80
case 88: {
blockSize_ = input.readUInt64();
bitField0_ |= 0x00000400;
break;
} // case 88
case 122: {
encryptionData_ = input.readBytes();
bitField0_ |= 0x00000800;
break;
} // case 122
case 138: {
ecData_ = input.readBytes();
bitField0_ |= 0x00001000;
break;
} // case 138
case 144: {
flags_ = input.readUInt32();
bitField0_ |= 0x00002000;
break;
} // case 144
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
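// The case labels in the switch above are protobuf wire-format tags, computed
// as tag = (fieldNumber << 3) | wireType. For example, path (field 2,
// length-delimited wire type 2) yields (2 << 3) | 2 = 18, flags (field 18,
// varint wire type 0) yields (18 << 3) | 0 = 144, and encryption_data
// (field 15) yields 122 -- which is why the reserved field numbers 12-14
// noted in the comments below have no case here.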
private int bitField0_;
private int fileType_ = 1;
/**
* required .hadoop.fs.FileStatusProto.FileType fileType = 1;
* @return Whether the fileType field is set.
*/
@java.lang.Override public boolean hasFileType() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.fs.FileStatusProto.FileType fileType = 1;
* @return The fileType.
*/
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType getFileType() {
org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType result = org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType.forNumber(fileType_);
return result == null ? org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType.FT_DIR : result;
}
/**
* required .hadoop.fs.FileStatusProto.FileType fileType = 1;
* @param value The fileType to set.
* @return This builder for chaining.
*/
public Builder setFileType(org.apache.hadoop.fs.FSProtos.FileStatusProto.FileType value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
fileType_ = value.getNumber();
onChanged();
return this;
}
/**
* required .hadoop.fs.FileStatusProto.FileType fileType = 1;
* @return This builder for chaining.
*/
public Builder clearFileType() {
bitField0_ = (bitField0_ & ~0x00000001);
fileType_ = 1;
onChanged();
return this;
}
private java.lang.Object path_ = "";
/**
* required string path = 2;
* @return Whether the path field is set.
*/
public boolean hasPath() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required string path = 2;
* @return The path.
*/
public java.lang.String getPath() {
java.lang.Object ref = path_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
path_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string path = 2;
* @return The bytes for path.
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getPathBytes() {
java.lang.Object ref = path_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
path_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string path = 2;
* @param value The path to set.
* @return This builder for chaining.
*/
public Builder setPath(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
path_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* required string path = 2;
* @return This builder for chaining.
*/
public Builder clearPath() {
path_ = getDefaultInstance().getPath();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
/**
* required string path = 2;
* @param value The bytes for path to set.
* @return This builder for chaining.
*/
public Builder setPathBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
path_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
private long length_ ;
/**
* optional uint64 length = 3;
* @return Whether the length field is set.
*/
@java.lang.Override
public boolean hasLength() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional uint64 length = 3;
* @return The length.
*/
@java.lang.Override
public long getLength() {
return length_;
}
/**
* optional uint64 length = 3;
* @param value The length to set.
* @return This builder for chaining.
*/
public Builder setLength(long value) {
length_ = value;
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
* optional uint64 length = 3;
* @return This builder for chaining.
*/
public Builder clearLength() {
bitField0_ = (bitField0_ & ~0x00000004);
length_ = 0L;
onChanged();
return this;
}
private org.apache.hadoop.fs.FSProtos.FsPermissionProto permission_;
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.fs.FSProtos.FsPermissionProto, org.apache.hadoop.fs.FSProtos.FsPermissionProto.Builder, org.apache.hadoop.fs.FSProtos.FsPermissionProtoOrBuilder> permissionBuilder_;
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
* @return Whether the permission field is set.
*/
public boolean hasPermission() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
* @return The permission.
*/
public org.apache.hadoop.fs.FSProtos.FsPermissionProto getPermission() {
if (permissionBuilder_ == null) {
return permission_ == null ? org.apache.hadoop.fs.FSProtos.FsPermissionProto.getDefaultInstance() : permission_;
} else {
return permissionBuilder_.getMessage();
}
}
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
*/
public Builder setPermission(org.apache.hadoop.fs.FSProtos.FsPermissionProto value) {
if (permissionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
permission_ = value;
} else {
permissionBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
*/
public Builder setPermission(
org.apache.hadoop.fs.FSProtos.FsPermissionProto.Builder builderForValue) {
if (permissionBuilder_ == null) {
permission_ = builderForValue.build();
} else {
permissionBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
*/
public Builder mergePermission(org.apache.hadoop.fs.FSProtos.FsPermissionProto value) {
if (permissionBuilder_ == null) {
if (((bitField0_ & 0x00000008) != 0) &&
permission_ != null &&
permission_ != org.apache.hadoop.fs.FSProtos.FsPermissionProto.getDefaultInstance()) {
getPermissionBuilder().mergeFrom(value);
} else {
permission_ = value;
}
} else {
permissionBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
*/
public Builder clearPermission() {
bitField0_ = (bitField0_ & ~0x00000008);
permission_ = null;
if (permissionBuilder_ != null) {
permissionBuilder_.dispose();
permissionBuilder_ = null;
}
onChanged();
return this;
}
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.fs.FSProtos.FsPermissionProto.Builder getPermissionBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getPermissionFieldBuilder().getBuilder();
}
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.fs.FSProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
if (permissionBuilder_ != null) {
return permissionBuilder_.getMessageOrBuilder();
} else {
return permission_ == null ?
org.apache.hadoop.fs.FSProtos.FsPermissionProto.getDefaultInstance() : permission_;
}
}
/**
* optional .hadoop.fs.FsPermissionProto permission = 4;
*/
private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.fs.FSProtos.FsPermissionProto, org.apache.hadoop.fs.FSProtos.FsPermissionProto.Builder, org.apache.hadoop.fs.FSProtos.FsPermissionProtoOrBuilder>
getPermissionFieldBuilder() {
if (permissionBuilder_ == null) {
permissionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.fs.FSProtos.FsPermissionProto, org.apache.hadoop.fs.FSProtos.FsPermissionProto.Builder, org.apache.hadoop.fs.FSProtos.FsPermissionProtoOrBuilder>(
getPermission(),
getParentForChildren(),
isClean());
permission_ = null;
}
return permissionBuilder_;
}
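// Illustrative sketch (not generated code): setting versus merging the
// permission sub-message. Per proto2 merge semantics, mergePermission()
// overwrites only the sub-fields present in its argument. The perm values
// below are example assumptions.
//
//   FSProtos.FileStatusProto.Builder b = FSProtos.FileStatusProto.newBuilder();
//   b.setPermission(FSProtos.FsPermissionProto.newBuilder().setPerm(0644).build());
//   // perm is present in the incoming message, so the merge replaces it:
//   b.mergePermission(FSProtos.FsPermissionProto.newBuilder().setPerm(0600).build());
//   // b.getPermission().getPerm() is now 0600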
private java.lang.Object owner_ = "";
/**
* optional string owner = 5;
* @return Whether the owner field is set.
*/
public boolean hasOwner() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* optional string owner = 5;
* @return The owner.
*/
public java.lang.String getOwner() {
java.lang.Object ref = owner_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
owner_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string owner = 5;
* @return The bytes for owner.
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getOwnerBytes() {
java.lang.Object ref = owner_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
owner_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string owner = 5;
* @param value The owner to set.
* @return This builder for chaining.
*/
public Builder setOwner(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
owner_ = value;
bitField0_ |= 0x00000010;
onChanged();
return this;
}
/**
* optional string owner = 5;
* @return This builder for chaining.
*/
public Builder clearOwner() {
owner_ = getDefaultInstance().getOwner();
bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
return this;
}
/**
* optional string owner = 5;
* @param value The bytes for owner to set.
* @return This builder for chaining.
*/
public Builder setOwnerBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
owner_ = value;
bitField0_ |= 0x00000010;
onChanged();
return this;
}
private java.lang.Object group_ = "";
/**
* optional string group = 6;
* @return Whether the group field is set.
*/
public boolean hasGroup() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
* optional string group = 6;
* @return The group.
*/
public java.lang.String getGroup() {
java.lang.Object ref = group_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
group_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string group = 6;
* @return The bytes for group.
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getGroupBytes() {
java.lang.Object ref = group_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
group_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string group = 6;
* @param value The group to set.
* @return This builder for chaining.
*/
public Builder setGroup(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
group_ = value;
bitField0_ |= 0x00000020;
onChanged();
return this;
}
/**
* optional string group = 6;
* @return This builder for chaining.
*/
public Builder clearGroup() {
group_ = getDefaultInstance().getGroup();
bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
return this;
}
/**
* optional string group = 6;
* @param value The bytes for group to set.
* @return This builder for chaining.
*/
public Builder setGroupBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
group_ = value;
bitField0_ |= 0x00000020;
onChanged();
return this;
}
private long modificationTime_ ;
/**
* optional uint64 modification_time = 7;
* @return Whether the modificationTime field is set.
*/
@java.lang.Override
public boolean hasModificationTime() {
return ((bitField0_ & 0x00000040) != 0);
}
/**
* optional uint64 modification_time = 7;
* @return The modificationTime.
*/
@java.lang.Override
public long getModificationTime() {
return modificationTime_;
}
/**
* optional uint64 modification_time = 7;
* @param value The modificationTime to set.
* @return This builder for chaining.
*/
public Builder setModificationTime(long value) {
modificationTime_ = value;
bitField0_ |= 0x00000040;
onChanged();
return this;
}
/**
* optional uint64 modification_time = 7;
* @return This builder for chaining.
*/
public Builder clearModificationTime() {
bitField0_ = (bitField0_ & ~0x00000040);
modificationTime_ = 0L;
onChanged();
return this;
}
private long accessTime_ ;
/**
* optional uint64 access_time = 8;
* @return Whether the accessTime field is set.
*/
@java.lang.Override
public boolean hasAccessTime() {
return ((bitField0_ & 0x00000080) != 0);
}
/**
* optional uint64 access_time = 8;
* @return The accessTime.
*/
@java.lang.Override
public long getAccessTime() {
return accessTime_;
}
/**
* optional uint64 access_time = 8;
* @param value The accessTime to set.
* @return This builder for chaining.
*/
public Builder setAccessTime(long value) {
accessTime_ = value;
bitField0_ |= 0x00000080;
onChanged();
return this;
}
/**
* optional uint64 access_time = 8;
* @return This builder for chaining.
*/
public Builder clearAccessTime() {
bitField0_ = (bitField0_ & ~0x00000080);
accessTime_ = 0L;
onChanged();
return this;
}
private java.lang.Object symlink_ = "";
/**
* optional string symlink = 9;
* @return Whether the symlink field is set.
*/
public boolean hasSymlink() {
return ((bitField0_ & 0x00000100) != 0);
}
/**
* optional string symlink = 9;
* @return The symlink.
*/
public java.lang.String getSymlink() {
java.lang.Object ref = symlink_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
symlink_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string symlink = 9;
* @return The bytes for symlink.
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getSymlinkBytes() {
java.lang.Object ref = symlink_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
symlink_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string symlink = 9;
* @param value The symlink to set.
* @return This builder for chaining.
*/
public Builder setSymlink(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
symlink_ = value;
bitField0_ |= 0x00000100;
onChanged();
return this;
}
/**
* optional string symlink = 9;
* @return This builder for chaining.
*/
public Builder clearSymlink() {
symlink_ = getDefaultInstance().getSymlink();
bitField0_ = (bitField0_ & ~0x00000100);
onChanged();
return this;
}
/**
* optional string symlink = 9;
* @param value The bytes for symlink to set.
* @return This builder for chaining.
*/
public Builder setSymlinkBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
symlink_ = value;
bitField0_ |= 0x00000100;
onChanged();
return this;
}
private int blockReplication_ ;
/**
* optional uint32 block_replication = 10;
* @return Whether the blockReplication field is set.
*/
@java.lang.Override
public boolean hasBlockReplication() {
return ((bitField0_ & 0x00000200) != 0);
}
/**
* optional uint32 block_replication = 10;
* @return The blockReplication.
*/
@java.lang.Override
public int getBlockReplication() {
return blockReplication_;
}
/**
* optional uint32 block_replication = 10;
* @param value The blockReplication to set.
* @return This builder for chaining.
*/
public Builder setBlockReplication(int value) {
blockReplication_ = value;
bitField0_ |= 0x00000200;
onChanged();
return this;
}
/**
* optional uint32 block_replication = 10;
* @return This builder for chaining.
*/
public Builder clearBlockReplication() {
bitField0_ = (bitField0_ & ~0x00000200);
blockReplication_ = 0;
onChanged();
return this;
}
private long blockSize_ ;
/**
* optional uint64 block_size = 11;
* @return Whether the blockSize field is set.
*/
@java.lang.Override
public boolean hasBlockSize() {
return ((bitField0_ & 0x00000400) != 0);
}
/**
* optional uint64 block_size = 11;
* @return The blockSize.
*/
@java.lang.Override
public long getBlockSize() {
return blockSize_;
}
/**
* optional uint64 block_size = 11;
* @param value The blockSize to set.
* @return This builder for chaining.
*/
public Builder setBlockSize(long value) {
blockSize_ = value;
bitField0_ |= 0x00000400;
onChanged();
return this;
}
/**
* optional uint64 block_size = 11;
* @return This builder for chaining.
*/
public Builder clearBlockSize() {
bitField0_ = (bitField0_ & ~0x00000400);
blockSize_ = 0L;
onChanged();
return this;
}
private org.apache.hadoop.thirdparty.protobuf.ByteString encryptionData_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
*
* locations = 12
* alias = 13
* childrenNum = 14
*
*
* optional bytes encryption_data = 15;
* @return Whether the encryptionData field is set.
*/
@java.lang.Override
public boolean hasEncryptionData() {
return ((bitField0_ & 0x00000800) != 0);
}
/**
*
* locations = 12
* alias = 13
* childrenNum = 14
*
*
* optional bytes encryption_data = 15;
* @return The encryptionData.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionData() {
return encryptionData_;
}
/**
*
* locations = 12
* alias = 13
* childrenNum = 14
*
*
* optional bytes encryption_data = 15;
* @param value The encryptionData to set.
* @return This builder for chaining.
*/
public Builder setEncryptionData(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
encryptionData_ = value;
bitField0_ |= 0x00000800;
onChanged();
return this;
}
/**
*
* locations = 12
* alias = 13
* childrenNum = 14
*
*
* optional bytes encryption_data = 15;
* @return This builder for chaining.
*/
public Builder clearEncryptionData() {
bitField0_ = (bitField0_ & ~0x00000800);
encryptionData_ = getDefaultInstance().getEncryptionData();
onChanged();
return this;
}
private org.apache.hadoop.thirdparty.protobuf.ByteString ecData_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
/**
*
* storagePolicy = 16
*
*
* optional bytes ec_data = 17;
* @return Whether the ecData field is set.
*/
@java.lang.Override
public boolean hasEcData() {
return ((bitField0_ & 0x00001000) != 0);
}
/**
*
* storagePolicy = 16
*
*
* optional bytes ec_data = 17;
* @return The ecData.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString getEcData() {
return ecData_;
}
/**
*
* storagePolicy = 16
*
*
* optional bytes ec_data = 17;
* @param value The ecData to set.
* @return This builder for chaining.
*/
public Builder setEcData(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
ecData_ = value;
bitField0_ |= 0x00001000;
onChanged();
return this;
}
/**
*
* storagePolicy = 16
*
*
* optional bytes ec_data = 17;
* @return This builder for chaining.
*/
public Builder clearEcData() {
bitField0_ = (bitField0_ & ~0x00001000);
ecData_ = getDefaultInstance().getEcData();
onChanged();
return this;
}
private int flags_ ;
/**
* optional uint32 flags = 18 [default = 0];
* @return Whether the flags field is set.
*/
@java.lang.Override
public boolean hasFlags() {
return ((bitField0_ & 0x00002000) != 0);
}
/**
* optional uint32 flags = 18 [default = 0];
* @return The flags.
*/
@java.lang.Override
public int getFlags() {
return flags_;
}
/**
* optional uint32 flags = 18 [default = 0];
* @param value The flags to set.
* @return This builder for chaining.
*/
public Builder setFlags(int value) {
flags_ = value;
bitField0_ |= 0x00002000;
onChanged();
return this;
}
/**
* optional uint32 flags = 18 [default = 0];
* @return This builder for chaining.
*/
public Builder clearFlags() {
bitField0_ = (bitField0_ & ~0x00002000);
flags_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.fs.FileStatusProto)
}
// @@protoc_insertion_point(class_scope:hadoop.fs.FileStatusProto)
private static final org.apache.hadoop.fs.FSProtos.FileStatusProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.fs.FSProtos.FileStatusProto();
}
public static org.apache.hadoop.fs.FSProtos.FileStatusProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FileStatusProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FileStatusProto>() {
@java.lang.Override
public FileStatusProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<FileStatusProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<FileStatusProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.FileStatusProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
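// Illustrative usage (a sketch relying only on the generated methods above,
// with example values assumed): building a FileStatusProto and round-tripping
// it through the wire format.
//
//   FSProtos.FileStatusProto status = FSProtos.FileStatusProto.newBuilder()
//       .setFileType(FSProtos.FileStatusProto.FileType.FT_FILE) // required
//       .setPath("/user/example/file.txt")                      // required
//       .setLength(1024L)
//       .setPermission(FSProtos.FsPermissionProto.newBuilder().setPerm(0644))
//       .build(); // build() throws if a required field is unset
//   byte[] wire = status.toByteArray();
//   FSProtos.FileStatusProto parsed = FSProtos.FileStatusProto.parseFrom(wire);
//   assert parsed.getPath().equals(status.getPath());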
public interface LocalFileSystemPathHandleProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.fs.LocalFileSystemPathHandleProto)
org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* optional uint64 mtime = 1;
* @return Whether the mtime field is set.
*/
boolean hasMtime();
/**
* optional uint64 mtime = 1;
* @return The mtime.
*/
long getMtime();
/**
* optional string path = 2;
* @return Whether the path field is set.
*/
boolean hasPath();
/**
* optional string path = 2;
* @return The path.
*/
java.lang.String getPath();
/**
* optional string path = 2;
* @return The bytes for path.
*/
org.apache.hadoop.thirdparty.protobuf.ByteString
getPathBytes();
}
/**
*
* Placeholder type for consistent basic FileSystem operations.
*
*
* Protobuf type {@code hadoop.fs.LocalFileSystemPathHandleProto}
*/
public static final class LocalFileSystemPathHandleProto extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.fs.LocalFileSystemPathHandleProto)
LocalFileSystemPathHandleProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use LocalFileSystemPathHandleProto.newBuilder() to construct.
private LocalFileSystemPathHandleProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private LocalFileSystemPathHandleProto() {
path_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new LocalFileSystemPathHandleProto();
}
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_LocalFileSystemPathHandleProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_LocalFileSystemPathHandleProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto.class, org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto.Builder.class);
}
private int bitField0_;
public static final int MTIME_FIELD_NUMBER = 1;
private long mtime_ = 0L;
/**
* optional uint64 mtime = 1;
* @return Whether the mtime field is set.
*/
@java.lang.Override
public boolean hasMtime() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* optional uint64 mtime = 1;
* @return The mtime.
*/
@java.lang.Override
public long getMtime() {
return mtime_;
}
public static final int PATH_FIELD_NUMBER = 2;
@SuppressWarnings("serial")
private volatile java.lang.Object path_ = "";
/**
* optional string path = 2;
* @return Whether the path field is set.
*/
@java.lang.Override
public boolean hasPath() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string path = 2;
* @return The path.
*/
@java.lang.Override
public java.lang.String getPath() {
java.lang.Object ref = path_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
path_ = s;
}
return s;
}
}
/**
* optional string path = 2;
* @return The bytes for path.
*/
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.ByteString
getPathBytes() {
java.lang.Object ref = path_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
path_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeUInt64(1, mtime_);
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, path_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(1, mtime_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, path_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto)) {
return super.equals(obj);
}
org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto other = (org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto) obj;
if (hasMtime() != other.hasMtime()) return false;
if (hasMtime()) {
if (getMtime()
!= other.getMtime()) return false;
}
if (hasPath() != other.hasPath()) return false;
if (hasPath()) {
if (!getPath()
.equals(other.getPath())) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasMtime()) {
hash = (37 * hash) + MTIME_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getMtime());
}
if (hasPath()) {
hash = (37 * hash) + PATH_FIELD_NUMBER;
hash = (53 * hash) + getPath().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseFrom(byte[] data)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseFrom(
byte[] data,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto parseFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Placeholder type for consistent basic FileSystem operations.
*
*
* Protobuf type {@code hadoop.fs.LocalFileSystemPathHandleProto}
*/
public static final class Builder extends
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.fs.LocalFileSystemPathHandleProto)
org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProtoOrBuilder {
public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_LocalFileSystemPathHandleProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_LocalFileSystemPathHandleProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto.class, org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto.Builder.class);
}
// Construct using org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto.newBuilder()
private Builder() {
}
private Builder(
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
mtime_ = 0L;
path_ = "";
return this;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.fs.FSProtos.internal_static_hadoop_fs_LocalFileSystemPathHandleProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto getDefaultInstanceForType() {
return org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto build() {
org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto buildPartial() {
org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto result = new org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.mtime_ = mtime_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.path_ = path_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto) {
return mergeFrom((org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto other) {
if (other == org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto.getDefaultInstance()) return this;
if (other.hasMtime()) {
setMtime(other.getMtime());
}
if (other.hasPath()) {
path_ = other.path_;
bitField0_ |= 0x00000002;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
mtime_ = input.readUInt64();
bitField0_ |= 0x00000001;
break;
} // case 8
case 18: {
path_ = input.readBytes();
bitField0_ |= 0x00000002;
break;
} // case 18
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private long mtime_ ;
/**
* optional uint64 mtime = 1;
* @return Whether the mtime field is set.
*/
@java.lang.Override
public boolean hasMtime() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* optional uint64 mtime = 1;
* @return The mtime.
*/
@java.lang.Override
public long getMtime() {
return mtime_;
}
/**
* optional uint64 mtime = 1;
* @param value The mtime to set.
* @return This builder for chaining.
*/
public Builder setMtime(long value) {
mtime_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* optional uint64 mtime = 1;
* @return This builder for chaining.
*/
public Builder clearMtime() {
bitField0_ = (bitField0_ & ~0x00000001);
mtime_ = 0L;
onChanged();
return this;
}
private java.lang.Object path_ = "";
/**
* optional string path = 2;
* @return Whether the path field is set.
*/
public boolean hasPath() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string path = 2;
* @return The path.
*/
public java.lang.String getPath() {
java.lang.Object ref = path_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
path_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string path = 2;
* @return The bytes for path.
*/
public org.apache.hadoop.thirdparty.protobuf.ByteString
getPathBytes() {
java.lang.Object ref = path_;
if (ref instanceof String) {
org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
path_ = b;
return b;
} else {
return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string path = 2;
* @param value The path to set.
* @return This builder for chaining.
*/
public Builder setPath(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
path_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* optional string path = 2;
* @return This builder for chaining.
*/
public Builder clearPath() {
path_ = getDefaultInstance().getPath();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
/**
* optional string path = 2;
* @param value The bytes for path to set.
* @return This builder for chaining.
*/
public Builder setPathBytes(
org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
path_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.fs.LocalFileSystemPathHandleProto)
}
// @@protoc_insertion_point(class_scope:hadoop.fs.LocalFileSystemPathHandleProto)
private static final org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto();
}
public static org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<LocalFileSystemPathHandleProto>
PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<LocalFileSystemPathHandleProto>() {
@java.lang.Override
public LocalFileSystemPathHandleProto parsePartialFrom(
org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static org.apache.hadoop.thirdparty.protobuf.Parser<LocalFileSystemPathHandleProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.thirdparty.protobuf.Parser<LocalFileSystemPathHandleProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
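// Illustrative sketch: LocalFileSystemPathHandleProto has no required fields,
// so even an empty message is valid; a typical handle pairs a path with the
// file's modification time so the handle can later be validated. Example
// values below are assumptions.
//
//   FSProtos.LocalFileSystemPathHandleProto handle =
//       FSProtos.LocalFileSystemPathHandleProto.newBuilder()
//           .setPath("/tmp/example")
//           .setMtime(System.currentTimeMillis())
//           .build();
//   FSProtos.LocalFileSystemPathHandleProto copy =
//       FSProtos.LocalFileSystemPathHandleProto.parseFrom(handle.toByteArray());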
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_fs_FsPermissionProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_fs_FsPermissionProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_fs_FileStatusProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_fs_FileStatusProto_fieldAccessorTable;
private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_fs_LocalFileSystemPathHandleProto_descriptor;
private static final
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_fs_LocalFileSystemPathHandleProto_fieldAccessorTable;
public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\016FSProtos.proto\022\thadoop.fs\"!\n\021FsPermiss" +
"ionProto\022\014\n\004perm\030\001 \002(\r\"\336\003\n\017FileStatusPro" +
"to\0225\n\010fileType\030\001 \002(\0162#.hadoop.fs.FileSta" +
"tusProto.FileType\022\014\n\004path\030\002 \002(\t\022\016\n\006lengt" +
"h\030\003 \001(\004\0220\n\npermission\030\004 \001(\0132\034.hadoop.fs." +
"FsPermissionProto\022\r\n\005owner\030\005 \001(\t\022\r\n\005grou" +
"p\030\006 \001(\t\022\031\n\021modification_time\030\007 \001(\004\022\023\n\013ac" +
"cess_time\030\010 \001(\004\022\017\n\007symlink\030\t \001(\t\022\031\n\021bloc" +
"k_replication\030\n \001(\r\022\022\n\nblock_size\030\013 \001(\004\022" +
"\027\n\017encryption_data\030\017 \001(\014\022\017\n\007ec_data\030\021 \001(" +
"\014\022\020\n\005flags\030\022 \001(\r:\0010\"3\n\010FileType\022\n\n\006FT_DI" +
"R\020\001\022\013\n\007FT_FILE\020\002\022\016\n\nFT_SYMLINK\020\003\"E\n\005Flag" +
"s\022\013\n\007HAS_ACL\020\001\022\r\n\tHAS_CRYPT\020\002\022\n\n\006HAS_EC\020" +
"\004\022\024\n\020SNAPSHOT_ENABLED\020\010\"=\n\036LocalFileSyst" +
"emPathHandleProto\022\r\n\005mtime\030\001 \001(\004\022\014\n\004path" +
"\030\002 \001(\tB&\n\024org.apache.hadoop.fsB\010FSProtos" +
"\210\001\001\240\001\001"
};
descriptor = org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] {
});
internal_static_hadoop_fs_FsPermissionProto_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_hadoop_fs_FsPermissionProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_fs_FsPermissionProto_descriptor,
new java.lang.String[] { "Perm", });
internal_static_hadoop_fs_FileStatusProto_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_hadoop_fs_FileStatusProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_fs_FileStatusProto_descriptor,
new java.lang.String[] { "FileType", "Path", "Length", "Permission", "Owner", "Group", "ModificationTime", "AccessTime", "Symlink", "BlockReplication", "BlockSize", "EncryptionData", "EcData", "Flags", });
internal_static_hadoop_fs_LocalFileSystemPathHandleProto_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_hadoop_fs_LocalFileSystemPathHandleProto_fieldAccessorTable = new
org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_fs_LocalFileSystemPathHandleProto_descriptor,
new java.lang.String[] { "Mtime", "Path", });
}
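// The descriptorData string above is the serialized FileDescriptorProto for
// FSProtos.proto, embedded as escaped bytes. internalBuildGeneratedFileFrom
// decodes it at class-load time, and the field accessor tables built from it
// back the reflective Message APIs (getField, getDescriptor, etc.).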
// @@protoc_insertion_point(outer_class_scope)
}