// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: JournalProtocol.proto
package org.apache.hadoop.hdfs.protocol.proto;
public final class JournalProtocolProtos {
private JournalProtocolProtos() {}
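// JournalProtocol.proto declares no protobuf extensions, so there is nothing
// to register here; the method exists to satisfy the standard generated-file
// interface.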
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
public interface JournalInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string clusterID = 1;
boolean hasClusterID();
String getClusterID();
// optional uint32 layoutVersion = 2;
boolean hasLayoutVersion();
int getLayoutVersion();
// optional uint32 namespaceID = 3;
boolean hasNamespaceID();
int getNamespaceID();
}
public static final class JournalInfoProto extends
com.google.protobuf.GeneratedMessage
implements JournalInfoProtoOrBuilder {
// Use JournalInfoProto.newBuilder() to construct.
private JournalInfoProto(Builder builder) {
super(builder);
}
private JournalInfoProto(boolean noInit) {}
private static final JournalInfoProto defaultInstance;
public static JournalInfoProto getDefaultInstance() {
return defaultInstance;
}
public JournalInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalInfoProto_fieldAccessorTable;
}
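// One "has" bit per field: bit 0 = clusterID, bit 1 = layoutVersion,
// bit 2 = namespaceID.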
private int bitField0_;
// required string clusterID = 1;
public static final int CLUSTERID_FIELD_NUMBER = 1;
private java.lang.Object clusterID_;
public boolean hasClusterID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getClusterID() {
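// clusterID_ holds either a decoded String or the raw ByteString; decode
// lazily, and cache the decoded String only when the bytes are valid UTF-8.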
java.lang.Object ref = clusterID_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
clusterID_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getClusterIDBytes() {
java.lang.Object ref = clusterID_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
clusterID_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional uint32 layoutVersion = 2;
public static final int LAYOUTVERSION_FIELD_NUMBER = 2;
private int layoutVersion_;
public boolean hasLayoutVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public int getLayoutVersion() {
return layoutVersion_;
}
// optional uint32 namespaceID = 3;
public static final int NAMESPACEID_FIELD_NUMBER = 3;
private int namespaceID_;
public boolean hasNamespaceID() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public int getNamespaceID() {
return namespaceID_;
}
private void initFields() {
clusterID_ = "";
layoutVersion_ = 0;
namespaceID_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasClusterID()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getClusterIDBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, layoutVersion_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(3, namespaceID_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getClusterIDBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, layoutVersion_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, namespaceID_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto) obj;
boolean result = true;
result = result && (hasClusterID() == other.hasClusterID());
if (hasClusterID()) {
result = result && getClusterID()
.equals(other.getClusterID());
}
result = result && (hasLayoutVersion() == other.hasLayoutVersion());
if (hasLayoutVersion()) {
result = result && (getLayoutVersion()
== other.getLayoutVersion());
}
result = result && (hasNamespaceID() == other.hasNamespaceID());
if (hasNamespaceID()) {
result = result && (getNamespaceID()
== other.getNamespaceID());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasClusterID()) {
hash = (37 * hash) + CLUSTERID_FIELD_NUMBER;
hash = (53 * hash) + getClusterID().hashCode();
}
if (hasLayoutVersion()) {
hash = (37 * hash) + LAYOUTVERSION_FIELD_NUMBER;
hash = (53 * hash) + getLayoutVersion();
}
if (hasNamespaceID()) {
hash = (37 * hash) + NAMESPACEID_FIELD_NUMBER;
hash = (53 * hash) + getNamespaceID();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalInfoProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
clusterID_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
layoutVersion_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
namespaceID_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.clusterID_ = clusterID_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.layoutVersion_ = layoutVersion_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.namespaceID_ = namespaceID_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance()) return this;
if (other.hasClusterID()) {
setClusterID(other.getClusterID());
}
if (other.hasLayoutVersion()) {
setLayoutVersion(other.getLayoutVersion());
}
if (other.hasNamespaceID()) {
setNamespaceID(other.getNamespaceID());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasClusterID()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
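// A tag is (field_number << 3) | wire_type, so 10 = field 1 (length-delimited),
// 16 = field 2 (varint), 24 = field 3 (varint). Tag 0 marks end of input, and
// any unmatched tag is preserved in unknownFields for compatibility.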
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
clusterID_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
layoutVersion_ = input.readUInt32();
break;
}
case 24: {
bitField0_ |= 0x00000004;
namespaceID_ = input.readUInt32();
break;
}
}
}
}
private int bitField0_;
// required string clusterID = 1;
private java.lang.Object clusterID_ = "";
public boolean hasClusterID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getClusterID() {
java.lang.Object ref = clusterID_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
clusterID_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setClusterID(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
clusterID_ = value;
onChanged();
return this;
}
public Builder clearClusterID() {
bitField0_ = (bitField0_ & ~0x00000001);
clusterID_ = getDefaultInstance().getClusterID();
onChanged();
return this;
}
void setClusterID(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000001;
clusterID_ = value;
onChanged();
}
// optional uint32 layoutVersion = 2;
private int layoutVersion_ ;
public boolean hasLayoutVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public int getLayoutVersion() {
return layoutVersion_;
}
public Builder setLayoutVersion(int value) {
bitField0_ |= 0x00000002;
layoutVersion_ = value;
onChanged();
return this;
}
public Builder clearLayoutVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
layoutVersion_ = 0;
onChanged();
return this;
}
// optional uint32 namespaceID = 3;
private int namespaceID_ ;
public boolean hasNamespaceID() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public int getNamespaceID() {
return namespaceID_;
}
public Builder setNamespaceID(int value) {
bitField0_ |= 0x00000004;
namespaceID_ = value;
onChanged();
return this;
}
public Builder clearNamespaceID() {
bitField0_ = (bitField0_ & ~0x00000004);
namespaceID_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:JournalInfoProto)
}
static {
defaultInstance = new JournalInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:JournalInfoProto)
}
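// A minimal hand-written usage sketch (not protoc output) of the message
// above: the builder enforces the required clusterID, and the literal values
// here are illustrative placeholders only.
public static JournalInfoProto exampleJournalInfoRoundTrip()
    throws com.google.protobuf.InvalidProtocolBufferException {
  JournalInfoProto info = JournalInfoProto.newBuilder()
      .setClusterID("CID-example")  // required: build() throws if unset
      .setLayoutVersion(42)         // optional uint32
      .setNamespaceID(7)            // optional uint32
      .build();
  byte[] bytes = info.toByteArray();        // serialize to the wire format
  return JournalInfoProto.parseFrom(bytes); // parse it back
}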
public interface JournalRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .JournalInfoProto journalInfo = 1;
boolean hasJournalInfo();
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto getJournalInfo();
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder getJournalInfoOrBuilder();
// required uint64 firstTxnId = 2;
boolean hasFirstTxnId();
long getFirstTxnId();
// required uint32 numTxns = 3;
boolean hasNumTxns();
int getNumTxns();
// required bytes records = 4;
boolean hasRecords();
com.google.protobuf.ByteString getRecords();
}
public static final class JournalRequestProto extends
com.google.protobuf.GeneratedMessage
implements JournalRequestProtoOrBuilder {
// Use JournalRequestProto.newBuilder() to construct.
private JournalRequestProto(Builder builder) {
super(builder);
}
private JournalRequestProto(boolean noInit) {}
private static final JournalRequestProto defaultInstance;
public static JournalRequestProto getDefaultInstance() {
return defaultInstance;
}
public JournalRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_fieldAccessorTable;
}
private int bitField0_;
// required .JournalInfoProto journalInfo = 1;
public static final int JOURNALINFO_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto journalInfo_;
public boolean hasJournalInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto getJournalInfo() {
return journalInfo_;
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder getJournalInfoOrBuilder() {
return journalInfo_;
}
// required uint64 firstTxnId = 2;
public static final int FIRSTTXNID_FIELD_NUMBER = 2;
private long firstTxnId_;
public boolean hasFirstTxnId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public long getFirstTxnId() {
return firstTxnId_;
}
// required uint32 numTxns = 3;
public static final int NUMTXNS_FIELD_NUMBER = 3;
private int numTxns_;
public boolean hasNumTxns() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public int getNumTxns() {
return numTxns_;
}
// required bytes records = 4;
public static final int RECORDS_FIELD_NUMBER = 4;
private com.google.protobuf.ByteString records_;
public boolean hasRecords() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public com.google.protobuf.ByteString getRecords() {
return records_;
}
private void initFields() {
journalInfo_ = org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance();
firstTxnId_ = 0L;
numTxns_ = 0;
records_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasJournalInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasFirstTxnId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNumTxns()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasRecords()) {
memoizedIsInitialized = 0;
return false;
}
if (!getJournalInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, journalInfo_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, firstTxnId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(3, numTxns_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, records_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, journalInfo_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, firstTxnId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, numTxns_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, records_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto) obj;
boolean result = true;
result = result && (hasJournalInfo() == other.hasJournalInfo());
if (hasJournalInfo()) {
result = result && getJournalInfo()
.equals(other.getJournalInfo());
}
result = result && (hasFirstTxnId() == other.hasFirstTxnId());
if (hasFirstTxnId()) {
result = result && (getFirstTxnId()
== other.getFirstTxnId());
}
result = result && (hasNumTxns() == other.hasNumTxns());
if (hasNumTxns()) {
result = result && (getNumTxns()
== other.getNumTxns());
}
result = result && (hasRecords() == other.hasRecords());
if (hasRecords()) {
result = result && getRecords()
.equals(other.getRecords());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasJournalInfo()) {
hash = (37 * hash) + JOURNALINFO_FIELD_NUMBER;
hash = (53 * hash) + getJournalInfo().hashCode();
}
if (hasFirstTxnId()) {
hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getFirstTxnId());
}
if (hasNumTxns()) {
hash = (37 * hash) + NUMTXNS_FIELD_NUMBER;
hash = (53 * hash) + getNumTxns();
}
if (hasRecords()) {
hash = (37 * hash) + RECORDS_FIELD_NUMBER;
hash = (53 * hash) + getRecords().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getJournalInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (journalInfoBuilder_ == null) {
journalInfo_ = org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance();
} else {
journalInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
firstTxnId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
numTxns_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
records_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (journalInfoBuilder_ == null) {
result.journalInfo_ = journalInfo_;
} else {
result.journalInfo_ = journalInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.firstTxnId_ = firstTxnId_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.numTxns_ = numTxns_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.records_ = records_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this;
if (other.hasJournalInfo()) {
mergeJournalInfo(other.getJournalInfo());
}
if (other.hasFirstTxnId()) {
setFirstTxnId(other.getFirstTxnId());
}
if (other.hasNumTxns()) {
setNumTxns(other.getNumTxns());
}
if (other.hasRecords()) {
setRecords(other.getRecords());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasJournalInfo()) {
return false;
}
if (!hasFirstTxnId()) {
return false;
}
if (!hasNumTxns()) {
return false;
}
if (!hasRecords()) {
return false;
}
if (!getJournalInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.newBuilder();
if (hasJournalInfo()) {
subBuilder.mergeFrom(getJournalInfo());
}
input.readMessage(subBuilder, extensionRegistry);
setJournalInfo(subBuilder.buildPartial());
break;
}
case 16: {
bitField0_ |= 0x00000002;
firstTxnId_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
numTxns_ = input.readUInt32();
break;
}
case 34: {
bitField0_ |= 0x00000008;
records_ = input.readBytes();
break;
}
}
}
}
private int bitField0_;
// required .JournalInfoProto journalInfo = 1;
private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto journalInfo_ = org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder> journalInfoBuilder_;
public boolean hasJournalInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto getJournalInfo() {
if (journalInfoBuilder_ == null) {
return journalInfo_;
} else {
return journalInfoBuilder_.getMessage();
}
}
public Builder setJournalInfo(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto value) {
if (journalInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
journalInfo_ = value;
onChanged();
} else {
journalInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setJournalInfo(
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder builderForValue) {
if (journalInfoBuilder_ == null) {
journalInfo_ = builderForValue.build();
onChanged();
} else {
journalInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeJournalInfo(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto value) {
if (journalInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
journalInfo_ != org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance()) {
journalInfo_ =
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.newBuilder(journalInfo_).mergeFrom(value).buildPartial();
} else {
journalInfo_ = value;
}
onChanged();
} else {
journalInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearJournalInfo() {
if (journalInfoBuilder_ == null) {
journalInfo_ = org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance();
onChanged();
} else {
journalInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder getJournalInfoBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getJournalInfoFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder getJournalInfoOrBuilder() {
if (journalInfoBuilder_ != null) {
return journalInfoBuilder_.getMessageOrBuilder();
} else {
return journalInfo_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder>
getJournalInfoFieldBuilder() {
if (journalInfoBuilder_ == null) {
journalInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder>(
journalInfo_,
getParentForChildren(),
isClean());
journalInfo_ = null;
}
return journalInfoBuilder_;
}
// required uint64 firstTxnId = 2;
private long firstTxnId_ ;
public boolean hasFirstTxnId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public long getFirstTxnId() {
return firstTxnId_;
}
public Builder setFirstTxnId(long value) {
bitField0_ |= 0x00000002;
firstTxnId_ = value;
onChanged();
return this;
}
public Builder clearFirstTxnId() {
bitField0_ = (bitField0_ & ~0x00000002);
firstTxnId_ = 0L;
onChanged();
return this;
}
// required uint32 numTxns = 3;
private int numTxns_ ;
public boolean hasNumTxns() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public int getNumTxns() {
return numTxns_;
}
public Builder setNumTxns(int value) {
bitField0_ |= 0x00000004;
numTxns_ = value;
onChanged();
return this;
}
public Builder clearNumTxns() {
bitField0_ = (bitField0_ & ~0x00000004);
numTxns_ = 0;
onChanged();
return this;
}
// required bytes records = 4;
private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY;
public boolean hasRecords() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public com.google.protobuf.ByteString getRecords() {
return records_;
}
public Builder setRecords(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
records_ = value;
onChanged();
return this;
}
public Builder clearRecords() {
bitField0_ = (bitField0_ & ~0x00000008);
records_ = getDefaultInstance().getRecords();
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:JournalRequestProto)
}
static {
defaultInstance = new JournalRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:JournalRequestProto)
}
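// A minimal hand-written sketch (not protoc output) of assembling a journal
// request: all four fields are required, and the txid, count, and record
// bytes below are illustrative placeholders.
public static JournalRequestProto exampleJournalRequest(JournalInfoProto info) {
  return JournalRequestProto.newBuilder()
      .setJournalInfo(info)   // identity of the journal being written to
      .setFirstTxnId(1001L)   // txid of the first edit in this batch
      .setNumTxns(2)          // number of edits serialized in records
      .setRecords(com.google.protobuf.ByteString.copyFromUtf8("opaque-edit-bytes"))
      .build();               // throws if any required field is unset
}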
public interface JournalResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
public static final class JournalResponseProto extends
com.google.protobuf.GeneratedMessage
implements JournalResponseProtoOrBuilder {
// Use JournalResponseProto.newBuilder() to construct.
private JournalResponseProto(Builder builder) {
super(builder);
}
private JournalResponseProto(boolean noInit) {}
private static final JournalResponseProto defaultInstance;
public static JournalResponseProto getDefaultInstance() {
return defaultInstance;
}
public JournalResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_fieldAccessorTable;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
}
}
}
// @@protoc_insertion_point(builder_scope:JournalResponseProto)
}
static {
defaultInstance = new JournalResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:JournalResponseProto)
}
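// A hand-written sketch of round-tripping the (empty) response over a stream:
// writeDelimitedTo/parseDelimitedFrom frame each message with a length prefix,
// and parseDelimitedFrom returns null once the stream is exhausted.
public static void exampleResponseRoundTrip() throws java.io.IOException {
  java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  JournalResponseProto.getDefaultInstance().writeDelimitedTo(out);
  java.io.ByteArrayInputStream in =
      new java.io.ByteArrayInputStream(out.toByteArray());
  JournalResponseProto resp = JournalResponseProto.parseDelimitedFrom(in);
  assert resp != null;                                        // the framed message
  assert JournalResponseProto.parseDelimitedFrom(in) == null; // end of stream
}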
public interface StartLogSegmentRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .JournalInfoProto journalInfo = 1;
boolean hasJournalInfo();
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto getJournalInfo();
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder getJournalInfoOrBuilder();
// required uint64 txid = 2;
boolean hasTxid();
long getTxid();
}
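// By the same generated-builder pattern as the messages above, a request to
// open a new log segment pairs the journal identity with the segment's first
// txid (a sketch; the setTxid setter is assumed from the codegen convention,
// as the builder body falls outside this listing):
//
//   StartLogSegmentRequestProto req = StartLogSegmentRequestProto.newBuilder()
//       .setJournalInfo(info)
//       .setTxid(2001L)  // first transaction id of the segment being opened
//       .build();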
public static final class StartLogSegmentRequestProto extends
com.google.protobuf.GeneratedMessage
implements StartLogSegmentRequestProtoOrBuilder {
// Use StartLogSegmentRequestProto.newBuilder() to construct.
private StartLogSegmentRequestProto(Builder builder) {
super(builder);
}
private StartLogSegmentRequestProto(boolean noInit) {}
private static final StartLogSegmentRequestProto defaultInstance;
public static StartLogSegmentRequestProto getDefaultInstance() {
return defaultInstance;
}
public StartLogSegmentRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_fieldAccessorTable;
}
private int bitField0_;
// required .JournalInfoProto journalInfo = 1;
public static final int JOURNALINFO_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto journalInfo_;
public boolean hasJournalInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto getJournalInfo() {
return journalInfo_;
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder getJournalInfoOrBuilder() {
return journalInfo_;
}
// required uint64 txid = 2;
public static final int TXID_FIELD_NUMBER = 2;
private long txid_;
public boolean hasTxid() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public long getTxid() {
return txid_;
}
private void initFields() {
journalInfo_ = org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance();
txid_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasJournalInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasTxid()) {
memoizedIsInitialized = 0;
return false;
}
if (!getJournalInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, journalInfo_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, txid_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, journalInfo_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, txid_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto) obj;
boolean result = true;
result = result && (hasJournalInfo() == other.hasJournalInfo());
if (hasJournalInfo()) {
result = result && getJournalInfo()
.equals(other.getJournalInfo());
}
result = result && (hasTxid() == other.hasTxid());
if (hasTxid()) {
result = result && (getTxid()
== other.getTxid());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasJournalInfo()) {
hash = (37 * hash) + JOURNALINFO_FIELD_NUMBER;
hash = (53 * hash) + getJournalInfo().hashCode();
}
if (hasTxid()) {
hash = (37 * hash) + TXID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTxid());
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getJournalInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (journalInfoBuilder_ == null) {
journalInfo_ = org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance();
} else {
journalInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
txid_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (journalInfoBuilder_ == null) {
result.journalInfo_ = journalInfo_;
} else {
result.journalInfo_ = journalInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.txid_ = txid_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this;
if (other.hasJournalInfo()) {
mergeJournalInfo(other.getJournalInfo());
}
if (other.hasTxid()) {
setTxid(other.getTxid());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasJournalInfo()) {
return false;
}
if (!hasTxid()) {
return false;
}
if (!getJournalInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
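// Illustrative comment (not emitted by protoc): tag 10 encodes field 1
// (journalInfo) with wire type 2 (length-delimited), i.e. (1 << 3) | 2.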
case 10: {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.newBuilder();
if (hasJournalInfo()) {
subBuilder.mergeFrom(getJournalInfo());
}
input.readMessage(subBuilder, extensionRegistry);
setJournalInfo(subBuilder.buildPartial());
break;
}
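// Illustrative comment (not emitted by protoc): tag 16 encodes field 2
// (txid) with wire type 0 (varint), i.e. (2 << 3) | 0.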
case 16: {
bitField0_ |= 0x00000002;
txid_ = input.readUInt64();
break;
}
}
}
}
private int bitField0_;
// required .JournalInfoProto journalInfo = 1;
private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto journalInfo_ = org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder> journalInfoBuilder_;
public boolean hasJournalInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto getJournalInfo() {
if (journalInfoBuilder_ == null) {
return journalInfo_;
} else {
return journalInfoBuilder_.getMessage();
}
}
public Builder setJournalInfo(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto value) {
if (journalInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
journalInfo_ = value;
onChanged();
} else {
journalInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setJournalInfo(
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder builderForValue) {
if (journalInfoBuilder_ == null) {
journalInfo_ = builderForValue.build();
onChanged();
} else {
journalInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeJournalInfo(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto value) {
if (journalInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
journalInfo_ != org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance()) {
journalInfo_ =
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.newBuilder(journalInfo_).mergeFrom(value).buildPartial();
} else {
journalInfo_ = value;
}
onChanged();
} else {
journalInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearJournalInfo() {
if (journalInfoBuilder_ == null) {
journalInfo_ = org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.getDefaultInstance();
onChanged();
} else {
journalInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder getJournalInfoBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getJournalInfoFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder getJournalInfoOrBuilder() {
if (journalInfoBuilder_ != null) {
return journalInfoBuilder_.getMessageOrBuilder();
} else {
return journalInfo_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder>
getJournalInfoFieldBuilder() {
if (journalInfoBuilder_ == null) {
journalInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProtoOrBuilder>(
journalInfo_,
getParentForChildren(),
isClean());
journalInfo_ = null;
}
return journalInfoBuilder_;
}
// required uint64 txid = 2;
private long txid_ ;
public boolean hasTxid() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public long getTxid() {
return txid_;
}
public Builder setTxid(long value) {
bitField0_ |= 0x00000002;
txid_ = value;
onChanged();
return this;
}
public Builder clearTxid() {
bitField0_ = (bitField0_ & ~0x00000002);
txid_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:StartLogSegmentRequestProto)
}
static {
defaultInstance = new StartLogSegmentRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:StartLogSegmentRequestProto)
}
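// Illustrative sketch (hand-written, not produced by protoc): one plausible
// way to build and round-trip a StartLogSegmentRequestProto. The clusterID,
// layoutVersion, namespaceID, and txid values are made-up assumptions used
// only to populate the fields.
private static StartLogSegmentRequestProto exampleStartLogSegmentRequest()
    throws com.google.protobuf.InvalidProtocolBufferException {
  JournalInfoProto info = JournalInfoProto.newBuilder()
      .setClusterID("example-cluster") // required string clusterID = 1
      .setLayoutVersion(1)             // optional uint32 layoutVersion = 2
      .setNamespaceID(42)              // optional uint32 namespaceID = 3
      .build();
  StartLogSegmentRequestProto request = StartLogSegmentRequestProto.newBuilder()
      .setJournalInfo(info)            // required .JournalInfoProto journalInfo = 1
      .setTxid(1001L)                  // required uint64 txid = 2
      .build();
  // Serialize and parse back, as a remote caller of parseFrom(byte[]) would.
  return StartLogSegmentRequestProto.parseFrom(request.toByteArray());
}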
public interface StartLogSegmentResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
public static final class StartLogSegmentResponseProto extends
com.google.protobuf.GeneratedMessage
implements StartLogSegmentResponseProtoOrBuilder {
// Use StartLogSegmentResponseProto.newBuilder() to construct.
private StartLogSegmentResponseProto(Builder builder) {
super(builder);
}
private StartLogSegmentResponseProto(boolean noInit) {}
private static final StartLogSegmentResponseProto defaultInstance;
public static StartLogSegmentResponseProto getDefaultInstance() {
return defaultInstance;
}
public StartLogSegmentResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_fieldAccessorTable;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
}
}
}
// @@protoc_insertion_point(builder_scope:StartLogSegmentResponseProto)
}
static {
defaultInstance = new StartLogSegmentResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:StartLogSegmentResponseProto)
}
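// Illustrative sketch (hand-written, not produced by protoc): the delimited
// variants prefix each message with a varint length, so several messages can
// share one stream. The in-memory streams below are stand-ins for the
// sockets or files a real caller would use.
private static StartLogSegmentResponseProto exampleDelimitedRoundTrip()
    throws java.io.IOException {
  java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  StartLogSegmentResponseProto.getDefaultInstance().writeDelimitedTo(out);
  java.io.ByteArrayInputStream in =
      new java.io.ByteArrayInputStream(out.toByteArray());
  // parseDelimitedFrom returns null once the stream is exhausted.
  return StartLogSegmentResponseProto.parseDelimitedFrom(in);
}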
public static abstract class JournalProtocolService
implements com.google.protobuf.Service {
protected JournalProtocolService() {}
public interface Interface {
public abstract void journal(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto> done);
public abstract void startLogSegment(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto> done);
}
public static com.google.protobuf.Service newReflectiveService(
final Interface impl) {
return new JournalProtocolService() {
@java.lang.Override
public void journal(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto> done) {
impl.journal(controller, request, done);
}
@java.lang.Override
public void startLogSegment(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto> done) {
impl.startLogSegment(controller, request, done);
}
};
}
public static com.google.protobuf.BlockingService
newReflectiveBlockingService(final BlockingInterface impl) {
return new com.google.protobuf.BlockingService() {
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final com.google.protobuf.Message callBlockingMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request)
throws com.google.protobuf.ServiceException {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callBlockingMethod() given method descriptor for " +
"wrong service type.");
}
switch(method.getIndex()) {
case 0:
return impl.journal(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)request);
case 1:
return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
};
}
public abstract void journal(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto> done);
public abstract void startLogSegment(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto> done);
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.getDescriptor().getServices().get(0);
}
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final void callMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request,
com.google.protobuf.RpcCallback<
com.google.protobuf.Message> done) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callMethod() given method descriptor for wrong " +
"service type.");
}
switch(method.getIndex()) {
case 0:
this.journal(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 1:
this.startLogSegment(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public static Stub newStub(
com.google.protobuf.RpcChannel channel) {
return new Stub(channel);
}
public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService implements Interface {
private Stub(com.google.protobuf.RpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.RpcChannel channel;
public com.google.protobuf.RpcChannel getChannel() {
return channel;
}
public void journal(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance()));
}
public void startLogSegment(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()));
}
}
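// Illustrative sketch (hand-written, not produced by protoc): invoking the
// asynchronous stub. The RpcChannel must come from the caller's RPC runtime,
// and passing a null controller assumes the channel tolerates it.
private static void exampleAsyncJournal(
    com.google.protobuf.RpcChannel channel,
    JournalRequestProto request) {
  newStub(channel).journal(null, request,
      new com.google.protobuf.RpcCallback<JournalResponseProto>() {
        public void run(JournalResponseProto response) {
          // Response handling is application-specific; a no-op suffices here.
        }
      });
}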
public static BlockingInterface newBlockingStub(
com.google.protobuf.BlockingRpcChannel channel) {
return new BlockingStub(channel);
}
public interface BlockingInterface {
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto journal(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request)
throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.BlockingRpcChannel channel;
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto journal(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance());
}
}
}
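// Illustrative sketch (hand-written, not produced by protoc): a no-op
// server-side implementation of the blocking interface, adapted with
// JournalProtocolService.newReflectiveBlockingService. Returning default
// instances is an assumption made purely for demonstration; a real server
// would write the received transactions to its journal.
private static com.google.protobuf.BlockingService exampleNoOpJournalService() {
  JournalProtocolService.BlockingInterface impl =
      new JournalProtocolService.BlockingInterface() {
        public JournalResponseProto journal(
            com.google.protobuf.RpcController controller,
            JournalRequestProto request) {
          return JournalResponseProto.getDefaultInstance();
        }
        public StartLogSegmentResponseProto startLogSegment(
            com.google.protobuf.RpcController controller,
            StartLogSegmentRequestProto request) {
          return StartLogSegmentResponseProto.getDefaultInstance();
        }
      };
  return JournalProtocolService.newReflectiveBlockingService(impl);
}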
private static com.google.protobuf.Descriptors.Descriptor
internal_static_JournalInfoProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_JournalInfoProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_JournalRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_JournalRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_JournalResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_JournalResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_StartLogSegmentRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_StartLogSegmentRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_StartLogSegmentResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_StartLogSegmentResponseProto_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\025JournalProtocol.proto\032\nhdfs.proto\"Q\n\020J" +
"ournalInfoProto\022\021\n\tclusterID\030\001 \002(\t\022\025\n\rla" +
"youtVersion\030\002 \001(\r\022\023\n\013namespaceID\030\003 \001(\r\"s" +
"\n\023JournalRequestProto\022&\n\013journalInfo\030\001 \002" +
"(\0132\021.JournalInfoProto\022\022\n\nfirstTxnId\030\002 \002(" +
"\004\022\017\n\007numTxns\030\003 \002(\r\022\017\n\007records\030\004 \002(\014\"\026\n\024J" +
"ournalResponseProto\"S\n\033StartLogSegmentRe" +
"questProto\022&\n\013journalInfo\030\001 \002(\0132\021.Journa" +
"lInfoProto\022\014\n\004txid\030\002 \002(\004\"\036\n\034StartLogSegm" +
"entResponseProto2\240\001\n\026JournalProtocolServ",
"ice\0226\n\007journal\022\024.JournalRequestProto\032\025.J" +
"ournalResponseProto\022N\n\017startLogSegment\022\034" +
".StartLogSegmentRequestProto\032\035.StartLogS" +
"egmentResponseProtoBD\n%org.apache.hadoop" +
".hdfs.protocol.protoB\025JournalProtocolPro" +
"tos\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_JournalInfoProto_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_JournalInfoProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_JournalInfoProto_descriptor,
new java.lang.String[] { "ClusterID", "LayoutVersion", "NamespaceID", },
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.class,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto.Builder.class);
internal_static_JournalRequestProto_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_JournalRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_JournalRequestProto_descriptor,
new java.lang.String[] { "JournalInfo", "FirstTxnId", "NumTxns", "Records", },
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.class,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.Builder.class);
internal_static_JournalResponseProto_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_JournalResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_JournalResponseProto_descriptor,
new java.lang.String[] { },
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.Builder.class);
internal_static_StartLogSegmentRequestProto_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_StartLogSegmentRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_StartLogSegmentRequestProto_descriptor,
new java.lang.String[] { "JournalInfo", "Txid", },
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.class,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
internal_static_StartLogSegmentResponseProto_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_StartLogSegmentResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_StartLogSegmentResponseProto_descriptor,
new java.lang.String[] { },
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
}, assigner);
}
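// Illustrative sketch (hand-written, not produced by protoc): the assembled
// FileDescriptor supports runtime reflection, e.g. enumerating the RPC
// methods of JournalProtocolService.
private static java.util.List<java.lang.String> exampleListMethodNames() {
  java.util.List<java.lang.String> names = new java.util.ArrayList<java.lang.String>();
  for (com.google.protobuf.Descriptors.MethodDescriptor m :
      getDescriptor().getServices().get(0).getMethods()) {
    names.add(m.getName()); // yields "journal" and "startLogSegment"
  }
  return names;
}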
// @@protoc_insertion_point(outer_class_scope)
}