// Artifact: org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata (Maven / Gradle / Ivy — newest version)
/**
* Autogenerated by Avro
*
* DO NOT EDIT DIRECTLY
*/
package org.apache.hudi.avro.model;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.util.Utf8;
import org.apache.avro.message.BinaryMessageEncoder;
import org.apache.avro.message.BinaryMessageDecoder;
import org.apache.avro.message.SchemaStore;
/**
 * Avro-generated record describing the per-partition outcome of a Hudi rollback:
 * which files were successfully/unsuccessfully deleted, plus optional maps of
 * rollback log files and log files written by the failed commit (path -> size).
 */
@org.apache.avro.specific.AvroGenerated
public class HoodieRollbackPartitionMetadata extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  private static final long serialVersionUID = 7775895750247689694L;

  public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"HoodieRollbackPartitionMetadata\",\"namespace\":\"org.apache.hudi.avro.model\",\"fields\":[{\"name\":\"partitionPath\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"successDeleteFiles\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}},{\"name\":\"failedDeleteFiles\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}},{\"name\":\"rollbackLogFiles\",\"type\":[\"null\",{\"type\":\"map\",\"values\":\"long\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"logFilesFromFailedCommit\",\"type\":[\"null\",{\"type\":\"map\",\"values\":\"long\",\"avro.java.string\":\"String\"}],\"default\":null}]}");
  public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }

  private static final SpecificData MODEL$ = new SpecificData();

  // Restored generic parameters (stripped by HTML extraction of the original source).
  private static final BinaryMessageEncoder<HoodieRollbackPartitionMetadata> ENCODER =
      new BinaryMessageEncoder<>(MODEL$, SCHEMA$);

  private static final BinaryMessageDecoder<HoodieRollbackPartitionMetadata> DECODER =
      new BinaryMessageDecoder<>(MODEL$, SCHEMA$);

  /**
   * Return the BinaryMessageEncoder instance used by this class.
   * @return the message encoder used by this class
   */
  public static BinaryMessageEncoder<HoodieRollbackPartitionMetadata> getEncoder() {
    return ENCODER;
  }

  /**
   * Return the BinaryMessageDecoder instance used by this class.
   * @return the message decoder used by this class
   */
  public static BinaryMessageDecoder<HoodieRollbackPartitionMetadata> getDecoder() {
    return DECODER;
  }

  /**
   * Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}.
   * @param resolver a {@link SchemaStore} used to find schemas by fingerprint
   * @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore
   */
  public static BinaryMessageDecoder<HoodieRollbackPartitionMetadata> createDecoder(SchemaStore resolver) {
    return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver);
  }

  /**
   * Serializes this HoodieRollbackPartitionMetadata to a ByteBuffer.
   * @return a buffer holding the serialized data for this instance
   * @throws java.io.IOException if this instance could not be serialized
   */
  public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException {
    return ENCODER.encode(this);
  }

  /**
   * Deserializes a HoodieRollbackPartitionMetadata from a ByteBuffer.
   * @param b a byte buffer holding serialized data for an instance of this class
   * @return a HoodieRollbackPartitionMetadata instance decoded from the given buffer
   * @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class
   */
  public static HoodieRollbackPartitionMetadata fromByteBuffer(
      java.nio.ByteBuffer b) throws java.io.IOException {
    return DECODER.decode(b);
  }

  private java.lang.String partitionPath;
  private java.util.List<java.lang.String> successDeleteFiles;
  private java.util.List<java.lang.String> failedDeleteFiles;
  private java.util.Map<java.lang.String,java.lang.Long> rollbackLogFiles;
  private java.util.Map<java.lang.String,java.lang.Long> logFilesFromFailedCommit;

  /**
   * Default constructor.  Note that this does not initialize fields
   * to their default values from the schema.  If that is desired then
   * one should use <code>newBuilder()</code>.
   */
  public HoodieRollbackPartitionMetadata() {}

  /**
   * All-args constructor.
   * @param partitionPath The new value for partitionPath
   * @param successDeleteFiles The new value for successDeleteFiles
   * @param failedDeleteFiles The new value for failedDeleteFiles
   * @param rollbackLogFiles The new value for rollbackLogFiles
   * @param logFilesFromFailedCommit The new value for logFilesFromFailedCommit
   */
  public HoodieRollbackPartitionMetadata(java.lang.String partitionPath, java.util.List<java.lang.String> successDeleteFiles, java.util.List<java.lang.String> failedDeleteFiles, java.util.Map<java.lang.String,java.lang.Long> rollbackLogFiles, java.util.Map<java.lang.String,java.lang.Long> logFilesFromFailedCommit) {
    this.partitionPath = partitionPath;
    this.successDeleteFiles = successDeleteFiles;
    this.failedDeleteFiles = failedDeleteFiles;
    this.rollbackLogFiles = rollbackLogFiles;
    this.logFilesFromFailedCommit = logFilesFromFailedCommit;
  }

  @Override
  public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; }

  @Override
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }

  // Used by DatumWriter.  Applications should not call.
  @Override
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return partitionPath;
    case 1: return successDeleteFiles;
    case 2: return failedDeleteFiles;
    case 3: return rollbackLogFiles;
    case 4: return logFilesFromFailedCommit;
    default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
    }
  }

  // Used by DatumReader.  Applications should not call.
  @Override
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: partitionPath = value$ != null ? value$.toString() : null; break;
    case 1: successDeleteFiles = (java.util.List<java.lang.String>)value$; break;
    case 2: failedDeleteFiles = (java.util.List<java.lang.String>)value$; break;
    case 3: rollbackLogFiles = (java.util.Map<java.lang.String,java.lang.Long>)value$; break;
    case 4: logFilesFromFailedCommit = (java.util.Map<java.lang.String,java.lang.Long>)value$; break;
    default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
    }
  }

  /**
   * Gets the value of the 'partitionPath' field.
   * @return The value of the 'partitionPath' field.
   */
  public java.lang.String getPartitionPath() {
    return partitionPath;
  }

  /**
   * Sets the value of the 'partitionPath' field.
   * @param value the value to set.
   */
  public void setPartitionPath(java.lang.String value) {
    this.partitionPath = value;
  }

  /**
   * Gets the value of the 'successDeleteFiles' field.
   * @return The value of the 'successDeleteFiles' field.
   */
  public java.util.List<java.lang.String> getSuccessDeleteFiles() {
    return successDeleteFiles;
  }

  /**
   * Sets the value of the 'successDeleteFiles' field.
   * @param value the value to set.
   */
  public void setSuccessDeleteFiles(java.util.List<java.lang.String> value) {
    this.successDeleteFiles = value;
  }

  /**
   * Gets the value of the 'failedDeleteFiles' field.
   * @return The value of the 'failedDeleteFiles' field.
   */
  public java.util.List<java.lang.String> getFailedDeleteFiles() {
    return failedDeleteFiles;
  }

  /**
   * Sets the value of the 'failedDeleteFiles' field.
   * @param value the value to set.
   */
  public void setFailedDeleteFiles(java.util.List<java.lang.String> value) {
    this.failedDeleteFiles = value;
  }

  /**
   * Gets the value of the 'rollbackLogFiles' field.
   * @return The value of the 'rollbackLogFiles' field.
   */
  public java.util.Map<java.lang.String,java.lang.Long> getRollbackLogFiles() {
    return rollbackLogFiles;
  }

  /**
   * Sets the value of the 'rollbackLogFiles' field.
   * @param value the value to set.
   */
  public void setRollbackLogFiles(java.util.Map<java.lang.String,java.lang.Long> value) {
    this.rollbackLogFiles = value;
  }

  /**
   * Gets the value of the 'logFilesFromFailedCommit' field.
   * @return The value of the 'logFilesFromFailedCommit' field.
   */
  public java.util.Map<java.lang.String,java.lang.Long> getLogFilesFromFailedCommit() {
    return logFilesFromFailedCommit;
  }

  /**
   * Sets the value of the 'logFilesFromFailedCommit' field.
   * @param value the value to set.
   */
  public void setLogFilesFromFailedCommit(java.util.Map<java.lang.String,java.lang.Long> value) {
    this.logFilesFromFailedCommit = value;
  }

  /**
   * Creates a new HoodieRollbackPartitionMetadata RecordBuilder.
   * @return A new HoodieRollbackPartitionMetadata RecordBuilder
   */
  public static org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder newBuilder() {
    return new org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder();
  }

  /**
   * Creates a new HoodieRollbackPartitionMetadata RecordBuilder by copying an existing Builder.
   * @param other The existing builder to copy.
   * @return A new HoodieRollbackPartitionMetadata RecordBuilder
   */
  public static org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder newBuilder(org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder other) {
    if (other == null) {
      return new org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder();
    } else {
      return new org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder(other);
    }
  }

  /**
   * Creates a new HoodieRollbackPartitionMetadata RecordBuilder by copying an existing HoodieRollbackPartitionMetadata instance.
   * @param other The existing instance to copy.
   * @return A new HoodieRollbackPartitionMetadata RecordBuilder
   */
  public static org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder newBuilder(org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata other) {
    if (other == null) {
      return new org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder();
    } else {
      return new org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder(other);
    }
  }

  /**
   * RecordBuilder for HoodieRollbackPartitionMetadata instances.
   */
  @org.apache.avro.specific.AvroGenerated
  public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<HoodieRollbackPartitionMetadata>
    implements org.apache.avro.data.RecordBuilder<HoodieRollbackPartitionMetadata> {

    private java.lang.String partitionPath;
    private java.util.List<java.lang.String> successDeleteFiles;
    private java.util.List<java.lang.String> failedDeleteFiles;
    private java.util.Map<java.lang.String,java.lang.Long> rollbackLogFiles;
    private java.util.Map<java.lang.String,java.lang.Long> logFilesFromFailedCommit;

    /** Creates a new Builder */
    private Builder() {
      super(SCHEMA$, MODEL$);
    }

    /**
     * Creates a Builder by copying an existing Builder.
     * @param other The existing Builder to copy.
     */
    private Builder(org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder other) {
      super(other);
      if (isValidValue(fields()[0], other.partitionPath)) {
        this.partitionPath = data().deepCopy(fields()[0].schema(), other.partitionPath);
        fieldSetFlags()[0] = other.fieldSetFlags()[0];
      }
      if (isValidValue(fields()[1], other.successDeleteFiles)) {
        this.successDeleteFiles = data().deepCopy(fields()[1].schema(), other.successDeleteFiles);
        fieldSetFlags()[1] = other.fieldSetFlags()[1];
      }
      if (isValidValue(fields()[2], other.failedDeleteFiles)) {
        this.failedDeleteFiles = data().deepCopy(fields()[2].schema(), other.failedDeleteFiles);
        fieldSetFlags()[2] = other.fieldSetFlags()[2];
      }
      if (isValidValue(fields()[3], other.rollbackLogFiles)) {
        this.rollbackLogFiles = data().deepCopy(fields()[3].schema(), other.rollbackLogFiles);
        fieldSetFlags()[3] = other.fieldSetFlags()[3];
      }
      if (isValidValue(fields()[4], other.logFilesFromFailedCommit)) {
        this.logFilesFromFailedCommit = data().deepCopy(fields()[4].schema(), other.logFilesFromFailedCommit);
        fieldSetFlags()[4] = other.fieldSetFlags()[4];
      }
    }

    /**
     * Creates a Builder by copying an existing HoodieRollbackPartitionMetadata instance
     * @param other The existing instance to copy.
     */
    private Builder(org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata other) {
      super(SCHEMA$, MODEL$);
      if (isValidValue(fields()[0], other.partitionPath)) {
        this.partitionPath = data().deepCopy(fields()[0].schema(), other.partitionPath);
        fieldSetFlags()[0] = true;
      }
      if (isValidValue(fields()[1], other.successDeleteFiles)) {
        this.successDeleteFiles = data().deepCopy(fields()[1].schema(), other.successDeleteFiles);
        fieldSetFlags()[1] = true;
      }
      if (isValidValue(fields()[2], other.failedDeleteFiles)) {
        this.failedDeleteFiles = data().deepCopy(fields()[2].schema(), other.failedDeleteFiles);
        fieldSetFlags()[2] = true;
      }
      if (isValidValue(fields()[3], other.rollbackLogFiles)) {
        this.rollbackLogFiles = data().deepCopy(fields()[3].schema(), other.rollbackLogFiles);
        fieldSetFlags()[3] = true;
      }
      if (isValidValue(fields()[4], other.logFilesFromFailedCommit)) {
        this.logFilesFromFailedCommit = data().deepCopy(fields()[4].schema(), other.logFilesFromFailedCommit);
        fieldSetFlags()[4] = true;
      }
    }

    /**
     * Gets the value of the 'partitionPath' field.
     * @return The value.
     */
    public java.lang.String getPartitionPath() {
      return partitionPath;
    }

    /**
     * Sets the value of the 'partitionPath' field.
     * @param value The value of 'partitionPath'.
     * @return This builder.
     */
    public org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder setPartitionPath(java.lang.String value) {
      validate(fields()[0], value);
      this.partitionPath = value;
      fieldSetFlags()[0] = true;
      return this;
    }

    /**
     * Checks whether the 'partitionPath' field has been set.
     * @return True if the 'partitionPath' field has been set, false otherwise.
     */
    public boolean hasPartitionPath() {
      return fieldSetFlags()[0];
    }

    /**
     * Clears the value of the 'partitionPath' field.
     * @return This builder.
     */
    public org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder clearPartitionPath() {
      partitionPath = null;
      fieldSetFlags()[0] = false;
      return this;
    }

    /**
     * Gets the value of the 'successDeleteFiles' field.
     * @return The value.
     */
    public java.util.List<java.lang.String> getSuccessDeleteFiles() {
      return successDeleteFiles;
    }

    /**
     * Sets the value of the 'successDeleteFiles' field.
     * @param value The value of 'successDeleteFiles'.
     * @return This builder.
     */
    public org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder setSuccessDeleteFiles(java.util.List<java.lang.String> value) {
      validate(fields()[1], value);
      this.successDeleteFiles = value;
      fieldSetFlags()[1] = true;
      return this;
    }

    /**
     * Checks whether the 'successDeleteFiles' field has been set.
     * @return True if the 'successDeleteFiles' field has been set, false otherwise.
     */
    public boolean hasSuccessDeleteFiles() {
      return fieldSetFlags()[1];
    }

    /**
     * Clears the value of the 'successDeleteFiles' field.
     * @return This builder.
     */
    public org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder clearSuccessDeleteFiles() {
      successDeleteFiles = null;
      fieldSetFlags()[1] = false;
      return this;
    }

    /**
     * Gets the value of the 'failedDeleteFiles' field.
     * @return The value.
     */
    public java.util.List<java.lang.String> getFailedDeleteFiles() {
      return failedDeleteFiles;
    }

    /**
     * Sets the value of the 'failedDeleteFiles' field.
     * @param value The value of 'failedDeleteFiles'.
     * @return This builder.
     */
    public org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder setFailedDeleteFiles(java.util.List<java.lang.String> value) {
      validate(fields()[2], value);
      this.failedDeleteFiles = value;
      fieldSetFlags()[2] = true;
      return this;
    }

    /**
     * Checks whether the 'failedDeleteFiles' field has been set.
     * @return True if the 'failedDeleteFiles' field has been set, false otherwise.
     */
    public boolean hasFailedDeleteFiles() {
      return fieldSetFlags()[2];
    }

    /**
     * Clears the value of the 'failedDeleteFiles' field.
     * @return This builder.
     */
    public org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder clearFailedDeleteFiles() {
      failedDeleteFiles = null;
      fieldSetFlags()[2] = false;
      return this;
    }

    /**
     * Gets the value of the 'rollbackLogFiles' field.
     * @return The value.
     */
    public java.util.Map<java.lang.String,java.lang.Long> getRollbackLogFiles() {
      return rollbackLogFiles;
    }

    /**
     * Sets the value of the 'rollbackLogFiles' field.
     * @param value The value of 'rollbackLogFiles'.
     * @return This builder.
     */
    public org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder setRollbackLogFiles(java.util.Map<java.lang.String,java.lang.Long> value) {
      validate(fields()[3], value);
      this.rollbackLogFiles = value;
      fieldSetFlags()[3] = true;
      return this;
    }

    /**
     * Checks whether the 'rollbackLogFiles' field has been set.
     * @return True if the 'rollbackLogFiles' field has been set, false otherwise.
     */
    public boolean hasRollbackLogFiles() {
      return fieldSetFlags()[3];
    }

    /**
     * Clears the value of the 'rollbackLogFiles' field.
     * @return This builder.
     */
    public org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder clearRollbackLogFiles() {
      rollbackLogFiles = null;
      fieldSetFlags()[3] = false;
      return this;
    }

    /**
     * Gets the value of the 'logFilesFromFailedCommit' field.
     * @return The value.
     */
    public java.util.Map<java.lang.String,java.lang.Long> getLogFilesFromFailedCommit() {
      return logFilesFromFailedCommit;
    }

    /**
     * Sets the value of the 'logFilesFromFailedCommit' field.
     * @param value The value of 'logFilesFromFailedCommit'.
     * @return This builder.
     */
    public org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder setLogFilesFromFailedCommit(java.util.Map<java.lang.String,java.lang.Long> value) {
      validate(fields()[4], value);
      this.logFilesFromFailedCommit = value;
      fieldSetFlags()[4] = true;
      return this;
    }

    /**
     * Checks whether the 'logFilesFromFailedCommit' field has been set.
     * @return True if the 'logFilesFromFailedCommit' field has been set, false otherwise.
     */
    public boolean hasLogFilesFromFailedCommit() {
      return fieldSetFlags()[4];
    }

    /**
     * Clears the value of the 'logFilesFromFailedCommit' field.
     * @return This builder.
     */
    public org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata.Builder clearLogFilesFromFailedCommit() {
      logFilesFromFailedCommit = null;
      fieldSetFlags()[4] = false;
      return this;
    }

    @Override
    @SuppressWarnings("unchecked")
    public HoodieRollbackPartitionMetadata build() {
      try {
        HoodieRollbackPartitionMetadata record = new HoodieRollbackPartitionMetadata();
        record.partitionPath = fieldSetFlags()[0] ? this.partitionPath : (java.lang.String) defaultValue(fields()[0]);
        record.successDeleteFiles = fieldSetFlags()[1] ? this.successDeleteFiles : (java.util.List<java.lang.String>) defaultValue(fields()[1]);
        record.failedDeleteFiles = fieldSetFlags()[2] ? this.failedDeleteFiles : (java.util.List<java.lang.String>) defaultValue(fields()[2]);
        record.rollbackLogFiles = fieldSetFlags()[3] ? this.rollbackLogFiles : (java.util.Map<java.lang.String,java.lang.Long>) defaultValue(fields()[3]);
        record.logFilesFromFailedCommit = fieldSetFlags()[4] ? this.logFilesFromFailedCommit : (java.util.Map<java.lang.String,java.lang.Long>) defaultValue(fields()[4]);
        return record;
      } catch (org.apache.avro.AvroMissingFieldException e) {
        throw e;
      } catch (java.lang.Exception e) {
        throw new org.apache.avro.AvroRuntimeException(e);
      }
    }
  }

  @SuppressWarnings("unchecked")
  private static final org.apache.avro.io.DatumWriter<HoodieRollbackPartitionMetadata>
    WRITER$ = (org.apache.avro.io.DatumWriter<HoodieRollbackPartitionMetadata>)MODEL$.createDatumWriter(SCHEMA$);

  @Override public void writeExternal(java.io.ObjectOutput out)
    throws java.io.IOException {
    WRITER$.write(this, SpecificData.getEncoder(out));
  }

  @SuppressWarnings("unchecked")
  private static final org.apache.avro.io.DatumReader<HoodieRollbackPartitionMetadata>
    READER$ = (org.apache.avro.io.DatumReader<HoodieRollbackPartitionMetadata>)MODEL$.createDatumReader(SCHEMA$);

  @Override public void readExternal(java.io.ObjectInput in)
    throws java.io.IOException {
    READER$.read(this, SpecificData.getDecoder(in));
  }

  @Override protected boolean hasCustomCoders() { return true; }

  @Override public void customEncode(org.apache.avro.io.Encoder out)
    throws java.io.IOException
  {
    out.writeString(this.partitionPath);

    long size0 = this.successDeleteFiles.size();
    out.writeArrayStart();
    out.setItemCount(size0);
    long actualSize0 = 0;
    for (java.lang.String e0: this.successDeleteFiles) {
      actualSize0++;
      out.startItem();
      out.writeString(e0);
    }
    out.writeArrayEnd();
    if (actualSize0 != size0)
      throw new java.util.ConcurrentModificationException("Array-size written was " + size0 + ", but element count was " + actualSize0 + ".");

    long size1 = this.failedDeleteFiles.size();
    out.writeArrayStart();
    out.setItemCount(size1);
    long actualSize1 = 0;
    for (java.lang.String e1: this.failedDeleteFiles) {
      actualSize1++;
      out.startItem();
      out.writeString(e1);
    }
    out.writeArrayEnd();
    if (actualSize1 != size1)
      throw new java.util.ConcurrentModificationException("Array-size written was " + size1 + ", but element count was " + actualSize1 + ".");

    if (this.rollbackLogFiles == null) {
      out.writeIndex(0);
      out.writeNull();
    } else {
      out.writeIndex(1);
      long size2 = this.rollbackLogFiles.size();
      out.writeMapStart();
      out.setItemCount(size2);
      long actualSize2 = 0;
      for (java.util.Map.Entry<java.lang.String, java.lang.Long> e2: this.rollbackLogFiles.entrySet()) {
        actualSize2++;
        out.startItem();
        out.writeString(e2.getKey());
        java.lang.Long v2 = e2.getValue();
        out.writeLong(v2);
      }
      out.writeMapEnd();
      if (actualSize2 != size2)
        throw new java.util.ConcurrentModificationException("Map-size written was " + size2 + ", but element count was " + actualSize2 + ".");
    }

    if (this.logFilesFromFailedCommit == null) {
      out.writeIndex(0);
      out.writeNull();
    } else {
      out.writeIndex(1);
      long size3 = this.logFilesFromFailedCommit.size();
      out.writeMapStart();
      out.setItemCount(size3);
      long actualSize3 = 0;
      for (java.util.Map.Entry<java.lang.String, java.lang.Long> e3: this.logFilesFromFailedCommit.entrySet()) {
        actualSize3++;
        out.startItem();
        out.writeString(e3.getKey());
        java.lang.Long v3 = e3.getValue();
        out.writeLong(v3);
      }
      out.writeMapEnd();
      if (actualSize3 != size3)
        throw new java.util.ConcurrentModificationException("Map-size written was " + size3 + ", but element count was " + actualSize3 + ".");
    }
  }

  @Override public void customDecode(org.apache.avro.io.ResolvingDecoder in)
    throws java.io.IOException
  {
    org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff();
    if (fieldOrder == null) {
      this.partitionPath = in.readString();

      long size0 = in.readArrayStart();
      java.util.List<java.lang.String> a0 = this.successDeleteFiles;
      if (a0 == null) {
        a0 = new SpecificData.Array<java.lang.String>((int)size0, SCHEMA$.getField("successDeleteFiles").schema());
        this.successDeleteFiles = a0;
      } else a0.clear();
      SpecificData.Array<java.lang.String> ga0 = (a0 instanceof SpecificData.Array ? (SpecificData.Array<java.lang.String>)a0 : null);
      for ( ; 0 < size0; size0 = in.arrayNext()) {
        for ( ; size0 != 0; size0--) {
          java.lang.String e0 = (ga0 != null ? ga0.peek() : null);
          e0 = in.readString();
          a0.add(e0);
        }
      }

      long size1 = in.readArrayStart();
      java.util.List<java.lang.String> a1 = this.failedDeleteFiles;
      if (a1 == null) {
        a1 = new SpecificData.Array<java.lang.String>((int)size1, SCHEMA$.getField("failedDeleteFiles").schema());
        this.failedDeleteFiles = a1;
      } else a1.clear();
      SpecificData.Array<java.lang.String> ga1 = (a1 instanceof SpecificData.Array ? (SpecificData.Array<java.lang.String>)a1 : null);
      for ( ; 0 < size1; size1 = in.arrayNext()) {
        for ( ; size1 != 0; size1--) {
          java.lang.String e1 = (ga1 != null ? ga1.peek() : null);
          e1 = in.readString();
          a1.add(e1);
        }
      }

      if (in.readIndex() != 1) {
        in.readNull();
        this.rollbackLogFiles = null;
      } else {
        long size2 = in.readMapStart();
        java.util.Map<java.lang.String,java.lang.Long> m2 = this.rollbackLogFiles; // Need fresh name due to limitation of macro system
        if (m2 == null) {
          m2 = new java.util.HashMap<java.lang.String,java.lang.Long>((int)size2);
          this.rollbackLogFiles = m2;
        } else m2.clear();
        for ( ; 0 < size2; size2 = in.mapNext()) {
          for ( ; size2 != 0; size2--) {
            java.lang.String k2 = null;
            k2 = in.readString();
            java.lang.Long v2 = null;
            v2 = in.readLong();
            m2.put(k2, v2);
          }
        }
      }

      if (in.readIndex() != 1) {
        in.readNull();
        this.logFilesFromFailedCommit = null;
      } else {
        long size3 = in.readMapStart();
        java.util.Map<java.lang.String,java.lang.Long> m3 = this.logFilesFromFailedCommit; // Need fresh name due to limitation of macro system
        if (m3 == null) {
          m3 = new java.util.HashMap<java.lang.String,java.lang.Long>((int)size3);
          this.logFilesFromFailedCommit = m3;
        } else m3.clear();
        for ( ; 0 < size3; size3 = in.mapNext()) {
          for ( ; size3 != 0; size3--) {
            java.lang.String k3 = null;
            k3 = in.readString();
            java.lang.Long v3 = null;
            v3 = in.readLong();
            m3.put(k3, v3);
          }
        }
      }

    } else {
      for (int i = 0; i < 5; i++) {
        switch (fieldOrder[i].pos()) {
        case 0:
          this.partitionPath = in.readString();
          break;

        case 1:
          long size0 = in.readArrayStart();
          java.util.List<java.lang.String> a0 = this.successDeleteFiles;
          if (a0 == null) {
            a0 = new SpecificData.Array<java.lang.String>((int)size0, SCHEMA$.getField("successDeleteFiles").schema());
            this.successDeleteFiles = a0;
          } else a0.clear();
          SpecificData.Array<java.lang.String> ga0 = (a0 instanceof SpecificData.Array ? (SpecificData.Array<java.lang.String>)a0 : null);
          for ( ; 0 < size0; size0 = in.arrayNext()) {
            for ( ; size0 != 0; size0--) {
              java.lang.String e0 = (ga0 != null ? ga0.peek() : null);
              e0 = in.readString();
              a0.add(e0);
            }
          }
          break;

        case 2:
          long size1 = in.readArrayStart();
          java.util.List<java.lang.String> a1 = this.failedDeleteFiles;
          if (a1 == null) {
            a1 = new SpecificData.Array<java.lang.String>((int)size1, SCHEMA$.getField("failedDeleteFiles").schema());
            this.failedDeleteFiles = a1;
          } else a1.clear();
          SpecificData.Array<java.lang.String> ga1 = (a1 instanceof SpecificData.Array ? (SpecificData.Array<java.lang.String>)a1 : null);
          for ( ; 0 < size1; size1 = in.arrayNext()) {
            for ( ; size1 != 0; size1--) {
              java.lang.String e1 = (ga1 != null ? ga1.peek() : null);
              e1 = in.readString();
              a1.add(e1);
            }
          }
          break;

        case 3:
          if (in.readIndex() != 1) {
            in.readNull();
            this.rollbackLogFiles = null;
          } else {
            long size2 = in.readMapStart();
            java.util.Map<java.lang.String,java.lang.Long> m2 = this.rollbackLogFiles; // Need fresh name due to limitation of macro system
            if (m2 == null) {
              m2 = new java.util.HashMap<java.lang.String,java.lang.Long>((int)size2);
              this.rollbackLogFiles = m2;
            } else m2.clear();
            for ( ; 0 < size2; size2 = in.mapNext()) {
              for ( ; size2 != 0; size2--) {
                java.lang.String k2 = null;
                k2 = in.readString();
                java.lang.Long v2 = null;
                v2 = in.readLong();
                m2.put(k2, v2);
              }
            }
          }
          break;

        case 4:
          if (in.readIndex() != 1) {
            in.readNull();
            this.logFilesFromFailedCommit = null;
          } else {
            long size3 = in.readMapStart();
            java.util.Map<java.lang.String,java.lang.Long> m3 = this.logFilesFromFailedCommit; // Need fresh name due to limitation of macro system
            if (m3 == null) {
              m3 = new java.util.HashMap<java.lang.String,java.lang.Long>((int)size3);
              this.logFilesFromFailedCommit = m3;
            } else m3.clear();
            for ( ; 0 < size3; size3 = in.mapNext()) {
              for ( ; size3 != 0; size3--) {
                java.lang.String k3 = null;
                k3 = in.readString();
                java.lang.Long v3 = null;
                v3 = in.readLong();
                m3.put(k3, v3);
              }
            }
          }
          break;

        default:
          throw new java.io.IOException("Corrupt ResolvingDecoder.");
        }
      }
    }
  }
}
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy