/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;
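/**
 * Request data for the OffsetDelete API (api key 47), which removes committed
 * offsets for a set of topic partitions from a consumer group. Generated from the
 * OffsetDeleteRequest message specification; only version 0 is defined here.
 *
 * A minimal usage sketch (group, topic, and partition values are illustrative only):
 * <pre>{@code
 * OffsetDeleteRequestData data = new OffsetDeleteRequestData()
 *     .setGroupId("example-group")
 *     .setTopics(new OffsetDeleteRequestData.OffsetDeleteRequestTopicCollection());
 * data.topics().add(new OffsetDeleteRequestData.OffsetDeleteRequestTopic()
 *     .setName("example-topic")
 *     .setPartitions(java.util.Collections.singletonList(
 *         new OffsetDeleteRequestData.OffsetDeleteRequestPartition().setPartitionIndex(0))));
 * }</pre>
 */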
public class OffsetDeleteRequestData implements ApiMessage {
private String groupId;
private OffsetDeleteRequestTopicCollection topics;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("group_id", Type.STRING, "The unique group identifier."),
new Field("topics", new ArrayOf(OffsetDeleteRequestTopic.SCHEMA_0), "The topics to delete offsets for")
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public OffsetDeleteRequestData(Readable _readable, short _version) {
read(_readable, _version);
}
public OffsetDeleteRequestData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public OffsetDeleteRequestData() {
this.groupId = "";
this.topics = new OffsetDeleteRequestTopicCollection(0);
}
@Override
public short apiKey() {
return 47;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
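// Deserializes the request from the given Readable: a two-byte length-prefixed
// string for the group id, followed by a four-byte count and that many topic entries.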
@Override
public void read(Readable _readable, short _version) {
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field groupId was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field groupId had invalid length " + length);
} else {
this.groupId = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field topics was serialized as null");
} else {
OffsetDeleteRequestTopicCollection newCollection = new OffsetDeleteRequestTopicCollection(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new OffsetDeleteRequestTopic(_readable, _version));
}
this.topics = newCollection;
}
}
this._unknownTaggedFields = null;
}
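// Serializes the request to the given Writable, reusing the UTF-8 bytes cached by size();
// version 0 has no tagged fields, so any unknown tagged fields cause an error.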
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(groupId);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeInt(topics.size());
for (OffsetDeleteRequestTopic topicsElement : topics) {
topicsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@Override
public void fromStruct(Struct struct, short _version) {
this._unknownTaggedFields = null;
this.groupId = struct.getString("group_id");
{
Object[] _nestedObjects = struct.getArray("topics");
this.topics = new OffsetDeleteRequestTopicCollection(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.topics.add(new OffsetDeleteRequestTopic((Struct) nestedObject, _version));
}
}
}
@Override
public Struct toStruct(short _version) {
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("group_id", this.groupId);
{
Struct[] _nestedObjects = new Struct[topics.size()];
int i = 0;
for (OffsetDeleteRequestTopic element : this.topics) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("topics", (Object[]) _nestedObjects);
}
return struct;
}
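// Computes the serialized size in bytes for the given version and caches the
// UTF-8 encoding of groupId so that write() can reuse it.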
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
{
byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'groupId' field is too long to be serialized");
}
_cache.cacheSerializedValue(groupId, _stringBytes);
_size += _stringBytes.length + 2;
}
{
int _arraySize = 0;
_arraySize += 4;
for (OffsetDeleteRequestTopic topicsElement : topics) {
_arraySize += topicsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof OffsetDeleteRequestData)) return false;
OffsetDeleteRequestData other = (OffsetDeleteRequestData) obj;
if (this.groupId == null) {
if (other.groupId != null) return false;
} else {
if (!this.groupId.equals(other.groupId)) return false;
}
if (this.topics == null) {
if (other.topics != null) return false;
} else {
if (!this.topics.equals(other.topics)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode());
hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
return hashCode;
}
@Override
public String toString() {
return "OffsetDeleteRequestData("
+ "groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'")
+ ", topics=" + MessageUtil.deepToString(topics.iterator())
+ ")";
}
public String groupId() {
return this.groupId;
}
public OffsetDeleteRequestTopicCollection topics() {
return this.topics;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public OffsetDeleteRequestData setGroupId(String v) {
this.groupId = v;
return this;
}
public OffsetDeleteRequestData setTopics(OffsetDeleteRequestTopicCollection v) {
this.topics = v;
return this;
}
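// A single topic entry in the request: the topic name plus the partitions whose
// committed offsets should be deleted. Entries are linked into an
// OffsetDeleteRequestTopicCollection via the next/prev indices.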
public static class OffsetDeleteRequestTopic implements Message, ImplicitLinkedHashMultiCollection.Element {
private String name;
private List<OffsetDeleteRequestPartition> partitions;
private List<RawTaggedField> _unknownTaggedFields;
private int next;
private int prev;
public static final Schema SCHEMA_0 =
new Schema(
new Field("name", Type.STRING, "The topic name."),
new Field("partitions", new ArrayOf(OffsetDeleteRequestPartition.SCHEMA_0), "Each partition to delete offsets for.")
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public OffsetDeleteRequestTopic(Readable _readable, short _version) {
read(_readable, _version);
}
public OffsetDeleteRequestTopic(Struct struct, short _version) {
fromStruct(struct, _version);
}
public OffsetDeleteRequestTopic() {
this.name = "";
this.partitions = new ArrayList<OffsetDeleteRequestPartition>();
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetDeleteRequestTopic");
}
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field name was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field name had invalid length " + length);
} else {
this.name = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field partitions was serialized as null");
} else {
ArrayList<OffsetDeleteRequestPartition> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new OffsetDeleteRequestPartition(_readable, _version));
}
this.partitions = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetDeleteRequestTopic");
}
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(name);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeInt(partitions.size());
for (OffsetDeleteRequestPartition partitionsElement : partitions) {
partitionsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetDeleteRequestTopic");
}
this._unknownTaggedFields = null;
this.name = struct.getString("name");
{
Object[] _nestedObjects = struct.getArray("partitions");
this.partitions = new ArrayList<>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.partitions.add(new OffsetDeleteRequestPartition((Struct) nestedObject, _version));
}
}
}
@Override
public Struct toStruct(short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetDeleteRequestTopic");
}
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("name", this.name);
{
Struct[] _nestedObjects = new Struct[partitions.size()];
int i = 0;
for (OffsetDeleteRequestPartition element : this.partitions) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("partitions", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 0) {
throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetDeleteRequestTopic");
}
{
byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'name' field is too long to be serialized");
}
_cache.cacheSerializedValue(name, _stringBytes);
_size += _stringBytes.length + 2;
}
{
int _arraySize = 0;
_arraySize += 4;
for (OffsetDeleteRequestPartition partitionsElement : partitions) {
_arraySize += partitionsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof OffsetDeleteRequestTopic)) return false;
OffsetDeleteRequestTopic other = (OffsetDeleteRequestTopic) obj;
if (this.name == null) {
if (other.name != null) return false;
} else {
if (!this.name.equals(other.name)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
return hashCode;
}
@Override
public String toString() {
return "OffsetDeleteRequestTopic("
+ "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
+ ", partitions=" + MessageUtil.deepToString(partitions.iterator())
+ ")";
}
public String name() {
return this.name;
}
public List<OffsetDeleteRequestPartition> partitions() {
return this.partitions;
}
@Override
public int next() {
return this.next;
}
@Override
public int prev() {
return this.prev;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public OffsetDeleteRequestTopic setName(String v) {
this.name = v;
return this;
}
public OffsetDeleteRequestTopic setPartitions(List<OffsetDeleteRequestPartition> v) {
this.partitions = v;
return this;
}
@Override
public void setNext(int v) {
this.next = v;
}
@Override
public void setPrev(int v) {
this.prev = v;
}
}
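// A single partition entry within a topic; only the partition index is carried
// in version 0.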
public static class OffsetDeleteRequestPartition implements Message {
private int partitionIndex;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("partition_index", Type.INT32, "The partition index.")
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public OffsetDeleteRequestPartition(Readable _readable, short _version) {
read(_readable, _version);
}
public OffsetDeleteRequestPartition(Struct struct, short _version) {
fromStruct(struct, _version);
}
public OffsetDeleteRequestPartition() {
this.partitionIndex = 0;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetDeleteRequestPartition");
}
this.partitionIndex = _readable.readInt();
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetDeleteRequestPartition");
}
int _numTaggedFields = 0;
_writable.writeInt(partitionIndex);
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetDeleteRequestPartition");
}
this._unknownTaggedFields = null;
this.partitionIndex = struct.getInt("partition_index");
}
@Override
public Struct toStruct(short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetDeleteRequestPartition");
}
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("partition_index", this.partitionIndex);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 0) {
throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetDeleteRequestPartition");
}
_size += 4;
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof OffsetDeleteRequestPartition)) return false;
OffsetDeleteRequestPartition other = (OffsetDeleteRequestPartition) obj;
if (partitionIndex != other.partitionIndex) return false;
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + partitionIndex;
return hashCode;
}
@Override
public String toString() {
return "OffsetDeleteRequestPartition("
+ "partitionIndex=" + partitionIndex
+ ")";
}
public int partitionIndex() {
return this.partitionIndex;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public OffsetDeleteRequestPartition setPartitionIndex(int v) {
this.partitionIndex = v;
return this;
}
}
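// A multi-collection of topic entries keyed by topic name; find() returns the first
// entry with the given name and findAll() returns every matching entry.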
public static class OffsetDeleteRequestTopicCollection extends ImplicitLinkedHashMultiCollection<OffsetDeleteRequestTopic> {
public OffsetDeleteRequestTopicCollection() {
super();
}
public OffsetDeleteRequestTopicCollection(int expectedNumElements) {
super(expectedNumElements);
}
public OffsetDeleteRequestTopicCollection(Iterator<OffsetDeleteRequestTopic> iterator) {
super(iterator);
}
public OffsetDeleteRequestTopic find(String name) {
OffsetDeleteRequestTopic key = new OffsetDeleteRequestTopic();
key.setName(name);
return find(key);
}
public List<OffsetDeleteRequestTopic> findAll(String name) {
OffsetDeleteRequestTopic key = new OffsetDeleteRequestTopic();
key.setName(name);
return findAll(key);
}
}
}