
// Source: io.vertx.rxjava.kafka.client.producer.KafkaProducer (recovered from a Maven/Gradle/Ivy artifact page)
/*
* Copyright 2014 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.vertx.rxjava.kafka.client.producer;
import rx.Observable;
import rx.Single;
import io.vertx.rx.java.RxHelper;
import io.vertx.rx.java.WriteStreamSubscriber;
import io.vertx.rx.java.SingleOnSubscribeAdapter;
import java.util.Map;
import java.util.Set;
import java.util.List;
import java.util.Iterator;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import io.vertx.core.Handler;
import io.vertx.core.AsyncResult;
import io.vertx.core.json.JsonObject;
import io.vertx.core.json.JsonArray;
import io.vertx.lang.rx.RxGen;
import io.vertx.lang.rx.TypeArg;
import io.vertx.lang.rx.MappingIterator;
/**
* Vert.x Kafka producer.
*
* The {@link io.vertx.rxjava.core.streams.WriteStream#write} provides global control over writing a record.
*
*
* NOTE: This class has been automatically generated from the {@link io.vertx.kafka.client.producer.KafkaProducer original} non RX-ified interface using Vert.x codegen.
*/
@RxGen(io.vertx.kafka.client.producer.KafkaProducer.class)
public class KafkaProducer implements io.vertx.rxjava.core.streams.WriteStream> {
@Override
public String toString() {
return delegate.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
KafkaProducer that = (KafkaProducer) o;
return delegate.equals(that.delegate);
}
@Override
public int hashCode() {
return delegate.hashCode();
}
public static final TypeArg __TYPE_ARG = new TypeArg<>( obj -> new KafkaProducer((io.vertx.kafka.client.producer.KafkaProducer) obj),
KafkaProducer::getDelegate
);
private final io.vertx.kafka.client.producer.KafkaProducer delegate;
public final TypeArg __typeArg_0;
public final TypeArg __typeArg_1;
public KafkaProducer(io.vertx.kafka.client.producer.KafkaProducer delegate) {
this.delegate = delegate;
this.__typeArg_0 = TypeArg.unknown(); this.__typeArg_1 = TypeArg.unknown(); }
public KafkaProducer(Object delegate, TypeArg typeArg_0, TypeArg typeArg_1) {
this.delegate = (io.vertx.kafka.client.producer.KafkaProducer)delegate;
this.__typeArg_0 = typeArg_0;
this.__typeArg_1 = typeArg_1;
}
public io.vertx.kafka.client.producer.KafkaProducer getDelegate() {
return delegate;
}
private WriteStreamSubscriber> subscriber;
public synchronized WriteStreamSubscriber> toSubscriber() {
if (subscriber == null) {
Function> conv = io.vertx.rxjava.kafka.client.producer.KafkaProducerRecord::getDelegate;
subscriber = RxHelper.toSubscriber(getDelegate(), conv);
}
return subscriber;
}
/**
* Write some data to the stream.
*
* The data is usually put on an internal write queue, and the write actually happens
* asynchronously. To avoid running out of memory by putting too much on the write queue,
* check the {@link io.vertx.rxjava.core.streams.WriteStream#writeQueueFull} method before writing. This is done automatically if
* using a .
*
*
When the data
is moved from the queue to the actual medium, the returned
* will be completed with the write result, e.g the future is succeeded
* when a server HTTP response buffer is written to the socket and failed if the remote
* client has closed the socket while the data was still pending for write.
* @param data the data to write
* @return a future completed with the write result
*/
public io.vertx.core.Future write(io.vertx.rxjava.kafka.client.producer.KafkaProducerRecord data) {
io.vertx.core.Future ret = delegate.write(data.getDelegate()).map(val -> val);
return ret;
}
/**
* Write some data to the stream.
*
* The data is usually put on an internal write queue, and the write actually happens
* asynchronously. To avoid running out of memory by putting too much on the write queue,
* check the {@link io.vertx.rxjava.core.streams.WriteStream#writeQueueFull} method before writing. This is done automatically if
* using a .
*
*
When the data
is moved from the queue to the actual medium, the returned
* will be completed with the write result, e.g the future is succeeded
* when a server HTTP response buffer is written to the socket and failed if the remote
* client has closed the socket while the data was still pending for write.
* @param data the data to write
* @return a future completed with the write result
*/
public rx.Single rxWrite(io.vertx.rxjava.kafka.client.producer.KafkaProducerRecord data) {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.write(data).onComplete(fut);
}));
}
/**
* Ends the stream.
*
* Once the stream has ended, it cannot be used any more.
* @return a future completed with the result
*/
public io.vertx.core.Future end() {
io.vertx.core.Future ret = delegate.end().map(val -> val);
return ret;
}
/**
* Ends the stream.
*
* Once the stream has ended, it cannot be used any more.
* @return a future completed with the result
*/
public rx.Single rxEnd() {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.end().onComplete(fut);
}));
}
/**
* Same as {@link io.vertx.rxjava.core.streams.WriteStream#end} but writes some data to the stream before ending.
* @param data the data to write
* @return a future completed with the result
*/
public io.vertx.core.Future end(io.vertx.rxjava.kafka.client.producer.KafkaProducerRecord data) {
io.vertx.core.Future ret = delegate.end(data.getDelegate()).map(val -> val);
return ret;
}
/**
* Same as {@link io.vertx.rxjava.core.streams.WriteStream#end} but writes some data to the stream before ending.
* @param data the data to write
* @return a future completed with the result
*/
public rx.Single rxEnd(io.vertx.rxjava.kafka.client.producer.KafkaProducerRecord data) {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.end(data).onComplete(fut);
}));
}
/**
* This will return true
if there are more bytes in the write queue than the value set using {@link io.vertx.rxjava.kafka.client.producer.KafkaProducer#setWriteQueueMaxSize}
* @return true
if write queue is full
*/
public boolean writeQueueFull() {
boolean ret = delegate.writeQueueFull();
return ret;
}
/**
* Get or create a KafkaProducer instance which shares its stream with any other KafkaProducer created with the same name
*
* When close
has been called for each shared producer the resources will be released.
* Calling end
closes all shared producers.
* @param vertx Vert.x instance to use
* @param name the producer name to identify it
* @param config Kafka producer configuration
* @return an instance of the KafkaProducer
*/
public static io.vertx.rxjava.kafka.client.producer.KafkaProducer createShared(io.vertx.rxjava.core.Vertx vertx, java.lang.String name, java.util.Map config) {
io.vertx.rxjava.kafka.client.producer.KafkaProducer ret = io.vertx.rxjava.kafka.client.producer.KafkaProducer.newInstance((io.vertx.kafka.client.producer.KafkaProducer)io.vertx.kafka.client.producer.KafkaProducer.createShared(vertx.getDelegate(), name, config), TypeArg.unknown(), TypeArg.unknown());
return ret;
}
/**
* Get or create a KafkaProducer instance which shares its stream with any other KafkaProducer created with the same name
*
* When close
has been called for each shared producer the resources will be released.
* Calling end
closes all shared producers.
* @param vertx Vert.x instance to use
* @param name the producer name to identify it
* @param options Kafka producer options
* @return an instance of the KafkaProducer
*/
public static io.vertx.rxjava.kafka.client.producer.KafkaProducer createShared(io.vertx.rxjava.core.Vertx vertx, java.lang.String name, io.vertx.kafka.client.common.KafkaClientOptions options) {
io.vertx.rxjava.kafka.client.producer.KafkaProducer ret = io.vertx.rxjava.kafka.client.producer.KafkaProducer.newInstance((io.vertx.kafka.client.producer.KafkaProducer)io.vertx.kafka.client.producer.KafkaProducer.createShared(vertx.getDelegate(), name, options), TypeArg.unknown(), TypeArg.unknown());
return ret;
}
/**
* Get or create a KafkaProducer instance which shares its stream with any other KafkaProducer created with the same name
*
* When close
has been called for each shared producer the resources will be released.
* Calling end
closes all shared producers.
* @param vertx Vert.x instance to use
* @param name the producer name to identify it
* @param config Kafka producer configuration
* @param keyType class type for the key serialization
* @param valueType class type for the value serialization
* @return an instance of the KafkaProducer
*/
public static io.vertx.rxjava.kafka.client.producer.KafkaProducer createShared(io.vertx.rxjava.core.Vertx vertx, java.lang.String name, java.util.Map config, java.lang.Class keyType, java.lang.Class valueType) {
io.vertx.rxjava.kafka.client.producer.KafkaProducer ret = io.vertx.rxjava.kafka.client.producer.KafkaProducer.newInstance((io.vertx.kafka.client.producer.KafkaProducer)io.vertx.kafka.client.producer.KafkaProducer.createShared(vertx.getDelegate(), name, config, io.vertx.lang.rxjava.Helper.unwrap(keyType), io.vertx.lang.rxjava.Helper.unwrap(valueType)), TypeArg.of(keyType), TypeArg.of(valueType));
return ret;
}
/**
* Get or create a KafkaProducer instance which shares its stream with any other KafkaProducer created with the same name
*
* When close
has been called for each shared producer the resources will be released.
* Calling end
closes all shared producers.
* @param vertx Vert.x instance to use
* @param name the producer name to identify it
* @param options Kafka producer options
* @param keyType class type for the key serialization
* @param valueType class type for the value serialization
* @return an instance of the KafkaProducer
*/
public static io.vertx.rxjava.kafka.client.producer.KafkaProducer createShared(io.vertx.rxjava.core.Vertx vertx, java.lang.String name, io.vertx.kafka.client.common.KafkaClientOptions options, java.lang.Class keyType, java.lang.Class valueType) {
io.vertx.rxjava.kafka.client.producer.KafkaProducer ret = io.vertx.rxjava.kafka.client.producer.KafkaProducer.newInstance((io.vertx.kafka.client.producer.KafkaProducer)io.vertx.kafka.client.producer.KafkaProducer.createShared(vertx.getDelegate(), name, options, io.vertx.lang.rxjava.Helper.unwrap(keyType), io.vertx.lang.rxjava.Helper.unwrap(valueType)), TypeArg.of(keyType), TypeArg.of(valueType));
return ret;
}
/**
* Create a new KafkaProducer instance
* @param vertx Vert.x instance to use
* @param config Kafka producer configuration
* @return an instance of the KafkaProducer
*/
public static io.vertx.rxjava.kafka.client.producer.KafkaProducer create(io.vertx.rxjava.core.Vertx vertx, java.util.Map config) {
io.vertx.rxjava.kafka.client.producer.KafkaProducer ret = io.vertx.rxjava.kafka.client.producer.KafkaProducer.newInstance((io.vertx.kafka.client.producer.KafkaProducer)io.vertx.kafka.client.producer.KafkaProducer.create(vertx.getDelegate(), config), TypeArg.unknown(), TypeArg.unknown());
return ret;
}
/**
* Create a new KafkaProducer instance
* @param vertx Vert.x instance to use
* @param config Kafka producer configuration
* @param keyType class type for the key serialization
* @param valueType class type for the value serialization
* @return an instance of the KafkaProducer
*/
public static io.vertx.rxjava.kafka.client.producer.KafkaProducer create(io.vertx.rxjava.core.Vertx vertx, java.util.Map config, java.lang.Class keyType, java.lang.Class valueType) {
io.vertx.rxjava.kafka.client.producer.KafkaProducer ret = io.vertx.rxjava.kafka.client.producer.KafkaProducer.newInstance((io.vertx.kafka.client.producer.KafkaProducer)io.vertx.kafka.client.producer.KafkaProducer.create(vertx.getDelegate(), config, io.vertx.lang.rxjava.Helper.unwrap(keyType), io.vertx.lang.rxjava.Helper.unwrap(valueType)), TypeArg.of(keyType), TypeArg.of(valueType));
return ret;
}
/**
* Initializes the underlying kafka transactional producer. See {@link io.vertx.rxjava.kafka.client.producer.KafkaProducer#initTransactions} ()}
* @return a future notified with the result
*/
public io.vertx.core.Future initTransactions() {
io.vertx.core.Future ret = delegate.initTransactions().map(val -> val);
return ret;
}
/**
* Initializes the underlying kafka transactional producer. See {@link io.vertx.rxjava.kafka.client.producer.KafkaProducer#initTransactions} ()}
* @return a future notified with the result
*/
public rx.Single rxInitTransactions() {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.initTransactions().onComplete(fut);
}));
}
/**
* Starts a new kafka transaction. See {@link io.vertx.rxjava.kafka.client.producer.KafkaProducer#beginTransaction}
* @return a future notified with the result
*/
public io.vertx.core.Future beginTransaction() {
io.vertx.core.Future ret = delegate.beginTransaction().map(val -> val);
return ret;
}
/**
* Starts a new kafka transaction. See {@link io.vertx.rxjava.kafka.client.producer.KafkaProducer#beginTransaction}
* @return a future notified with the result
*/
public rx.Single rxBeginTransaction() {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.beginTransaction().onComplete(fut);
}));
}
/**
* Commits the ongoing transaction. See {@link io.vertx.rxjava.kafka.client.producer.KafkaProducer#commitTransaction}
* @return a future notified with the result
*/
public io.vertx.core.Future commitTransaction() {
io.vertx.core.Future ret = delegate.commitTransaction().map(val -> val);
return ret;
}
/**
* Commits the ongoing transaction. See {@link io.vertx.rxjava.kafka.client.producer.KafkaProducer#commitTransaction}
* @return a future notified with the result
*/
public rx.Single rxCommitTransaction() {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.commitTransaction().onComplete(fut);
}));
}
/**
* Aborts the ongoing transaction. See {@link org.apache.kafka.clients.producer.KafkaProducer}
* @return a future notified with the result
*/
public io.vertx.core.Future abortTransaction() {
io.vertx.core.Future ret = delegate.abortTransaction().map(val -> val);
return ret;
}
/**
* Aborts the ongoing transaction. See {@link org.apache.kafka.clients.producer.KafkaProducer}
* @return a future notified with the result
*/
public rx.Single rxAbortTransaction() {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.abortTransaction().onComplete(fut);
}));
}
public io.vertx.rxjava.kafka.client.producer.KafkaProducer exceptionHandler(io.vertx.core.Handler handler) {
delegate.exceptionHandler(handler);
return this;
}
public io.vertx.rxjava.kafka.client.producer.KafkaProducer setWriteQueueMaxSize(int i) {
delegate.setWriteQueueMaxSize(i);
return this;
}
public io.vertx.rxjava.kafka.client.producer.KafkaProducer drainHandler(io.vertx.core.Handler handler) {
delegate.drainHandler(handler);
return this;
}
/**
* Asynchronously write a record to a topic
* @param record record to write
* @return a Future
completed with the record metadata
*/
public io.vertx.core.Future send(io.vertx.rxjava.kafka.client.producer.KafkaProducerRecord record) {
io.vertx.core.Future ret = delegate.send(record.getDelegate()).map(val -> val);
return ret;
}
/**
* Asynchronously write a record to a topic
* @param record record to write
* @return a Future
completed with the record metadata
*/
public rx.Single rxSend(io.vertx.rxjava.kafka.client.producer.KafkaProducerRecord record) {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.send(record).onComplete(fut);
}));
}
/**
* Get the partition metadata for the give topic.
* @param topic topic partition for which getting partitions info
* @return a future notified with the result
*/
public io.vertx.core.Future> partitionsFor(java.lang.String topic) {
io.vertx.core.Future> ret = delegate.partitionsFor(topic).map(val -> val);
return ret;
}
/**
* Get the partition metadata for the give topic.
* @param topic topic partition for which getting partitions info
* @return a future notified with the result
*/
public rx.Single> rxPartitionsFor(java.lang.String topic) {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.partitionsFor(topic).onComplete(fut);
}));
}
/**
* Invoking this method makes all buffered records immediately available to write
* @return a future notified with the result
*/
public io.vertx.core.Future flush() {
io.vertx.core.Future ret = delegate.flush().map(val -> val);
return ret;
}
/**
* Invoking this method makes all buffered records immediately available to write
* @return a future notified with the result
*/
public rx.Single rxFlush() {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.flush().onComplete(fut);
}));
}
/**
* Close the producer
* @return a Future
completed with the operation result
*/
public io.vertx.core.Future close() {
io.vertx.core.Future ret = delegate.close().map(val -> val);
return ret;
}
/**
* Close the producer
* @return a Future
completed with the operation result
*/
public rx.Single rxClose() {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.close().onComplete(fut);
}));
}
/**
* Close the producer
* @param timeout
* @return a future notified with the result
*/
public io.vertx.core.Future close(long timeout) {
io.vertx.core.Future ret = delegate.close(timeout).map(val -> val);
return ret;
}
/**
* Close the producer
* @param timeout
* @return a future notified with the result
*/
public rx.Single rxClose(long timeout) {
return Single.create(new SingleOnSubscribeAdapter<>(fut -> {
this.close(timeout).onComplete(fut);
}));
}
/**
* Create a new KafkaProducer instance from a native .
* @param vertx Vert.x instance to use
* @param producer the Kafka producer to wrap
* @return an instance of the KafkaProducer
*/
public static io.vertx.rxjava.kafka.client.producer.KafkaProducer create(io.vertx.rxjava.core.Vertx vertx, org.apache.kafka.clients.producer.Producer producer) {
io.vertx.rxjava.kafka.client.producer.KafkaProducer ret = io.vertx.rxjava.kafka.client.producer.KafkaProducer.newInstance((io.vertx.kafka.client.producer.KafkaProducer)io.vertx.kafka.client.producer.KafkaProducer.create(vertx.getDelegate(), producer), TypeArg.unknown(), TypeArg.unknown());
return ret;
}
/**
* Create a new KafkaProducer instance from a native .
* @param vertx Vert.x instance to use
* @param producer the Kafka producer to wrap
* @param options options used only for tracing settings
* @return an instance of the KafkaProducer
*/
public static io.vertx.rxjava.kafka.client.producer.KafkaProducer create(io.vertx.rxjava.core.Vertx vertx, org.apache.kafka.clients.producer.Producer producer, io.vertx.kafka.client.common.KafkaClientOptions options) {
io.vertx.rxjava.kafka.client.producer.KafkaProducer ret = io.vertx.rxjava.kafka.client.producer.KafkaProducer.newInstance((io.vertx.kafka.client.producer.KafkaProducer)io.vertx.kafka.client.producer.KafkaProducer.create(vertx.getDelegate(), producer, options), TypeArg.unknown(), TypeArg.unknown());
return ret;
}
public static KafkaProducer newInstance(io.vertx.kafka.client.producer.KafkaProducer arg) {
return arg != null ? new KafkaProducer(arg) : null;
}
public static KafkaProducer newInstance(io.vertx.kafka.client.producer.KafkaProducer arg, TypeArg __typeArg_K, TypeArg __typeArg_V) {
return arg != null ? new KafkaProducer(arg, __typeArg_K, __typeArg_V) : null;
}
}