
/*
 * Copyright 2014 Red Hat, Inc.
 *
 * Red Hat licenses this file to you under the Apache License, version 2.0
 * (the "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */

package io.vertx.reactivex.grpc.server;

import io.vertx.reactivex.RxHelper;
import io.vertx.reactivex.ObservableHelper;
import io.vertx.reactivex.FlowableHelper;
import io.vertx.reactivex.impl.AsyncResultMaybe;
import io.vertx.reactivex.impl.AsyncResultSingle;
import io.vertx.reactivex.impl.AsyncResultCompletable;
import io.vertx.reactivex.WriteStreamObserver;
import io.vertx.reactivex.WriteStreamSubscriber;
import java.util.Map;
import java.util.Set;
import java.util.List;
import java.util.Iterator;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import io.vertx.core.Handler;
import io.vertx.core.AsyncResult;
import io.vertx.core.json.JsonObject;
import io.vertx.core.json.JsonArray;
import io.vertx.lang.rx.RxGen;
import io.vertx.lang.rx.TypeArg;
import io.vertx.lang.rx.MappingIterator;


@RxGen(io.vertx.grpc.server.GrpcServerResponse.class)
public class GrpcServerResponse<Req, Resp> extends io.vertx.reactivex.grpc.common.GrpcWriteStream<Resp> {

  @Override
  public String toString() {
    return delegate.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    GrpcServerResponse that = (GrpcServerResponse) o;
    return delegate.equals(that.delegate);
  }
  
  @Override
  public int hashCode() {
    return delegate.hashCode();
  }

  public static final TypeArg<GrpcServerResponse> __TYPE_ARG = new TypeArg<>(
    obj -> new GrpcServerResponse((io.vertx.grpc.server.GrpcServerResponse) obj),
    GrpcServerResponse::getDelegate
  );

  private final io.vertx.grpc.server.GrpcServerResponse delegate;
  public final TypeArg<Req> __typeArg_0;
  public final TypeArg<Resp> __typeArg_1;
  
  public GrpcServerResponse(io.vertx.grpc.server.GrpcServerResponse delegate) {
    super(delegate);
    this.delegate = delegate;
    this.__typeArg_0 = TypeArg.unknown();
    this.__typeArg_1 = TypeArg.unknown();
  }

  public GrpcServerResponse(Object delegate, TypeArg<Req> typeArg_0, TypeArg<Resp> typeArg_1) {
    super((io.vertx.grpc.server.GrpcServerResponse)delegate);
    this.delegate = (io.vertx.grpc.server.GrpcServerResponse)delegate;
    this.__typeArg_0 = typeArg_0;
    this.__typeArg_1 = typeArg_1;
  }

  public io.vertx.grpc.server.GrpcServerResponse getDelegate() {
    return delegate;
  }
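
  // Sketch (illustrative only, not part of the generated source): a core
  // io.vertx.grpc.server.GrpcServerResponse can be wrapped into this RxJava view via newInstance,
  // and the underlying delegate recovered again. "coreResponse" is a hypothetical variable.
  //
  //   GrpcServerResponse rx = GrpcServerResponse.newInstance(coreResponse);
  //   io.vertx.grpc.server.GrpcServerResponse core = rx.getDelegate();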

  private WriteStreamObserver<Resp> observer;
  private WriteStreamSubscriber<Resp> subscriber;

  // Lazily creates (and caches) an RxJava Observer view of this write stream.
  public synchronized WriteStreamObserver<Resp> toObserver() {
    if (observer == null) {
      Function conv = (Function) __typeArg_0.unwrap;
      observer = RxHelper.toObserver(getDelegate(), conv);
    }
    return observer;
  }

  // Lazily creates (and caches) an RxJava Subscriber view of this write stream,
  // suitable for subscribing a Flowable to it with backpressure.
  public synchronized WriteStreamSubscriber<Resp> toSubscriber() {
    if (subscriber == null) {
      Function conv = (Function) __typeArg_0.unwrap;
      subscriber = RxHelper.toSubscriber(getDelegate(), conv);
    }
    return subscriber;
  }
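
  // Usage sketch (illustrative only, not part of the generated source): toSubscriber() lets a
  // Flowable of response messages drive this write stream, with backpressure handled by the
  // returned WriteStreamSubscriber. The "replies" Flowable and "response" variable below are
  // hypothetical placeholders.
  //
  //   Flowable<Resp> replies = ...;
  //   replies.subscribe(response.toSubscriber());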


  /**
   * Write some data to the stream.
   *
   * <p> The data is usually put on an internal write queue, and the write actually happens
   * asynchronously. To avoid running out of memory by putting too much on the write queue,
   * check the {@link io.vertx.reactivex.core.streams.WriteStream#writeQueueFull} method before writing. This is done automatically if
   * using a {@link io.vertx.reactivex.core.streams.Pipe}.
   *
   * <p> When the data is moved from the queue to the actual medium, the returned
   * {@link io.vertx.core.Future} will be completed with the write result, e.g. the future is succeeded
   * when a server HTTP response buffer is written to the socket and failed if the remote
   * client has closed the socket while the data was still pending for write.
   * @param data the data to write
   * @return a future completed with the write result
   */
  public io.vertx.core.Future<java.lang.Void> write(Resp data) {
    io.vertx.core.Future<java.lang.Void> ret = delegate.write(__typeArg_1.unwrap(data)).map(val -> val);
    return ret;
  }

  /**
   * Write some data to the stream.
   *
   * <p> The data is usually put on an internal write queue, and the write actually happens
   * asynchronously. To avoid running out of memory by putting too much on the write queue,
   * check the {@link io.vertx.reactivex.core.streams.WriteStream#writeQueueFull} method before writing. This is done automatically if
   * using a {@link io.vertx.reactivex.core.streams.Pipe}.
   *
   * <p> When the data is moved from the queue to the actual medium, the returned
   * {@link io.vertx.core.Future} will be completed with the write result, e.g. the future is succeeded
   * when a server HTTP response buffer is written to the socket and failed if the remote
   * client has closed the socket while the data was still pending for write.
   * @param data the data to write
   * @return a future completed with the write result
   */
  public io.reactivex.Completable rxWrite(Resp data) {
    return AsyncResultCompletable.toCompletable($handler -> {
      this.write(data).onComplete($handler);
    });
  }

  /**
   * Ends the stream.
   * <p>
   * Once the stream has ended, it cannot be used any more.
   * @return a future completed with the result
   */
  public io.vertx.core.Future<java.lang.Void> end() {
    io.vertx.core.Future<java.lang.Void> ret = delegate.end().map(val -> val);
    return ret;
  }

  /**
   * Ends the stream.
   * <p>
   * Once the stream has ended, it cannot be used any more.
   * @return a future completed with the result
   */
  public io.reactivex.Completable rxEnd() {
    return AsyncResultCompletable.toCompletable($handler -> {
      this.end().onComplete($handler);
    });
  }

  /**
   * Same as {@link io.vertx.reactivex.core.streams.WriteStream#end} but writes some data to the stream before ending.
   * @param data the data to write
   * @return a future completed with the result
   */
  public io.vertx.core.Future<java.lang.Void> end(Resp data) {
    io.vertx.core.Future<java.lang.Void> ret = delegate.end(__typeArg_1.unwrap(data)).map(val -> val);
    return ret;
  }

  /**
   * Same as {@link io.vertx.reactivex.core.streams.WriteStream#end} but writes some data to the stream before ending.
   * @param data the data to write
   * @return a future completed with the result
   */
  public io.reactivex.Completable rxEnd(Resp data) {
    return AsyncResultCompletable.toCompletable($handler -> {
      this.end(data).onComplete($handler);
    });
  }

  /**
   * This will return true if there are more bytes in the write queue than the value set using {@link io.vertx.reactivex.grpc.server.GrpcServerResponse#setWriteQueueMaxSize}
   * @return true if write queue is full
   */
  public boolean writeQueueFull() {
    boolean ret = delegate.writeQueueFull();
    return ret;
  }

  /**
   * Set the stream format, e.g. proto or json.
   * <p>
   * It must be called before sending any message, otherwise proto will be used.
   * @param format the message format
   * @return a reference to this, so the API can be used fluently
   */
  public io.vertx.reactivex.grpc.common.GrpcWriteStream<Resp> format(io.vertx.grpc.common.WireFormat format) {
    delegate.format(format);
    return this;
  }

  /**
   * Set the grpc status response
   * @param status the status
   * @return a reference to this, so the API can be used fluently
   */
  public io.vertx.reactivex.grpc.server.GrpcServerResponse<Req, Resp> status(io.vertx.grpc.common.GrpcStatus status) {
    delegate.status(status);
    return this;
  }

  /**
   * Set the grpc status response message
   * @param msg the message
   * @return a reference to this, so the API can be used fluently
   */
  public io.vertx.reactivex.grpc.server.GrpcServerResponse<Req, Resp> statusMessage(java.lang.String msg) {
    delegate.statusMessage(msg);
    return this;
  }

  public io.vertx.reactivex.grpc.server.GrpcServerResponse<Req, Resp> encoding(java.lang.String encoding) {
    delegate.encoding(encoding);
    return this;
  }

  /**
   * @return the {@link io.vertx.core.MultiMap} to write metadata trailers
   */
  public io.vertx.core.MultiMap trailers() {
    io.vertx.core.MultiMap ret = delegate.trailers();
    return ret;
  }

  public io.vertx.reactivex.grpc.server.GrpcServerResponse<Req, Resp> exceptionHandler(io.vertx.core.Handler<java.lang.Throwable> handler) {
    delegate.exceptionHandler(handler);
    return this;
  }

  public io.vertx.reactivex.grpc.server.GrpcServerResponse<Req, Resp> setWriteQueueMaxSize(int maxSize) {
    delegate.setWriteQueueMaxSize(maxSize);
    return this;
  }

  public io.vertx.reactivex.grpc.server.GrpcServerResponse<Req, Resp> drainHandler(io.vertx.core.Handler<java.lang.Void> handler) {
    delegate.drainHandler(handler);
    return this;
  }

  public io.vertx.core.Future<java.lang.Void> send(Resp item) {
    io.vertx.core.Future<java.lang.Void> ret = delegate.send(__typeArg_1.unwrap(item)).map(val -> val);
    return ret;
  }

  public io.reactivex.Completable rxSend(Resp item) {
    return AsyncResultCompletable.toCompletable($handler -> {
      this.send(item).onComplete($handler);
    });
  }

  public io.vertx.core.Future<java.lang.Void> send(io.vertx.reactivex.core.streams.ReadStream<Resp> body) {
    io.vertx.core.Future<java.lang.Void> ret = delegate.send(body.getDelegate()).map(val -> val);
    return ret;
  }

  public io.reactivex.Completable rxSend(io.vertx.reactivex.core.streams.ReadStream<Resp> body) {
    return AsyncResultCompletable.toCompletable($handler -> {
      this.send(body).onComplete($handler);
    });
  }

  public io.vertx.core.Future<java.lang.Void> send(io.reactivex.Flowable<Resp> body) {
    io.vertx.core.Future<java.lang.Void> ret = delegate.send(io.vertx.reactivex.impl.ReadStreamSubscriber.asReadStream(body, obj -> __typeArg_1.unwrap(obj)).resume()).map(val -> val);
    return ret;
  }

  public io.reactivex.Completable rxSend(io.reactivex.Flowable<Resp> body) {
    return AsyncResultCompletable.toCompletable($handler -> {
      this.send(body).onComplete($handler);
    });
  }

  public static GrpcServerResponse newInstance(io.vertx.grpc.server.GrpcServerResponse arg) {
    return arg != null ? new GrpcServerResponse(arg) : null;
  }

  public static <Req, Resp> GrpcServerResponse<Req, Resp> newInstance(io.vertx.grpc.server.GrpcServerResponse arg, TypeArg<Req> __typeArg_Req, TypeArg<Resp> __typeArg_Resp) {
    return arg != null ? new GrpcServerResponse<Req, Resp>(arg, __typeArg_Req, __typeArg_Resp) : null;
  }

}
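
// Usage sketch (illustrative only, not part of the generated source). It assumes the companion
// io.vertx.reactivex.grpc.server.GrpcServer / GrpcServerRequest wrappers and a grpc-java generated
// Greeter service; GreeterGrpc, HelloRequest and HelloReply are hypothetical placeholders.
//
//   GrpcServer grpcServer = GrpcServer.server(vertx);
//   grpcServer.callHandler(GreeterGrpc.getSayHelloMethod(), request -> {
//     GrpcServerResponse<HelloRequest, HelloReply> response = request.response();
//     response.status(GrpcStatus.OK);                     // optional, OK is the default status
//     request.toFlowable()                                // request messages as a Flowable
//       .firstOrError()                                   // unary call: take the single message
//       .map(msg -> HelloReply.newBuilder().setMessage("Hello " + msg.getName()).build())
//       .flatMapCompletable(response::rxEnd)              // write the reply and end the stream
//       .subscribe();
//     // For a streaming response, a Flowable<HelloReply> could instead be passed to rxSend(...).
//   });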




