# SPDX-License-Identifier: Apache-2.0

#########################################
# pekko-http-core Reference Config File #
#########################################

# This is the reference config file that contains all the default settings.
# Make your edits/overrides in your application.conf.

# Pekko HTTP version, checked against the runtime version of Pekko HTTP.
# Loaded from generated conf file.
include "pekko-http-version"

pekko.http {

  server {
    # The default value of the `Server` header to produce if no
    # explicit `Server` header was included in a response.
    # If this value is the empty string and no header was included in
    # the response, no `Server` header will be rendered at all.
    server-header = pekko-http/${pekko.http.version}

    # "PREVIEW" features that are not yet fully production ready.
    # These flags can change or be removed between patch releases.
    preview {
      # If this setting is enabled, `Http().newServerAt(...).bind` and `bindSync`
      # will be able to serve HTTP/2.
      #
      # `Http().newServerAt(...).bindFlow` and `connectionSource()` are not supported.
      enable-http2 = off
    }

    # The time after which an idle connection will be automatically closed.
    # Set to `infinite` to completely disable idle connection timeouts.
    idle-timeout = 60 s

    # Defines the default time period within which the application has to
    # produce an HttpResponse for any given HttpRequest it received.
    # The timeout begins to run when the *end* of the request has been
    # received, so even potentially long uploads can have a short timeout.
    # Set to `infinite` to completely disable request timeout checking.
    #
    # Make sure this timeout is smaller than the idle-timeout; otherwise,
    # the idle-timeout will kick in first and reset the TCP connection
    # without a response.
    #
    # If this setting is not `off` the HTTP server layer attaches a
    # `Timeout-Access` header to the request, which enables programmatic
    # customization of the timeout period and timeout response for each
    # request individually.
    request-timeout = 20 s
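    # As an illustration only (the values below are made up, not recommendations), an
    # application.conf override that raises both timeouts while keeping the request
    # timeout below the idle timeout might look like this:
    #
    # pekko.http.server {
    #   idle-timeout    = 120 s
    #   request-timeout = 90 s   # stays below idle-timeout so a timeout response can still be sent
    # }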

    # The time period within which the TCP binding process must be completed.
    bind-timeout = 1s

    # Default port to bind HTTP server to when no port was explicitly given.
    default-http-port = 80

    # Default port to bind HTTPS server to when no port was explicitly given.
    default-https-port = 443

    # The time period the HTTP server implementation will keep a connection open after
    # all data has been delivered to the network layer. This setting is similar to the SO_LINGER socket option
    # but does not only include the OS-level socket but also covers the Pekko IO / Pekko Streams network stack.
    # The setting is an extra precaution that prevents clients from keeping open a connection that is
    # already considered completed on the server side.
    #
    # If the network-level buffers (including the Pekko Stream / Pekko IO networking stack buffers)
    # contain more data than can be transferred to the client in the given time once the server side
    # considers this connection finished, the client may encounter a connection reset.
    #
    # Set to 'infinite' to disable automatic connection closure (which risks leaking connections).
    linger-timeout = 1 min

    # The maximum number of concurrently accepted connections when binding a server using
    # `Http().newServerAt().bindXYZ()` methods.
    #
    # This setting doesn't apply to the `Http().bind` method which will still
    # deliver an unlimited backpressured stream of incoming connections.
    #
    # Note that this setting limits the number of connections on a best-effort basis.
    # It does *not* strictly guarantee that the number of established TCP connections will never
    # exceed the limit (but it will be approximately correct) because connection termination happens
    # asynchronously. It also does *not* guarantee that the number of concurrently active handler
    # flow materializations will never exceed the limit for the reason that it is impossible to reliably
    # detect when a materialization has ended.
    max-connections = 1024

    # The maximum number of requests that are accepted (and dispatched to
    # the application) on one single connection before the first request
    # has to be completed.
    # Incoming requests that would cause the pipelining limit to be exceeded
    # are not read from the connection's socket so as to build up "back-pressure"
    # to the client via TCP flow control.
    # A setting of 1 disables HTTP pipelining, since only one request per
    # connection can be "open" (i.e. being processed by the application) at any
    # time. Set to higher values to enable HTTP pipelining.
    # This value must be > 0 and <= 1024.
    pipelining-limit = 1

    # Enables/disables the addition of a `Remote-Address` header
    # holding the client's (remote) IP address.
    # Deprecated since Akka HTTP 10.2.0: please use `remote-address-attribute` instead.
    remote-address-header = off

    # Enables/disables the addition of a remote-address attribute in HttpRequest
    # holding the client's (remote) IP address. This is preferred over `remote-address-header`
    # because it cannot be confused with a real header.
    remote-address-attribute = off

    # Enables/disables the addition of a `Raw-Request-URI` header holding the
    # original raw request URI as the client has sent it.
    raw-request-uri-header = off

    # Enables/disables automatic handling of HEAD requests.
    # If this setting is enabled the server dispatches HEAD requests as GET
    # requests to the application and automatically strips off all message
    # bodies from outgoing responses.
    # Note that, even when this setting is off the server will never send
    # out message bodies on responses to HEAD requests.
    transparent-head-requests = off

    # Enables/disables the returning of more detailed error messages to
    # the client in the error response.
    # Should be disabled for browser-facing APIs due to the risk of XSS attacks
    # and (probably) enabled for internal or non-browser APIs.
    # Note that pekko-http will always produce log messages containing the full
    # error details.
    verbose-error-messages = off

    # The initial size of the buffer to render the response headers in.
    # Can be used for fine-tuning response rendering performance but probably
    # doesn't have to be fiddled with in most applications.
    response-header-size-hint = 512

    # The requested maximum length of the queue of incoming connections.
    # If the server is busy and the backlog is full the OS will start dropping
    # SYN packets and connection attempts may fail. Note that the backlog
    # size is usually only a maximum size hint for the OS and the OS can
    # restrict the number further based on global limits.
    backlog = 100

    # If this setting is empty, the server only accepts requests that carry a
    # non-empty `Host` header; requests without one are rejected with `400 Bad Request`.
    # Set to a non-empty value to be used in lieu of a missing or empty `Host`
    # header to make the server accept such requests.
    # Note that the server will never accept an HTTP/1.1 request without a `Host`
    # header at all, i.e. this setting only affects HTTP/1.1 requests with an empty
    # `Host` header as well as HTTP/1.0 requests.
    # Examples: `www.spray.io` or `example.com:8080`
    default-host-header = ""

    # Socket options to set for the listening socket. If a setting is left
    # undefined, it will use whatever the default on the system is.
    socket-options {
      so-receive-buffer-size = undefined
      so-send-buffer-size = undefined
      so-reuse-address = undefined
      so-traffic-class = undefined
      tcp-keep-alive = undefined
      tcp-oob-inline = undefined
      tcp-no-delay = undefined
    }
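    # As a sketch (the option values below are illustrative only), individual socket
    # options can be overridden in application.conf like this:
    #
    # pekko.http.server.socket-options {
    #   tcp-no-delay           = true
    #   so-receive-buffer-size = 262144
    # }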

    # When graceful termination is invoked with a deadline, pending requests that have not
    # been handled once the deadline passes will be answered with a "terminating" HTTP response
    # instead of being delivered to the user handler.
    # This response is configurable here via configuration, or via code in case a more
    # sophisticated response (e.g. one with a response entity) is needed.
    #
    termination-deadline-exceeded-response {
      # Status code of the "terminating" response to be automatically sent to pending requests once the termination deadline is exceeded.
      status = 503 # ServiceUnavailable
    }

    # Modify to tweak parsing settings on the server-side only.
    parsing {
      # no overrides by default, see `pekko.http.parsing` for default values

      # Server-specific parsing settings:

      # Default maximum content length which should not be exceeded by incoming request entities.
      # Can be changed at runtime (to a higher or lower value) via the `HttpEntity::withSizeLimit` method.
      # Note that it is not necessarily a problem to set this to a high value as all stream operations
      # are always properly backpressured.
      # Nevertheless you might want to apply some limit in order to prevent a single client from consuming
      # an excessive amount of server resources.
      #
      # Set to `infinite` to completely disable entity length checks. (Even then you can still apply one
      # programmatically via `withSizeLimit`.)
      max-content-length = 8m

      # When a request is so malformed we cannot create a RequestContext out of it,
      # the regular exception handling does not apply, and a default error handling
      # is applied that only has access to the parse error and not the actual request.
      # To customize this error response, set error-handler to the FQCN of an
      # implementation of org.apache.pekko.http.ParsingErrorHandler
      error-handler = "org.apache.pekko.http.DefaultParsingErrorHandler$"
    }

    # Enables/disables the logging of unencrypted HTTP traffic to and from the HTTP
    # server for debugging reasons.
    #
    # Note: Use with care. Logging of unencrypted data traffic may expose secret data.
    #
    # Incoming and outgoing traffic will be logged in hexdump format. To enable logging,
    # specify the number of bytes to log per chunk of data (the actual chunking depends
    # on implementation details and networking conditions and should be treated as
    # arbitrary).
    #
    # For logging on the client side, see pekko.http.client.log-unencrypted-network-bytes.
    #
    # `off` : no log messages are produced
    # Int   : determines how many bytes should be logged per data chunk
    log-unencrypted-network-bytes = off
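    # For example (an illustrative value, not a recommendation), to log up to 1000 bytes
    # per chunk while debugging locally:
    #
    # log-unencrypted-network-bytes = 1000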

    # Cancellation in the HTTP streams is delayed by this duration to prevent race conditions between cancellation
    # and stream completion / failure. In most cases, the value chosen here should make no difference because
    # HTTP streams are loops where completion and failures should propagate immediately and make the handling of
    # cancellations redundant.
    #
    # In most cases, there should be no reason to change this setting.
    #
    # Set to 0 to disable the delay.
    stream-cancellation-delay = 100 millis

    http2 {
      # The maximum number of requests per connection concurrently dispatched to the request handler.
      # This limit is enforced as soon as the connection between the peers is established. Enforcing
      # the limit even before the SETTINGS/SETTINGS_ACK exchange has completed means
      # that we will refuse extra streams/requests that were sent by the client right after
      # the connection was established but before it received our SETTINGS.
      max-concurrent-streams = 256

      # The maximum number of bytes to receive from a request entity in a single chunk.
      #
      # The reasoning to limit that amount (instead of delivering all buffered data for a stream) is that
      # the amount of data in the internal buffers will drive backpressure and flow control on the HTTP/2 level. Bigger
      # chunks would mean that the user-level entity reader will have to buffer all that data if it cannot read it in one
      # go. The implementation would not be able to backpressure further data in that case because it does not know about
      # this user-level buffer.
      request-entity-chunk-size = 65536 b

      # The number of request data bytes the HTTP/2 implementation is allowed to buffer internally per connection. Free
      # space in this buffer is communicated to the peer using HTTP/2 flow-control messages to backpressure data if it
      # isn't read fast enough.
      #
      # When there is no backpressure, this amount will limit the amount of in-flight data. It might need to be increased
      # for high bandwidth-delay-product connections.
      #
      # There is a relation between the `incoming-connection-level-buffer-size` and the `incoming-stream-level-buffer-size`:
      # If incoming-connection-level-buffer-size < incoming-stream-level-buffer-size * number_of_streams, then
      # head-of-line blocking is possible between different streams on the same connection.
      incoming-connection-level-buffer-size = 10 MB
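      # A rough worked example with the defaults in this file: 512kB per stream times 256
      # streams is on the order of 130 MB, far more than the 10 MB connection-level buffer,
      # so head-of-line blocking between streams is possible under heavy concurrent load.
      # One illustrative (not necessarily recommended) way to restore the inequality for up
      # to 256 streams is to lower the per-stream buffer, e.g.:
      #
      # incoming-stream-level-buffer-size = 32kB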

      # The number of request data bytes the HTTP/2 implementation is allowed to buffer internally per stream. Free space
      # in this buffer is communicated to the peer using HTTP/2 flow-control messages to backpressure data if it isn't
      # read fast enough.
      #
      # When there is no backpressure, this amount will limit the amount of in-flight data per stream. It might need to
      # be increased for high bandwidth-delay-product connections.
      incoming-stream-level-buffer-size = 512kB

      # For incoming requests, the infrastructure collects at least the given number of bytes before dispatching an HttpRequest.
      # If all request data is received before or when the threshold is reached, the entity data is dispatched as a strict entity
      # which allows more efficient processing of the request data without involving streams.
      #
      # If the given value is 0, the request is immediately dispatched and always carries a stream of data.
      # You can use value `1` to create a strict entity if the request contains at most a single data frame, and a streamed
      # entity otherwise.
      #
      # If `min-collect-strict-entity-size > 0` and a request is cancelled (RST_STREAM) before enough data has been collected and the request
      # is dispatched, then the request is silently discarded.
      #
      # To avoid flow control dead-locks, the value must be smaller than or equal to both `incoming-stream-level-buffer-size`
      # and `incoming-connection-level-buffer-size / max-concurrent-streams`.
      #
      # Note that, if enabled, requests that expect data (endStream = false on HEADERS) but never receive a DATA frame will never be
      # dispatched. In a regular HTTP context, this is uncommon. It would require a client that expects data from the server first
      # before sending any data of its own.
      min-collect-strict-entity-size = 0
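      # A rough worked example of the bound with the defaults in this file (illustrative
      # only): the value must not exceed incoming-stream-level-buffer-size (512kB) nor
      # incoming-connection-level-buffer-size / max-concurrent-streams (10 MB / 256,
      # i.e. roughly 39 kB), so a setting such as the following would satisfy both bounds:
      #
      # min-collect-strict-entity-size = 32768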

      # The maximum number of outgoing control frames to buffer when the peer does not read from its TCP connection before
      # backpressuring incoming frames.
      #
      # On a healthy HTTP/2 connection this setting should have little effect because control frames are given priority over
      # data frames and should not be buffered for a long time.
      #
      # The limit is necessary to prevent a malicious peer from soliciting the buffering of outgoing control frames (e.g. by sending PINGs)
      # without ever reading frames, ultimately leading to an out-of-memory situation. With the limit in place, the implementation
      # stops reading incoming frames when the number of outgoing control frames has reached the given amount. This way an attacker
      # isn't able to communicate any further without first freeing space in the TCP window, draining the buffered control frames.
      #
      # See CVE-2019-9512 for an example of such an attack.
      #
      # Note that only control frames are affected because data frames, in contrast, are covered by the HTTP/2 flow control.
      outgoing-control-frame-buffer-size = 1024

      # Enable verbose debug logging for all incoming and outgoing frames
      log-frames = false

      # When there is no data transmitted in either direction, but there are active streams, send an HTTP/2 ping frame with this initial
      # delay and subsequent interval to make sure the connection is kept alive; 0s disables sending ping frames.
      ping-interval = 0s

      # Fail the connection if a sent ping is not acknowledged within this timeout.
      # When zero, the ping-interval is used. If set, the value must evenly divide the ping-interval and be less than or equal to it.
      ping-timeout = 0s
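      # An illustrative keep-alive configuration (values are examples only) in which the
      # timeout divides the interval evenly and stays below it:
      #
      # ping-interval = 30s
      # ping-timeout  = 10s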

      frame-type-throttle {
        # Configure the throttle for non-data frame types (https://github.com/apache/pekko-http/issues/332).
        # The supported frame-types for throttling are:
        # reset, headers, continuation, go-away, priority, ping, push-promise, window-update
        # If you are concerned about CVE-2023-44487, you could set: pekko.http.server.http2.frame-type-throttle.frame-types = ["reset"]
        frame-types = []
        cost = 100
        burst = 100
        # interval must be a positive duration
        interval = 1s
      }
    }

    websocket {
      # Periodic keep-alive may be implemented either by sending Ping frames,
      # to which the other side is expected to reply with a Pong frame,
      # or by sending Pong frames, which serve as a unidirectional heartbeat.
      # Valid values:
      #   ping - default, for bi-directional ping/pong keep-alive heartbeating
      #   pong - for uni-directional pong keep-alive heartbeating
      #
      # It is also possible to provide a payload for each heartbeat message;
      # this setting can be configured programmatically by modifying the websocket settings.
      # See: https://pekko.apache.org/docs/pekko-http/current/server-side/websocket-support.html
      periodic-keep-alive-mode = ping

      # Interval for sending periodic keep-alives
      # The frame sent will be the one configured in pekko.http.server.websocket.periodic-keep-alive-mode
      # `infinite` by default, or a duration that is the max idle interval after which a keep-alive frame should be sent.
      # The value `infinite` means that *no* keep-alive heartbeat will be sent, as "the allowed idle time is infinite".
      periodic-keep-alive-max-idle = infinite
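      # For example (an illustrative value only), to send the configured keep-alive frame
      # after at most 10 seconds without traffic:
      #
      # periodic-keep-alive-max-idle = 10 seconds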

      # Enable verbose debug logging for all incoming and outgoing frames
      log-frames = false
    }
  }

  #client-settings
  client {
    # The default value of the `User-Agent` header to produce if no
    # explicit `User-Agent`-header was included in a request.
    # If this value is the empty string and no header was included in
    # the request, no `User-Agent` header will be rendered at all.
    user-agent-header = pekko-http/${pekko.http.version}

    # The time period within which the TCP connecting process must be completed.
    connecting-timeout = 10s

    # The time after which an idle connection will be automatically closed.
    # Set to `infinite` to completely disable idle timeouts.
    idle-timeout = 60 s

    # The initial size of the buffer to render the request headers in.
    # Can be used for fine-tuning request rendering performance but probably
    # doesn't have to be fiddled with in most applications.
    request-header-size-hint = 512

    # Socket options to set for the outgoing connection socket. If a setting is left
    # undefined, it will use whatever the default on the system is.
    socket-options {
      so-receive-buffer-size = undefined
      so-send-buffer-size = undefined
      so-reuse-address = undefined
      so-traffic-class = undefined
      tcp-keep-alive = undefined
      tcp-oob-inline = undefined
      tcp-no-delay = undefined
    }

    # Client https proxy options. When using ClientTransport.httpsProxy() with or without credentials,
    # host/port must be either passed explicitly or set here. If a host is not set, the proxy will not be used.
    proxy {
      https {
        host = ""
        port = 443
      }
    }
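    # An illustrative override (hostname and port below are placeholders, not real defaults):
    #
    # pekko.http.client.proxy.https {
    #   host = "proxy.example.com"
    #   port = 8080
    # }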

    # Modify to tweak parsing settings on the client-side only.
    parsing {
      # no overrides by default, see `pekko.http.parsing` for default values

      # Default maximum content length which should not be exceeded by incoming response entities.
      # Can be changed at runtime (to a higher or lower value) via the `HttpEntity::withSizeLimit` method.
      # Note that it is not necessarily a problem to set this to a high value as all stream operations
      # are always properly backpressured.
      #
      # On the client-side, this limit is disabled by default because a client controls the requests it runs.
      #
      # Set to `infinite` to completely disable entity length checks. (Even then you can still apply one
      # programmatically via `withSizeLimit`.)
      max-content-length = infinite
    }

    # Enables/disables the logging of unencrypted HTTP traffic to and from the HTTP
    # client for debugging reasons.
    #
    # Note: Use with care. Logging of unencrypted data traffic may expose secret data.
    #
    # Incoming and outgoing traffic will be logged in hexdump format. To enable logging,
    # specify the number of bytes to log per chunk of data (the actual chunking depends
    # on implementation details and networking conditions and should be treated as
    # arbitrary).
    #
    # For logging on the server side, see pekko.http.server.log-unencrypted-network-bytes.
    #
    # `off` : no log messages are produced
    # Int   : determines how many bytes should be logged per data chunk
    log-unencrypted-network-bytes = off

    #client-settings
    // FIXME: unify with server-side part (by importing or similar to parsing)
    http2 {
      # The maximum number of requests per connection concurrently dispatched to the request handler.
      # This limit is enforced as soon as the connection between the peers is established. Enforcing
      # the limit even before the SETTINGS/SETTINGS_ACK exchange has completed means
      # that we will refuse extra streams/push promises that were sent by the server right after
      # the connection was established but before it received our SETTINGS.
      max-concurrent-streams = 256

      # The maximum number of bytes to receive from a request entity in a single chunk.
      #
      # The reasoning to limit that amount (instead of delivering all buffered data for a stream) is that
      # the amount of data in the internal buffers will drive backpressure and flow control on the HTTP/2 level. Bigger
      # chunks would mean that the user-level entity reader will have to buffer all that data if it cannot read it in one
      # go. The implementation would not be able to backpressure further data in that case because it does not know about
      # this user-level buffer.
      request-entity-chunk-size = 65536 b

      # The number of request data bytes the HTTP/2 implementation is allowed to buffer internally per connection. Free
      # space in this buffer is communicated to the peer using HTTP/2 flow-control messages to backpressure data if it
      # isn't read fast enough.
      #
      # When there is no backpressure, this amount will limit the amount of in-flight data. It might need to be increased
      # for high bandwidth-delay-product connections.
      #
      # There is a relation between the `incoming-connection-level-buffer-size` and the `incoming-stream-level-buffer-size`:
      # If incoming-connection-level-buffer-size < incoming-stream-level-buffer-size * number_of_streams, then
      # head-of-line blocking is possible between different streams on the same connection.
      incoming-connection-level-buffer-size = 10 MB

      # The number of request data bytes the HTTP/2 implementation is allowed to buffer internally per stream. Free space
      # in this buffer is communicated to the peer using HTTP/2 flow-control messages to backpressure data if it isn't
      # read fast enough.
      #
      # When there is no backpressure, this amount will limit the amount of in-flight data per stream. It might need to
      # be increased for high bandwidth-delay-product connections.
      incoming-stream-level-buffer-size = 512kB

      # The maximum number of outgoing control frames to buffer when the peer does not read from its TCP connection before
      # backpressuring incoming frames.
      #
      # On a healthy HTTP/2 connection this setting should have little effect because control frames are given priority over
      # data frames and should not be buffered for a long time.
      #
      # The limit is necessary to prevent a malicious peer from soliciting the buffering of outgoing control frames (e.g. by sending PINGs)
      # without ever reading frames, ultimately leading to an out-of-memory situation. With the limit in place, the implementation
      # stops reading incoming frames when the number of outgoing control frames has reached the given amount. This way an attacker
      # isn't able to communicate any further without first freeing space in the TCP window, draining the buffered control frames.
      #
      # See CVE-2019-9512 for an example of such an attack.
      #
      # Note that only control frames are affected because data frames, in contrast, are covered by the HTTP/2 flow control.
      outgoing-control-frame-buffer-size = 1024

      # Enable verbose debug logging for all incoming and outgoing frames
      log-frames = false

      # When there is no data transmitted in either direction, but there are active streams, send an HTTP/2 ping frame with this initial
      # delay and subsequent interval to make sure the connection is kept alive; 0s disables sending ping frames.
      ping-interval = 0s

      # Fail the connection if a sent ping is not acknowledged within this timeout.
      # When zero, the ping-interval is used. If set, the value must evenly divide the ping-interval and be less than or equal to it.
      ping-timeout = 0s

      # The maximum number of times a connection is attempted
      # before giving up and returning an error.
      # Set to zero to retry indefinitely.
      max-persistent-attempts = 0

      # Starting backoff before reconnecting when a persistent HTTP/2 client connection fails
      # see `pekko.http.host-connection-pool.base-connection-backoff` for details on the backoff mechanism.
      base-connection-backoff = ${pekko.http.host-connection-pool.base-connection-backoff}

      # Max backoff between reconnect attempts when a persistent HTTP/2 client connection fails
      # see `pekko.http.host-connection-pool.max-connection-backoff` for details on the backoff mechanism.
      max-connection-backoff = ${pekko.http.host-connection-pool.max-connection-backoff}

      # When gracefully closing the HTTP/2 client, await at most `completion-timeout` for in-flight
      # requests to complete.
      completion-timeout = 3s

    }

    #client-settings
    websocket {
      # Periodic keep-alive may be implemented either by sending Ping frames,
      # to which the other side is expected to reply with a Pong frame,
      # or by sending Pong frames, which serve as a unidirectional heartbeat.
      # Valid values:
      #   ping - default, for bi-directional ping/pong keep-alive heartbeating
      #   pong - for uni-directional pong keep-alive heartbeating
      #
      # See https://tools.ietf.org/html/rfc6455#section-5.5.2
      # and https://tools.ietf.org/html/rfc6455#section-5.5.3 for more information
      periodic-keep-alive-mode = ping

      # Interval for sending periodic keep-alives
      # The frame sent will be the one configured in pekko.http.client.websocket.periodic-keep-alive-mode
      # `infinite` by default, or a duration that is the max idle interval after which a keep-alive frame should be sent
      periodic-keep-alive-max-idle = infinite

      # Enable verbose debug logging for all incoming and outgoing frames
      log-frames = false
    }

    # Cancellation in the HTTP streams is delayed by this duration to prevent race conditions between cancellation
    # and stream completion / failure. In most cases, the value chosen here should make no difference because
    # HTTP streams are loops where completion and failures should propagate immediately and make the handling of
    # cancellations redundant.
    #
    # In most cases, there should be no reason to change this setting.
    #
    # Set to 0 to disable the delay.
    stream-cancellation-delay = 100 millis
  }
  #client-settings

  #pool-settings
  host-connection-pool {
    # The maximum number of parallel connections that a connection pool to a
    # single host endpoint is allowed to establish. Must be greater than zero.
    max-connections = 4

    # The minimum number of parallel connections that a pool should keep alive ("hot").
    # If the number of connections falls below the given threshold, new ones are spawned.
    # You can use this setting to build a hot pool of "always on" connections.
    # Default is 0, meaning there might be no active connection at a given moment.
    # Keep in mind that `min-connections` should be smaller than or equal to `max-connections`.
    min-connections = 0

    # The maximum number of times failed requests are attempted again
    # (if the request can be safely retried) before giving up and returning an error.
    # Set to zero to completely disable request retries.
    max-retries = 5

    # The maximum number of open requests accepted into the pool across all
    # materializations of any of its client flows.
    # Protects against (accidentally) overloading a single pool with too many client flow materializations.
    # Note that with N concurrent materializations the max number of open requests in the pool
    # will never exceed N * max-connections * pipelining-limit.
    # Must be a power of 2 and > 0!
    max-open-requests = 32
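    # A rough worked example with the defaults in this file (illustrative only): with
    # max-connections = 4 and pipelining-limit = 1, N concurrent materializations can have
    # at most N * 4 requests open, so the default of 32 covers up to 8 materializations.
    # For, say, 16 materializations the next power of 2 that fits would be:
    #
    # max-open-requests = 64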

    # The maximum duration for a connection to be kept alive.
    # This amount gets modified by 10 percent fuzziness to avoid simultaneous reconnections.
    # Defaults to 'infinite'.
    # Note that this is only implemented in the new host connection pool.
    max-connection-lifetime = infinite

    # Client-side pipelining is not currently supported. See https://github.com/apache/pekko-http/issues/32
    pipelining-limit = 1

    # The minimum duration to backoff new connection attempts after the previous connection attempt failed.
    #
    # The pool uses an exponential randomized backoff scheme. After the first failure, the next attempt will only be
    # tried after a random duration between the base connection backoff and twice the base connection backoff. If that
    # attempt fails as well, the next attempt will be delayed by twice that amount. The total delay is capped using the
    # `max-connection-backoff` setting.
    #
    # The backoff applies to the complete pool, i.e. after one failed connection attempt, further connection attempts
    # to that host will back off for all connections of the pool. After the service has recovered, connections will come out
    # of backoff one by one due to the random extra backoff time. This is to avoid overloading a just recently recovered
    # service with new connections ("thundering herd").
    #
    # Example: base-connection-backoff = 100ms, max-connection-backoff = 10 seconds
    #   - After 1st failure, backoff somewhere between 100ms and 200ms
    #   - After 2nd, between  200ms and  400ms
    #   - After 3rd, between  200ms and  400ms
    #   - After 4th, between  400ms and  800ms
    #   - After 5th, between  800ms and 1600ms
    #   - After 6th, between 1600ms and 3200ms
    #   - After 7th, between 3200ms and 6400ms
    #   - After 8th, between 5000ms and 10 seconds (max capped by max-connection-backoff, min by half of that)
    #   - After 9th, etc., stays between 5000ms and 10 seconds
    #
    # This setting only applies to the new pool implementation and is ignored for the legacy one.
    base-connection-backoff = 100ms

    # Maximum backoff duration between failed connection attempts. For more information see the above comment for the
    # `base-connection-backoff` setting.
    #
    # This setting only applies to the new pool implementation and is ignored for the legacy one.
    max-connection-backoff = 2 min

    # The time after which an idle connection pool (without pending requests)
    # will automatically terminate itself. Set to `infinite` to completely disable idle timeouts.
    idle-timeout = 30 s

    # HTTP connections are commonly used for multiple requests, that is, they are kept alive between requests. The
    # `pekko.http.host-connection-pool.keep-alive-timeout` setting configures how long a pool keeps a connection alive between
    # requests before it closes the connection (and eventually reestablishes it).
    #
    # A common scenario where this setting is useful is to prevent a race-condition inherent in HTTP: in most cases, a server
    # or reverse-proxy closes a persistent (kept-alive) connection after some time. HTTP does not define a protocol between
    # client and server to negotiate a graceful teardown of an idle persistent connection. Therefore, it can happen that a server decides to
    # close a connection at the same time that a client decides to send a new request. In that case, the request will fail to be processed,
    # but the client cannot determine for which reason the server closed the connection and whether the request was (partly) processed or not.
    # Such a condition can be observed when a request fails with an `UnexpectedConnectionClosureException` or a `StreamTcpException` stating
    # "Connection reset by peer".
    #
    # To prevent this from happening, you can set the timeout to a lower value than the server-side keep-alive timeout
    # (which you either have to know or find out experimentally).
    #
    # Set to `infinite` to allow the connection to remain open indefinitely (or be closed by the more general `idle-timeout`).
    keep-alive-timeout = infinite
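    # An illustrative example (the 60 second server-side value is an assumption, not a
    # universal default): if an upstream server or proxy is known to drop idle persistent
    # connections after 60 seconds, a client-side value below that avoids the race described above:
    #
    # keep-alive-timeout = 55 s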

    # The pool implementation will fail a connection early and clear the slot if a response entity was not
    # subscribed during the given time period after the response was dispatched. In busy systems the timeout might be
    # too tight if a response is not picked up quickly enough after it was dispatched by the pool.
    response-entity-subscription-timeout = 1.second

    # Modify this section to tweak client settings only for host connection pools APIs like `Http().superPool` or
    # `Http().singleRequest`.
    client = {
      # no overrides by default, see `pekko.http.client` for default values
    }

    #per-host-overrides
    # Allows overriding settings per host. The setting must be a list in which each entry
    # is an object with a `host-pattern` entry that specifies for which hosts the overrides
    # should take effect. All other entries have the same syntax as entries in the
    # `host-connection-pool` section.
    #
    # The `host-pattern` can have these forms:
    #  * `regex:<pattern>`: the host name is matched against the regular expression pattern
    #  * `glob:<glob-pattern>` or just `<glob-pattern>`: the host is matched against the given
    #    pattern. In the pattern the wildcard `*` stands for zero or more characters and `?`
    #    for any single character
    #
    # In both cases, a pattern that matches `*.` at the beginning, i.e. every subdomain,
    # is expanded to also cover the domain itself (without the leading dot).
    #
    # If patterns from multiple entries in the list are matched, only settings from the
    # first entry found are applied.
    #
    # Example:
    #
    # per-host-override = [
    # {
    #   host-pattern = "pekko.apache.org"
    #   # Use the same entries as in the `host-connection-pool` section
    #   max-connections = 10
    # },
    # {
    #   # `*.apache.org` matches all subdomains like `repo.apache.org` but also `apache.org` itself.
    #   # `pekko.apache.org` is already covered by a previous entry, so these settings here
    #   # will not apply to `pekko.apache.org`.
    #   host-pattern = "*.apache.org"
    #   max-connections = 11
    # }
    # ]
    per-host-override = []
    #per-host-overrides

  }
  #pool-settings

  # Modify to tweak default parsing settings.
  #
  # IMPORTANT:
  # Please note that this section's settings can be overridden by the corresponding settings in:
  # `pekko.http.server.parsing`, `pekko.http.client.parsing` or `pekko.http.host-connection-pool.client.parsing`.
  parsing {
    # The limits for the various parts of the HTTP message parser.
    max-uri-length             = 2k
    max-method-length          = 16
    max-response-reason-length = 64
    max-header-name-length     = 64
    max-header-value-length    = 8k
    max-header-count           = 64
    max-chunk-ext-length       = 256
    max-chunk-size             = 1m
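    # As a sketch (the values below are illustrative only), individual limits can be
    # overridden in application.conf, e.g.:
    #
    # pekko.http.parsing {
    #   max-uri-length   = 4k
    #   max-header-count = 128
    # }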

    # HTTP comments (as e.g. prominently used in User-Agent headers) can be nested. To avoid too deep nesting
    # and the associated parsing and storage cost, the depth of nested comments is limited to the given value.
    max-comment-parsing-depth  = 5

    # The maximum number of bytes to allow when reading the entire entity into memory with `toStrict`
    # (which is used by the `toStrictEntity` and `extractStrictEntity` directives)
    max-to-strict-bytes = 8m

    # Sets the strictness mode for parsing request target URIs.
    # The following values are defined:
    #
    # `strict`: RFC3986-compliant URIs are required,
    #     a 400 response is triggered on violations
    #
    # `relaxed`: all visible 7-Bit ASCII chars are allowed
    #
    uri-parsing-mode = strict

    # Sets the parsing mode for parsing cookies.
    # The following values are defined:
    #
    # `rfc6265`: Only RFC6265-compliant cookies are parsed. Surrounding double-quotes are accepted and
    #   automatically removed. Non-compliant cookies are silently discarded.
    # `raw`: Raw parsing allows any non-control character but ';' to appear in a cookie value. There's no further
    #   post-processing applied, so that the resulting value string may contain any number of whitespace, unicode,
    #   double quotes, or '=' characters at any position.
    #   The rules for parsing the cookie name are the same ones from RFC 6265.
    #
    cookie-parsing-mode = rfc6265

    # Enables/disables the logging of warning messages in case an incoming
    # message (request or response) contains an HTTP header which cannot be
    # parsed into its high-level model class due to incompatible syntax.
    # Note that, independently of this setting, pekko-http will accept messages
    # with such headers as long as the message as a whole would still be legal
    # under the HTTP specification even without this header.
    # If a header cannot be parsed into a high-level model instance it will be
    # provided as a `RawHeader`.
    # If logging is enabled it is performed with the configured
    # `error-logging-verbosity`.
    illegal-header-warnings = on

    # Sets the list of headers for which illegal values will *not* cause warning logs to be emitted;
    #
    # Adding a header name to this setting list disables the logging of warning messages in case an incoming message
    # contains an HTTP header which cannot be parsed into its high-level model class due to incompatible syntax.
    ignore-illegal-header-for = []

    # Parse headers into typed model classes in the Pekko Http core layer.
    #
    # If set to `off`, only essential headers will be parsed into their model classes. All other ones will be provided
    # as instances of `RawHeader`. Currently, `Connection`, `Host`, and `Expect` headers will still be provided in their
    # typed model. The full list of headers still provided as modeled instances can be found in the source code of
    # `org.apache.pekko.http.impl.engine.parsing.HttpHeaderParser.alwaysParsedHeaders`. Note that (regardless of this setting)
    # some headers like `Content-Type` are treated specially and will never be provided in the list of headers.
    modeled-header-parsing = on

    # Configures the verbosity with which message (request or response) parsing
    # errors are written to the application log.
    #
    # Supported settings:
    # `off`   : no log messages are produced
    # `simple`: a condensed single-line message is logged
    # `full`  : the full error details (potentially spanning several lines) are logged
    error-logging-verbosity = full

    # Configures the processing mode when encountering illegal characters in
    # a response header name.
    #
    # Supported modes:
    # `error`  : default mode, reject the request
    # `warn`   : ignore the illegal characters in the response header name and log a warning message
    # `ignore` : just ignore the illegal characters in the response header name
    illegal-response-header-name-processing-mode = error

    # Configures the processing mode when encountering illegal characters in
    # a response header value.
    #
    # Supported modes:
    # `error`  : default mode, throw a ParsingException and terminate the processing
    # `warn`   : ignore the illegal characters in the response header value and log a warning message
    # `ignore` : just ignore the illegal characters in the response header value
    illegal-response-header-value-processing-mode = error

    # Configures the processing mode when encountering conflicting `Content-Type` headers.
    #
    # Supported modes:
    # `error`           : default mode, throw a ParsingException and terminate the processing
    # `first`           : use the value of the first `Content-Type` header and add all subsequent `Content-Type` headers to the headers list
    # `last`            : use the value of the last `Content-Type` header and add all preceding `Content-Type` headers to the headers list
    # `no-content-type` : use `ContentTypes.NoContentType` and add all `Content-Type` headers to the headers list
    conflicting-content-type-header-processing-mode = error

    # limits for the number of different values per header type that the
    # header cache will hold
    header-cache {
      default = 12
      Content-MD5 = 0
      Date = 0
      If-Match = 0
      If-Modified-Since = 0
      If-None-Match = 0
      If-Range = 0
      If-Unmodified-Since = 0
      User-Agent = 32
    }

    # Enables/disables inclusion of a Tls-Session-Info header in parsed
    # messages over Tls transports (i.e., HttpRequest on server side and
    # HttpResponse on client side).
    tls-session-info-header = off

    # Enables/disables inclusion of an SslSession attribute in parsed
    # messages over Tls transports (i.e., HttpRequest on server side and
    # HttpResponse on client side).
    ssl-session-attribute = off
  }

}