+str,doc #18012 adds missing docs on client-side http in java
This commit is contained in:
parent
2f4d6d269f
commit
30bb717468
13 changed files with 375 additions and 40 deletions
@@ -0,0 +1,76 @@
/*
 * Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
 */

package docs.http.javadsl;

import akka.actor.ActorSystem;
import akka.http.javadsl.HostConnectionPool;
import akka.japi.Option;
import akka.japi.Pair;
import akka.util.ByteString;
import org.junit.Test;

import scala.Tuple2;
import scala.concurrent.Future;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.*;
import akka.http.javadsl.OutgoingConnection;
import akka.http.javadsl.model.*;
import akka.http.javadsl.Http;
import scala.util.Try;

public class HttpClientExampleDocTest {

  // compile only test
  public void testConstructRequest() {
    //#outgoing-connection-example

    final ActorSystem system = ActorSystem.create();
    final ActorMaterializer materializer = ActorMaterializer.create(system);

    final Flow<HttpRequest, HttpResponse, Future<OutgoingConnection>> connectionFlow =
      Http.get(system).outgoingConnection("akka.io", 80);
    final Future<HttpResponse> responseFuture =
      Source.single(HttpRequest.create("/"))
        .via(connectionFlow)
        .runWith(Sink.head(), materializer);
    //#outgoing-connection-example
  }

  // compile only test
  public void testHostLevelExample() {
    //#host-level-example
    final ActorSystem system = ActorSystem.create();
    final ActorMaterializer materializer = ActorMaterializer.create(system);

    // construct a pool client flow with context type `Integer`
    // TODO these Tuple2 will be changed to akka.japi.Pair
    final Flow<
        Tuple2<HttpRequest, Integer>,
        Tuple2<Try<HttpResponse>, Integer>,
        HostConnectionPool> poolClientFlow =
      Http.get(system).<Integer>cachedHostConnectionPool("akka.io", 80, materializer);

    final Future<Tuple2<Try<HttpResponse>, Integer>> responseFuture =
      Source
        .single(Pair.create(HttpRequest.create("/"), 42).toScala())
        .via(poolClientFlow)
        .runWith(Sink.head(), materializer);
    //#host-level-example
  }

  // compile only test
  public void testSingleRequestExample() {
    //#single-request-example
    final ActorSystem system = ActorSystem.create();
    final ActorMaterializer materializer = ActorMaterializer.create(system);

    final Future<HttpResponse> responseFuture =
      Http.get(system)
        .singleRequest(HttpRequest.create("http://akka.io"), materializer);
    //#single-request-example
  }
}
@@ -13,7 +13,7 @@ import akka.http.javadsl.model.*;
import akka.http.javadsl.model.headers.*;
//#import-model

public class ModelTest {
public class ModelDocTest {
  @Test
  public void testConstructRequest() {
    //#construct-request

@@ -1,8 +1,68 @@
.. _ConnectionLevelApi-java:
.. _connection-level-api-java:

Connection-Level Client-Side API
================================

TODO
The connection-level API is the lowest-level client-side API Akka HTTP provides. It gives you full control over when
HTTP connections are opened and closed and how requests are to be sent across which connection. As such it offers the
highest flexibility at the cost of providing the least convenience.

For the time being, :ref:`see the Scala chapter on the same topic <ConnectionLevelApi>`.

Opening HTTP Connections
------------------------

With the connection-level API you open a new HTTP connection to a target endpoint by materializing a ``Flow``
returned by the ``Http.get(system).outgoingConnection(...)`` method. Here is an example:

.. includecode:: ../../code/docs/http/javadsl/HttpClientExampleDocTest.java#outgoing-connection-example

Apart from the host name and port the ``Http.get(system).outgoingConnection(...)`` method also allows you to specify socket options
and a number of configuration settings for the connection.

Note that no connection is attempted until the returned flow is actually materialized! If the flow is materialized
several times then several independent connections will be opened (one per materialization).
If the connection attempt fails, for whatever reason, the materialized flow will be immediately terminated with a
respective exception.


Request-Response Cycle
----------------------

Once the connection flow has been materialized it is ready to consume ``HttpRequest`` instances from the source it is
attached to. Each request is sent across the connection and incoming responses dispatched to the downstream pipeline.
As always, back-pressure is adequately maintained across all parts of the
connection. This means that, if the downstream pipeline consuming the HTTP responses is slow, the request source will
eventually be slowed down in sending requests.

Any errors occurring on the underlying connection are surfaced as exceptions terminating the response stream (and
canceling the request source).

Note that, if the source produces subsequent requests before the prior responses have arrived, these requests will be
pipelined__ across the connection, which is something that is not supported by all HTTP servers.
Also, if the server closes the connection before responses to all requests have been received this will result in the
response stream being terminated with a truncation error.

__ http://en.wikipedia.org/wiki/HTTP_pipelining
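
As a small sketch of what this looks like from Java (reusing the ``system``, ``materializer`` and ``connectionFlow``
from the example above; collecting the responses with ``grouped`` is just one possible way to gather them)::

    // requires java.util.Arrays; two requests fed into one materialized connection --
    // the second request may be pipelined before the first response has arrived
    final Future<java.util.List<HttpResponse>> responsesFuture =
      Source.from(Arrays.asList(HttpRequest.create("/"), HttpRequest.create("/docs/")))
        .via(connectionFlow)
        .grouped(2)   // collect both responses into a single list
        .runWith(Sink.head(), materializer);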

Closing Connections
-------------------

Akka HTTP actively closes an established connection upon reception of a response containing a ``Connection: close`` header.
The connection can also be closed by the server.

An application can actively trigger the closing of the connection by completing the request stream. In this case the
underlying TCP connection will be closed when the last pending response has been received.
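
For example (a sketch reusing ``system``, ``materializer`` and ``connectionFlow`` from above): since ``Source.single``
completes right after emitting its single element, the connection is closed once the one response has come in::

    // the request stream completes after a single request, so the underlying
    // TCP connection is closed as soon as the corresponding response arrives
    final Future<HttpResponse> singleResponse =
      Source.single(HttpRequest.create("/"))
        .via(connectionFlow)
        .runWith(Sink.head(), materializer);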

Timeouts
--------

Currently Akka HTTP doesn't implement client-side request timeout checking itself as this functionality can be regarded
as a more general purpose streaming infrastructure feature.
However, akka-stream should soon provide such a feature.


Stand-Alone HTTP Layer Usage
----------------------------

// TODO

@@ -1,8 +1,147 @@
.. _HostLevelApi-java:
.. _host-level-api-java:

Host-Level Client-Side API
==========================

TODO
As opposed to the :ref:`connection-level-api-java` the host-level API relieves you from manually managing individual HTTP
connections. It autonomously manages a configurable pool of connections to *one particular target endpoint* (i.e.
host/port combination).

For the time being, :ref:`see the Scala chapter on the same topic <HostLevelApi>`.

Requesting a Host Connection Pool
---------------------------------

The best way to get a hold of a connection pool to a given target endpoint is the ``Http.get(system).cachedHostConnectionPool(...)``
method, which returns a ``Flow`` that can be "baked" into an application-level stream setup. This flow is also called
a "pool client flow".

The connection pool underlying a pool client flow is cached. For every ``ActorSystem``, target endpoint and pool
configuration there will never be more than a single pool live at any time.

Also, the HTTP layer transparently manages idle shutdown and restarting of connection pools as configured.
The client flow instances therefore remain valid throughout the lifetime of the application, i.e. they can be
materialized as often as required and the time between individual materializations is of no importance.

When you request a pool client flow with ``Http.get(system).cachedHostConnectionPool(...)`` Akka HTTP will immediately start
the pool, even before the first client flow materialization. However, this running pool will not actually open the
first connection to the target endpoint until the first request has arrived.


Configuring a Host Connection Pool
----------------------------------

Apart from the connection-level config settings and socket options there are a number of settings that allow you to
influence the behavior of the connection pool logic itself.
Check out the ``akka.http.client.host-connection-pool`` section of the Akka HTTP :ref:`akka-http-configuration-java` for
more information about which settings are available and what they mean.

Note that, if you request pools with different configurations for the same target host you will get *independent* pools.
This means that, in total, your application might open more concurrent HTTP connections to the target endpoint than any
of the individual pool's ``max-connections`` settings allow!

There is one setting that likely deserves a bit deeper explanation: ``max-open-requests``.
This setting limits the maximum number of requests that can be in-flight at any time for a single connection pool.
If an application calls ``Http.get(system).cachedHostConnectionPool(...)`` 3 times (with the same endpoint and settings) it will get
back ``3`` different client flow instances for the same pool. If each of these client flows is then materialized ``4`` times
(concurrently) the application will have 12 concurrently running client flow materializations.
All of these share the resources of the single pool.

This means that, if the pool's ``pipelining-limit`` is left at ``1`` (effectively disabling pipelining), no more than 12 requests can be open at any time.
With a ``pipelining-limit`` of ``8`` and 12 concurrent client flow materializations the theoretical open requests
maximum is ``96``.
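
To spell out the arithmetic of this example: 3 client flow instances × 4 materializations each = 12 concurrent
materializations, and 12 materializations × a ``pipelining-limit`` of 8 = 96 requests that could theoretically be open
at the same time.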

The ``max-open-requests`` config setting allows for applying a hard limit which serves mainly as a protection against
erroneous connection pool use, e.g. because the application is materializing too many client flows that all compete for
the same pooled connections.

.. _using-a-host-connection-pool-java:

Using a Host Connection Pool
----------------------------

The "pool client flow" returned by ``Http.get(system).cachedHostConnectionPool(...)`` has the following type::

    // TODO Tuple2 will be changed to `akka.japi.Pair`
    Flow<Tuple2<HttpRequest, T>, Tuple2<Try<HttpResponse>, T>, HostConnectionPool>

This means it consumes tuples of type ``Tuple2<HttpRequest, T>`` and produces tuples of type ``Tuple2<Try<HttpResponse>, T>``
which might appear more complicated than necessary at first sight.
The reason why the pool API includes objects of custom type ``T`` on both ends lies in the fact that the underlying
transport usually comprises more than a single connection and as such the pool client flow often generates responses in
an order that doesn't directly match the consumed requests.
We could have built the pool logic in a way that reorders responses according to their requests before dispatching them
to the application, but this would have meant that a single slow response could block the delivery of potentially many
responses that would otherwise be ready for consumption by the application.

In order to prevent unnecessary head-of-line blocking the pool client flow is allowed to dispatch responses as soon as
they arrive, independently of the request order. Of course this means that there needs to be another way to associate a
response with its respective request. The way that this is done is by allowing the application to pass along a custom
"context" object with the request, which is then passed back to the application with the respective response.
This context object of type ``T`` is completely opaque to Akka HTTP, i.e. you can pick whatever works best for your
particular application scenario.
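
For example, a minimal sketch of unpacking such a response/context pair on the Java side (assuming the
``poolClientFlow``, ``system`` and ``materializer`` from the example at the bottom of this page;
``akka.dispatch.OnComplete`` is Akka's helper for attaching Java callbacks to Scala futures)::

    final Future<Tuple2<Try<HttpResponse>, Integer>> futureResponse =
      Source
        .single(Pair.create(HttpRequest.create("/"), 42).toScala())
        .via(poolClientFlow)
        .runWith(Sink.head(), materializer);

    futureResponse.onComplete(new OnComplete<Tuple2<Try<HttpResponse>, Integer>>() {
      public void onComplete(Throwable failure, Tuple2<Try<HttpResponse>, Integer> result) {
        if (failure == null && result._1().isSuccess()) {
          // the context `42` identifies which request this response belongs to
          final Integer requestId = result._2();
          final HttpResponse response = result._1().get();
        }
      }
    }, system.dispatcher());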

Connection Allocation Logic
---------------------------

This is how Akka HTTP allocates incoming requests to the available connection "slots":

1. If there is a connection alive and currently idle then schedule the request across this connection.
2. If no connection is idle and there is still an unconnected slot then establish a new connection.
3. If all connections are already established and "loaded" with other requests then pick the connection with the least
   open requests (< the configured ``pipelining-limit``) that only has requests with idempotent methods scheduled to it,
   if there is one.
4. Otherwise apply back-pressure to the request source, i.e. stop accepting new requests.

For more information about scheduling more than one request at a time across a single connection see
`this wikipedia entry on HTTP pipelining`__.

__ http://en.wikipedia.org/wiki/HTTP_pipelining


Retrying a Request
------------------

If the ``max-retries`` pool config setting is greater than zero the pool retries idempotent requests for which
a response could not be successfully retrieved. Idempotent requests are those whose HTTP method is defined to be
idempotent by the HTTP spec, which are all the ones currently modelled by Akka HTTP except for the ``POST``, ``PATCH``
and ``CONNECT`` methods.

When a response could not be received for a certain request there are essentially three possible error scenarios:

1. The request got lost on the way to the server.
2. The server experiences a problem while processing the request.
3. The response from the server got lost on the way back.

Since the host connector cannot know which one of these possible reasons caused the problem, ``PATCH`` and ``POST``
requests could have already triggered a non-idempotent action on the server and therefore cannot be retried.

In these cases, as well as when all retries have not yielded a proper response, the pool produces a failed ``Try``
(i.e. a ``scala.util.Failure``) together with the custom request context.


Pool Shutdown
-------------

Completing a pool client flow will simply detach the flow from the pool. The connection pool itself will continue to run
as it may be serving other client flows concurrently or in the future. Only after the configured ``idle-timeout`` for
the pool has expired will Akka HTTP automatically terminate the pool and free all its resources.

If a new client flow is requested with ``Http.get(system).cachedHostConnectionPool(...)`` or if an already existing client flow is
re-materialized the respective pool is automatically and transparently restarted.

In addition to the automatic shutdown via the configured idle timeouts it's also possible to trigger the immediate
shutdown of a specific pool by calling ``shutdown()`` on the :class:`HostConnectionPool` instance that the pool client
flow materializes into. This ``shutdown()`` call produces a ``Future[Unit]`` which is fulfilled when the pool
termination has been completed.

It's also possible to trigger the immediate termination of *all* connection pools in the ``ActorSystem`` at the same
time by calling ``Http.get(system).shutdownAllConnectionPools()``. This call too produces a ``Future[Unit]`` which is fulfilled when
all pools have terminated.
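
A minimal sketch (assuming the ``system`` from the examples above)::

    // immediately terminates all connection pools of this ActorSystem; the returned
    // Scala future is completed once all of them have shut down
    final Future<?> allPoolsTerminated = Http.get(system).shutdownAllConnectionPools();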

Example
-------

.. includecode:: ../../code/docs/http/javadsl/HttpClientExampleDocTest.java#host-level-example

@@ -3,7 +3,22 @@
Consuming HTTP-based Services (Client-Side)
===========================================

...
All client-side functionality of Akka HTTP, for consuming HTTP-based services offered by other endpoints, is currently
provided by the ``akka-http-core`` module.

Depending on your application's specific needs you can choose from three different API levels:

:ref:`connection-level-api-java`
    for full-control over when HTTP connections are opened/closed and how requests are scheduled across them

:ref:`host-level-api-java`
    for letting Akka HTTP manage a connection-pool to *one specific* host/port endpoint

:ref:`request-level-api-java`
    for letting Akka HTTP perform all connection management

You can interact with different API levels at the same time and, independently of which API level you choose,
Akka HTTP will happily handle many thousand concurrent connections to a single or many different hosts.

.. toctree::
   :maxdepth: 2

@@ -1,8 +1,47 @@
.. _RequestLevelApi-java:
.. _request-level-api-java:

Request-Level Client-Side API
=============================

TODO
The request-level API is the most convenient way of using Akka HTTP's client-side functionality. It internally builds upon the
:ref:`host-level-api-java` to provide you with a simple and easy-to-use way of retrieving HTTP responses from remote servers.
Depending on your preference you can pick the flow-based or the future-based variant.

For the time being, :ref:`see the Scala chapter on the same topic <RequestLevelApi>`.

Flow-Based Variant
------------------

The flow-based variant of the request-level client-side API is presented by the ``Http.get(system).superPool(...)`` method.
It creates a new "super connection pool flow", which routes incoming requests to a (cached) host connection pool
depending on their respective effective URIs.

The ``Flow`` returned by ``Http.get(system).superPool(...)`` is very similar to the one from the :ref:`host-level-api-java`, so the
:ref:`using-a-host-connection-pool-java` section also applies here.

However, there is one notable difference between a "host connection pool client flow" for the host-level API and a
"super-pool flow":
Since in the former case the flow has an implicit target host context, the requests it takes don't need to have absolute
URIs or a valid ``Host`` header. The host connection pool will automatically add a ``Host`` header if required.

For a super-pool flow this is not the case. All requests to a super-pool must either have an absolute URI or a valid
``Host`` header, because otherwise it'd be impossible to find out which target endpoint to direct the request to.


Future-Based Variant
--------------------

Sometimes your HTTP client needs are very basic. You simply need the HTTP response for a certain request and don't
want to bother with setting up a full-blown streaming infrastructure.

For these cases Akka HTTP offers the ``Http.get(system).singleRequest(...)`` method, which simply turns an ``HttpRequest`` instance
into a ``Future<HttpResponse>``. Internally the request is dispatched across the (cached) host connection pool for the
request's effective URI.

Just like in the case of the super-pool flow described above the request must have either an absolute URI or a valid
``Host`` header, otherwise the returned future will be completed with an error.
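
For example (a sketch, reusing the ``system`` and ``materializer`` from the example below): a request created from a
path only, without an absolute URI or ``Host`` header, yields a failed future rather than throwing::

    // no absolute URI and no `Host` header: the returned future fails
    final Future<HttpResponse> failedResponse =
      Http.get(system).singleRequest(HttpRequest.create("/relative-uri"), materializer);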

Example
-------

.. includecode:: ../../code/docs/http/javadsl/HttpClientExampleDocTest.java#single-request-example

@@ -13,7 +13,7 @@ Overview
Since akka-http-core provides the central HTTP data structures you will find the following import in quite a
few places around the code base (and probably your own code as well):

.. includecode:: ../code/docs/http/javadsl/ModelTest.java
.. includecode:: ../code/docs/http/javadsl/ModelDocTest.java
   :include: import-model

This brings all of the most relevant types in scope, mainly:

@@ -50,7 +50,7 @@ An ``HttpRequest`` consists of

Here are some examples how to construct an ``HttpRequest``:

.. includecode:: ../code/docs/http/javadsl/ModelTest.java
.. includecode:: ../code/docs/http/javadsl/ModelDocTest.java
   :include: construct-request

In its basic form ``HttpRequest.create`` creates an empty default GET request without headers which can then be

@@ -71,7 +71,7 @@ An ``HttpResponse`` consists of

Here are some examples how to construct an ``HttpResponse``:

.. includecode:: ../code/docs/http/javadsl/ModelTest.java
.. includecode:: ../code/docs/http/javadsl/ModelDocTest.java
   :include: construct-response

In addition to the simple ``HttpEntities.create`` methods which create an entity from a fixed ``String`` or ``ByteString``

@@ -169,7 +169,7 @@ as a ``RawHeader`` (which is essentially a String/String name/value pair).

See these examples of how to deal with headers:

.. includecode:: ../code/docs/http/javadsl/ModelTest.java
.. includecode:: ../code/docs/http/javadsl/ModelDocTest.java
   :include: headers

@@ -1,4 +1,4 @@
.. _ConnectionLevelApi:
.. _connection-level-api:

Connection-Level Client-Side API
================================

@@ -11,9 +11,8 @@ highest flexibility at the cost of providing the least convenience.
Opening HTTP Connections
------------------------

With the connection-level API you open a new HTTP connection by materializing a ``Flow`` returned by the
``Http().outgoingConnection(...)`` method. The target endpoint to which a connection is to be opened needs to be
specified as an argument to ``Http().outgoingConnection(...)``. Here is an example:
With the connection-level API you open a new HTTP connection to a target endpoint by materializing a ``Flow``
returned by the ``Http().outgoingConnection(...)`` method. Here is an example:

.. includecode:: ../../code/docs/http/scaladsl/HttpClientExampleSpec.scala
   :include: outgoing-connection-example

@@ -51,7 +50,7 @@ Closing Connections
-------------------

Akka HTTP actively closes an established connection upon reception of a response containing ``Connection: close`` header.
Of course the connection can also be closed by the server.
The connection can also be closed by the server.

An application can actively trigger the closing of the connection by completing the request stream. In this case the
underlying TCP connection will be closed when the last pending response has been received.

@@ -1,9 +1,9 @@
.. _HostLevelApi:
.. _host-level-api:

Host-Level Client-Side API
==========================

As opposed to the :ref:`ConnectionLevelApi` the host-level API relieves you from manually managing individual HTTP
As opposed to the :ref:`connection-level-api` the host-level API relieves you from manually managing individual HTTP
connections. It autonomously manages a configurable pool of connections to *one particular target endpoint* (i.e.
host/port combination).

@@ -40,15 +40,15 @@ This means that, in total, your application might open more concurrent HTTP conn
of the individual pool's ``max-connections`` settings allow!

There is one setting that likely deserves a bit deeper explanation: ``max-open-requests``.
This setting limits the maximum number of requests that can be open at any time for a single connection pool.
This setting limits the maximum number of requests that can be in-flight at any time for a single connection pool.
If an application calls ``Http().cachedHostConnectionPool(...)`` 3 times (with the same endpoint and settings) it will get
back 3 different client flow instances for the same pool. If each of these client flows is then materialized 4 times
back ``3`` different client flow instances for the same pool. If each of these client flows is then materialized ``4`` times
(concurrently) the application will have 12 concurrently running client flow materializations.
All of these share the resources of the single pool.

This means that, if the pool's ``pipelining-limit`` is left at ``1``, no more than 12 requests can be open at any time.
This means that, if the pool's ``pipelining-limit`` is left at ``1`` (effectively disabling pipelining), no more than 12 requests can be open at any time.
With a ``pipelining-limit`` of ``8`` and 12 concurrent client flow materializations the theoretical open requests
maximum is 96.
maximum is ``96``.

The ``max-open-requests`` config setting allows for applying a hard limit which serves mainly as a protection against
erroneous connection pool use, e.g. because the application is materializing too many client flows that all compete for

@@ -131,7 +131,7 @@ If a new client flow is requested with ``Http().cachedHostConnectionPool(...)``
re-materialized the respective pool is automatically and transparently restarted.

In addition to the automatic shutdown via the configured idle timeouts it's also possible to trigger the immediate
shutdown of specific pool by calling ``shutdown()`` on the ``Http().HostConnectionPool`` instance that a pool client
shutdown of a specific pool by calling ``shutdown()`` on the :class:`HostConnectionPool` instance that the pool client
flow materializes into. This ``shutdown()`` call produces a ``Future[Unit]`` which is fulfilled when the pool
termination has been completed.

@@ -3,18 +3,18 @@
Consuming HTTP-based Services (Client-Side)
===========================================

All client-side functionality of Akka HTTP, for consuming HTTP-based services offered by other systems, is currently
All client-side functionality of Akka HTTP, for consuming HTTP-based services offered by other endpoints, is currently
provided by the ``akka-http-core`` module.

Depending your application's specific needs you can choose from three different API levels:
Depending on your application's specific needs you can choose from three different API levels:

:ref:`ConnectionLevelApi`
:ref:`connection-level-api`
    for full-control over when HTTP connections are opened/closed and how requests are scheduled across them

:ref:`HostLevelApi`
:ref:`host-level-api`
    for letting Akka HTTP manage a connection-pool to *one specific* host/port endpoint

:ref:`RequestLevelApi`
:ref:`request-level-api`
    for letting Akka HTTP perform all connection management

You can interact with different API levels at the same time and, independently of which API level you choose,

@@ -1,10 +1,10 @@
.. _RequestLevelApi:
.. _request-level-api:

Request-Level Client-Side API
=============================

The request-level API is the most convenient way of using Akka HTTP's client-side. It internally builds upon the
:ref:`HostLevelApi` to provide you with a simple and easy-to-use way of retrieving HTTP responses from remote servers.
The request-level API is the most convenient way of using Akka HTTP's client-side functionality. It internally builds upon the
:ref:`host-level-api` to provide you with a simple and easy-to-use way of retrieving HTTP responses from remote servers.
Depending on your preference you can pick the flow-based or the future-based variant.

@@ -13,9 +13,9 @@ Flow-Based Variant

The flow-based variant of the request-level client-side API is presented by the ``Http().superPool(...)`` method.
It creates a new "super connection pool flow", which routes incoming requests to a (cached) host connection pool
depending on their respective effective URI.
depending on their respective effective URIs.

The ``Flow`` returned by ``Http().superPool(...)`` is very similar to the one from the :ref:`HostLevelApi`, so the
The ``Flow`` returned by ``Http().superPool(...)`` is very similar to the one from the :ref:`host-level-api`, so the
:ref:`using-a-host-connection-pool` section also applies here.

However, there is one notable difference between a "host connection pool client flow" for the host-level API and a