From abbbfb5b5465596b27811f93c48dd7fe11992d32 Mon Sep 17 00:00:00 2001 From: Arnout Engelen Date: Wed, 3 Apr 2019 13:14:27 +0200 Subject: [PATCH] Use sbt-paradox-apidoc plugin (#26660) Instead of using custom code here. Checked with 'meld' that the result is the same, except for problems that were already there (filed #26659). --- akka-docs/src/main/paradox/cluster-client.md | 54 ++++++++--------- .../RestartFlow/onFailuresWithBackoff.md | 18 +++--- .../operators/RestartFlow/withBackoff.md | 14 ++--- .../operators/RestartSink/withBackoff.md | 14 ++--- .../RestartSource/onFailuresWithBackoff.md | 12 ++-- .../operators/RestartSource/withBackoff.md | 14 ++--- .../stream/operators/Source-or-Flow/ask.md | 2 +- .../main/paradox/stream/operators/index.md | 10 ++-- .../main/paradox/stream/stream-customize.md | 2 +- .../src/main/paradox/typed/actor-lifecycle.md | 8 +-- .../main/paradox/typed/distributed-data.md | 4 +- build.sbt | 1 + project/ParadoxSupport.scala | 59 ------------------- project/plugins.sbt | 1 + 14 files changed, 78 insertions(+), 135 deletions(-) diff --git a/akka-docs/src/main/paradox/cluster-client.md b/akka-docs/src/main/paradox/cluster-client.md index 689c2745db..c5fbbe9be9 100644 --- a/akka-docs/src/main/paradox/cluster-client.md +++ b/akka-docs/src/main/paradox/cluster-client.md @@ -13,15 +13,15 @@ To use Cluster Client, you must add the following dependency in your project: ## Introduction An actor system that is not part of the cluster can communicate with actors -somewhere in the cluster via the @unidoc[ClusterClient], the client can run in an `ActorSystem` that is part of +somewhere in the cluster via the @apidoc[ClusterClient]. The client can run in an `ActorSystem` that is part of another cluster. It only needs to know the location of one (or more) nodes to use as initial -contact points. It will establish a connection to a @unidoc[akka.cluster.client.ClusterReceptionist] somewhere in +contact points. It will establish a connection to a @apidoc[akka.cluster.client.ClusterReceptionist] somewhere in the cluster. It will monitor the connection to the receptionist and establish a new connection if the link goes down. When looking for a new receptionist it uses fresh contact points retrieved from previous establishment, or periodically refreshed contacts, i.e. not necessarily the initial contact points. -Using the @unidoc[ClusterClient] for communicating with a cluster from the outside requires that the system with the client +Using the @apidoc[ClusterClient] for communicating with a cluster from the outside requires that the system with the client can both connect and be connected to with Akka Remoting from all the nodes in the cluster with a receptionist. This creates a tight coupling in that the client and cluster systems may need to have the same version of both Akka, libraries, message classes, serializers and potentially even the JVM. In many cases it is a better solution @@ -29,11 +29,11 @@ to use a more explicit and decoupling protocol such as [HTTP](https://doc.akka.i [gRPC](https://developer.lightbend.com/docs/akka-grpc/current/). Additionally since Akka Remoting is primarily designed as a protocol for Akka Cluster there is no explicit resource -management, when a @unidoc[ClusterClient] has been used it will cause connections with the cluster until the ActorSystem is +management; when a @apidoc[ClusterClient] has been used it will keep connections to the cluster open until the ActorSystem is stopped (unlike other kinds of network clients).
-@unidoc[ClusterClient] should not be used when sending messages to actors that run -within the same cluster. Similar functionality as the @unidoc[ClusterClient] is +@apidoc[ClusterClient] should not be used when sending messages to actors that run +within the same cluster. Functionality similar to the @apidoc[ClusterClient] is provided in a more efficient way by @ref:[Distributed Publish Subscribe in Cluster](distributed-pub-sub.md) for actors that belong to the same cluster. @@ -41,23 +41,23 @@ It is necessary that the connecting system has its `akka.actor.provider` set to the cluster client. The receptionist is supposed to be started on all nodes, or all nodes with specified role, -in the cluster. The receptionist can be started with the @unidoc[akka.cluster.client.ClusterReceptionist] extension +in the cluster. The receptionist can be started with the @apidoc[akka.cluster.client.ClusterReceptionist] extension or as an ordinary actor. -You can send messages via the @unidoc[ClusterClient] to any actor in the cluster that is registered -in the @unidoc[DistributedPubSubMediator] used by the @unidoc[akka.cluster.client.ClusterReceptionist]. -The @unidoc[ClusterClientReceptionist] provides methods for registration of actors that +You can send messages via the @apidoc[ClusterClient] to any actor in the cluster that is registered +in the @apidoc[DistributedPubSubMediator] used by the @apidoc[akka.cluster.client.ClusterReceptionist]. +The @apidoc[ClusterClientReceptionist] provides methods for registration of actors that should be reachable from the client. Messages are wrapped in `ClusterClient.Send`, @scala[@scaladoc[`ClusterClient.SendToAll`](akka.cluster.client.ClusterClient$)]@java[`ClusterClient.SendToAll`] or @scala[@scaladoc[`ClusterClient.Publish`](akka.cluster.client.ClusterClient$)]@java[`ClusterClient.Publish`]. -Both the @unidoc[ClusterClient] and the @unidoc[ClusterClientReceptionist] emit events that can be subscribed to. -The @unidoc[ClusterClient] sends out notifications in relation to having received a list of contact points -from the @unidoc[ClusterClientReceptionist]. One use of this list might be for the client to record its +Both the @apidoc[ClusterClient] and the @apidoc[ClusterClientReceptionist] emit events that can be subscribed to. +The @apidoc[ClusterClient] sends out notifications when it has received a list of contact points +from the @apidoc[ClusterClientReceptionist]. One use of this list might be for the client to record its contact points. A client that is restarted could then use this information to supersede any previously configured contact points. -The @unidoc[ClusterClientReceptionist] sends out notifications in relation to having received a contact -from a @unidoc[ClusterClient]. This notification enables the server containing the receptionist to become aware of +The @apidoc[ClusterClientReceptionist] sends out notifications when it has received a contact +from a @apidoc[ClusterClient]. This notification enables the server containing the receptionist to become aware of what clients are connected. 1. **ClusterClient.Send** @@ -86,13 +86,13 @@ to avoid inbound connections from other cluster nodes to the client: * @scala[@scaladoc[`sender()`](akka.actor.Actor)] @java[@javadoc[`getSender()`](akka.actor.Actor)] of the response messages, sent back from the destination and seen by the client, is `deadLetters` -since the client should normally send subsequent messages via the @unidoc[ClusterClient].
+since the client should normally send subsequent messages via the @apidoc[ClusterClient]. It is possible to pass the original sender inside the reply messages if the client is supposed to communicate directly to the actor in the cluster. -While establishing a connection to a receptionist the @unidoc[ClusterClient] will buffer +While establishing a connection to a receptionist, the @apidoc[ClusterClient] will buffer messages and send them when the connection is established. If the buffer is full -the @unidoc[ClusterClient] will drop old messages when new messages are sent via the client. +the @apidoc[ClusterClient] will drop old messages when new messages are sent via the client. The size of the buffer is configurable and it can be disabled by using a buffer size of 0. It's worth noting that messages can always be lost because of the distributed nature @@ -116,7 +116,7 @@ Scala Java : @@snip [ClusterClientTest.java](/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #server } -On the client you create the @unidoc[ClusterClient] actor and use it as a gateway for sending +On the client, you create the @apidoc[ClusterClient] actor and use it as a gateway for sending messages to the actors identified by their path (without address information) somewhere in the cluster. @@ -148,7 +148,7 @@ That is convenient and perfectly fine in most cases, but it can be good to know start the `akka.cluster.client.ClusterReceptionist` actor as an ordinary actor and you can have several different receptionists at the same time, serving different types of clients. -Note that the @unidoc[ClusterClientReceptionist] uses the @unidoc[DistributedPubSub] extension, which is described +Note that the @apidoc[ClusterClientReceptionist] uses the @apidoc[DistributedPubSub] extension, which is described in @ref:[Distributed Publish Subscribe in Cluster](distributed-pub-sub.md). It is recommended to load the extension when the actor system is started by defining it in the @@ -160,9 +160,9 @@ akka.extensions = ["akka.cluster.client.ClusterClientReceptionist"] ## Events -As mentioned earlier, both the @unidoc[ClusterClient] and @unidoc[ClusterClientReceptionist] emit events that can be subscribed to. +As mentioned earlier, both the @apidoc[ClusterClient] and @apidoc[ClusterClientReceptionist] emit events that can be subscribed to. The following code snippet declares an actor that will receive notifications on contact points (addresses to the available -receptionists), as they become available. The code illustrates subscribing to the events and receiving the @unidoc[ClusterClient] +receptionists), as they become available. The code illustrates subscribing to the events and receiving the @apidoc[ClusterClient] initial state.
Scala @@ -171,7 +171,7 @@ Scala Java : @@snip [ClusterClientTest.java](/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #clientEventsListener } -Similarly we can have an actor that behaves in a similar fashion for learning what cluster clients are connected to a @unidoc[ClusterClientReceptionist]: +Similarly we can have an actor that behaves in a similar fashion for learning what cluster clients are connected to a @apidoc[ClusterClientReceptionist]: Scala : @@snip [ClusterClientSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #receptionistEventsListener } @@ -182,14 +182,14 @@ Java ## Configuration -The @unidoc[ClusterClientReceptionist] extension (or @unidoc[akka.cluster.client.ClusterReceptionistSettings]) can be configured +The @apidoc[ClusterClientReceptionist] extension (or @apidoc[akka.cluster.client.ClusterReceptionistSettings]) can be configured with the following properties: @@snip [reference.conf](/akka-cluster-tools/src/main/resources/reference.conf) { #receptionist-ext-config } -The following configuration properties are read by the @unidoc[ClusterClientSettings] -when created with a @scala[@scaladoc[`ActorSystem`](akka.actor.ActorSystem)]@java[@javadoc[`ActorSystem`](akka.actor.ActorSystem)] parameter. It is also possible to amend the @unidoc[ClusterClientSettings] -or create it from another config section with the same layout as below. @unidoc[ClusterClientSettings] is +The following configuration properties are read by the @apidoc[ClusterClientSettings] +when created with a @scala[@scaladoc[`ActorSystem`](akka.actor.ActorSystem)]@java[@javadoc[`ActorSystem`](akka.actor.ActorSystem)] parameter. It is also possible to amend the @apidoc[ClusterClientSettings] +or create it from another config section with the same layout as below. @apidoc[ClusterClientSettings] is a parameter to the @scala[@scaladoc[`ClusterClient.props`](akka.cluster.client.ClusterClient$)]@java[@javadoc[`ClusterClient.props`](akka.cluster.client.ClusterClient$)] factory method, i.e. each client can be configured with different settings if needed. diff --git a/akka-docs/src/main/paradox/stream/operators/RestartFlow/onFailuresWithBackoff.md b/akka-docs/src/main/paradox/stream/operators/RestartFlow/onFailuresWithBackoff.md index 27715833ad..fcadffac0c 100644 --- a/akka-docs/src/main/paradox/stream/operators/RestartFlow/onFailuresWithBackoff.md +++ b/akka-docs/src/main/paradox/stream/operators/RestartFlow/onFailuresWithBackoff.md @@ -1,6 +1,6 @@ # RestartFlow.onFailuresWithBackoff -Wrap the given @unidoc[Flow] with a @unidoc[Flow] that will restart it when it fails using an exponential backoff. Notice that this @unidoc[Flow] will not restart on completion of the wrapped flow. +Wrap the given @apidoc[Flow] with a @apidoc[Flow] that will restart it when it fails using an exponential backoff. Notice that this @apidoc[Flow] will not restart on completion of the wrapped flow. @ref[Error handling](../index.md#error-handling) @@ -14,17 +14,17 @@ Wrap the given @unidoc[Flow] with a @unidoc[Flow] that will restart it when it f ## Description -This @unidoc[Flow] will not emit any failure -The failures by the wrapped @unidoc[Flow] will be handled by -restarting the wrapping @unidoc[Flow] as long as maxRestarts is not reached. -Any termination signals sent to this @unidoc[Flow] however will terminate the wrapped @unidoc[Flow], if it's -running, and then the @unidoc[Flow] will be allowed to terminate without being restarted. 
+This @apidoc[Flow] will not emit any failure. +Failures of the wrapped @apidoc[Flow] will be handled by +restarting the wrapping @apidoc[Flow] as long as `maxRestarts` is not reached. +Any termination signals sent to this @apidoc[Flow], however, will terminate the wrapped @apidoc[Flow], if it's +running, and then the @apidoc[Flow] will be allowed to terminate without being restarted. The restart process is inherently lossy, since there is no coordination between cancelling and the sending of -messages. A termination signal from either end of the wrapped @unidoc[Flow] will cause the other end to be terminated, -and any in transit messages will be lost. During backoff, this @unidoc[Flow] will backpressure. +messages. A termination signal from either end of the wrapped @apidoc[Flow] will cause the other end to be terminated, +and any in-transit messages will be lost. During backoff, this @apidoc[Flow] will backpressure. -This uses the same exponential backoff algorithm as @unidoc[Backoff]. +This uses the same exponential backoff algorithm as @apidoc[Backoff]. ## Reactive Streams semantics diff --git a/akka-docs/src/main/paradox/stream/operators/RestartFlow/withBackoff.md b/akka-docs/src/main/paradox/stream/operators/RestartFlow/withBackoff.md index 76276b763d..b998f4391c 100644 --- a/akka-docs/src/main/paradox/stream/operators/RestartFlow/withBackoff.md +++ b/akka-docs/src/main/paradox/stream/operators/RestartFlow/withBackoff.md @@ -1,6 +1,6 @@ # RestartFlow.withBackoff -Wrap the given @unidoc[Flow] with a @unidoc[Flow] that will restart it when it fails or complete using an exponential backoff. +Wrap the given @apidoc[Flow] with a @apidoc[Flow] that will restart it when it fails or completes using an exponential backoff. @ref[Error handling](../index.md#error-handling) @@ -14,16 +14,16 @@ Wrap the given @unidoc[Flow] with a @unidoc[Flow] that will restart it when it f ## Description -The resulting @unidoc[Flow] will not cancel, complete or emit a failure, until the opposite end of it has been cancelled or -completed. Any termination by the @unidoc[Flow] before that time will be handled by restarting it. Any termination -signals sent to this @unidoc[Flow] however will terminate the wrapped @unidoc[Flow], if it's running, and then the @unidoc[Flow] +The resulting @apidoc[Flow] will not cancel, complete or emit a failure until the opposite end of it has been cancelled or +completed. Any termination by the @apidoc[Flow] before that time will be handled by restarting it. Any termination +signals sent to this @apidoc[Flow], however, will terminate the wrapped @apidoc[Flow], if it's running, and then the @apidoc[Flow] will be allowed to terminate without being restarted. The restart process is inherently lossy, since there is no coordination between cancelling and the sending of -messages. A termination signal from either end of the wrapped @unidoc[Flow] will cause the other end to be terminated, -and any in transit messages will be lost. During backoff, this @unidoc[Flow] will backpressure. +messages. A termination signal from either end of the wrapped @apidoc[Flow] will cause the other end to be terminated, +and any in-transit messages will be lost. During backoff, this @apidoc[Flow] will backpressure. -This uses the same exponential backoff algorithm as @unidoc[Backoff]. +This uses the same exponential backoff algorithm as @apidoc[Backoff].
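For illustration, a minimal usage sketch of the operator documented above, against the Akka 2.5-era Scala API; `createConnectionFlow` is a hypothetical stand-in for a flow that may fail or complete, such as a connection to a server:

```scala
import scala.concurrent.duration._
import akka.stream.scaladsl.{ Flow, RestartFlow }

// Hypothetical factory; it must return a fresh Flow on every invocation,
// because the wrapper calls it again after each restart.
def createConnectionFlow(): Flow[String, String, _] =
  Flow[String].map(_.toUpperCase)

// Recreate the wrapped flow with exponential backoff whenever it
// fails or completes.
val restartingFlow: Flow[String, String, _] =
  RestartFlow.withBackoff(
    minBackoff = 3.seconds,  // delay before the first restart
    maxBackoff = 30.seconds, // cap for the exponentially growing delay
    randomFactor = 0.2       // jitter, so restarts do not synchronize
  )(() => createConnectionFlow())
```

While a restart is pending, the wrapper backpressures its upstream, as described above.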
## Reactive Streams semantics diff --git a/akka-docs/src/main/paradox/stream/operators/RestartSink/withBackoff.md b/akka-docs/src/main/paradox/stream/operators/RestartSink/withBackoff.md index e0e80fc6b5..64bb8272e2 100644 --- a/akka-docs/src/main/paradox/stream/operators/RestartSink/withBackoff.md +++ b/akka-docs/src/main/paradox/stream/operators/RestartSink/withBackoff.md @@ -1,6 +1,6 @@ # RestartSink.withBackoff -Wrap the given @unidoc[Sink] with a @unidoc[Sink] that will restart it when it fails or complete using an exponential backoff. +Wrap the given @apidoc[Sink] with a @apidoc[Sink] that will restart it when it fails or completes using an exponential backoff. @ref[Error handling](../index.md#error-handling) @@ -14,14 +14,14 @@ Wrap the given @unidoc[Sink] with a @unidoc[Sink] that will restart it when it f ## Description -This @unidoc[Sink] will never cancel, since cancellation by the wrapped @unidoc[Sink] is always handled by restarting it. -The wrapped @unidoc[Sink] can however be completed by feeding a completion or error into this @unidoc[Sink]. When that -happens, the @unidoc[Sink], if currently running, will terminate and will not be restarted. This can be triggered -simply by the upstream completing, or externally by introducing a @unidoc[KillSwitch] right before this @unidoc[Sink] in the +This @apidoc[Sink] will never cancel, since cancellation by the wrapped @apidoc[Sink] is always handled by restarting it. +The wrapped @apidoc[Sink] can however be completed by feeding a completion or error into this @apidoc[Sink]. When that +happens, the @apidoc[Sink], if currently running, will terminate and will not be restarted. This can be triggered +simply by the upstream completing, or externally by introducing a @apidoc[KillSwitch] right before this @apidoc[Sink] in the graph. The restart process is inherently lossy, since there is no coordination between cancelling and the sending of -messages. When the wrapped @unidoc[Sink] does cancel, this @unidoc[Sink] will backpressure, however any elements already +messages. When the wrapped @apidoc[Sink] does cancel, this @apidoc[Sink] will backpressure; however, any elements already sent may have been lost. -This uses the same exponential backoff algorithm as @unidoc[Backoff]. +This uses the same exponential backoff algorithm as @apidoc[Backoff]. diff --git a/akka-docs/src/main/paradox/stream/operators/RestartSource/onFailuresWithBackoff.md b/akka-docs/src/main/paradox/stream/operators/RestartSource/onFailuresWithBackoff.md index 6d37df9e72..e30e8368c2 100644 --- a/akka-docs/src/main/paradox/stream/operators/RestartSource/onFailuresWithBackoff.md +++ b/akka-docs/src/main/paradox/stream/operators/RestartSource/onFailuresWithBackoff.md @@ -1,6 +1,6 @@ # RestartSource.onFailuresWithBackoff -Wrap the given @unidoc[Source] with a @unidoc[Source] that will restart it when it fails using an exponential backoff. +Wrap the given @apidoc[Source] with a @apidoc[Source] that will restart it when it fails using an exponential backoff. @ref[Error handling](../index.md#error-handling) @@ -14,11 +14,11 @@ Wrap the given @unidoc[Source] with a @unidoc[Source] that will restart it when ## Description - This @unidoc[Source] will never emit a failure, since the failure of the wrapped @unidoc[Source] is always handled by - restarting. The wrapped @unidoc[Source] can be cancelled by cancelling this @unidoc[Source]. - When that happens, the wrapped @unidoc[Source], if currently running will be cancelled, and it will not be restarted.
- This can be triggered simply by the downstream cancelling, or externally by introducing a @unidoc[KillSwitch] right - after this @unidoc[Source] in the graph. + This @apidoc[Source] will never emit a failure, since the failure of the wrapped @apidoc[Source] is always handled by + restarting. The wrapped @apidoc[Source] can be cancelled by cancelling this @apidoc[Source]. + When that happens, the wrapped @apidoc[Source], if currently running, will be cancelled, and it will not be restarted. + This can be triggered simply by the downstream cancelling, or externally by introducing a @apidoc[KillSwitch] right + after this @apidoc[Source] in the graph. ## Reactive Streams semantics diff --git a/akka-docs/src/main/paradox/stream/operators/RestartSource/withBackoff.md b/akka-docs/src/main/paradox/stream/operators/RestartSource/withBackoff.md index 711ba86a58..55a034dd0c 100644 --- a/akka-docs/src/main/paradox/stream/operators/RestartSource/withBackoff.md +++ b/akka-docs/src/main/paradox/stream/operators/RestartSource/withBackoff.md @@ -1,6 +1,6 @@ # RestartSource.withBackoff -Wrap the given @unidoc[Source] with a @unidoc[Source] that will restart it when it fails or complete using an exponential backoff. +Wrap the given @apidoc[Source] with a @apidoc[Source] that will restart it when it fails or completes using an exponential backoff. @ref[Error handling](../index.md#error-handling) @@ -14,13 +14,13 @@ Wrap the given @unidoc[Source] with a @unidoc[Source] that will restart it when ## Description -This @unidoc[Flow] will never emit a complete or failure, since the completion or failure of the wrapped @unidoc[Source] -is always handled by restarting it. The wrapped @unidoc[Source] can however be cancelled by cancelling this @unidoc[Source]. -When that happens, the wrapped @unidoc[Source], if currently running will be cancelled, and it will not be restarted. -This can be triggered simply by the downstream cancelling, or externally by introducing a @unidoc[KillSwitch] right -after this @unidoc[Source] in the graph. +This @apidoc[Source] will never emit a complete or failure, since the completion or failure of the wrapped @apidoc[Source] +is always handled by restarting it. The wrapped @apidoc[Source] can however be cancelled by cancelling this @apidoc[Source]. +When that happens, the wrapped @apidoc[Source], if currently running, will be cancelled, and it will not be restarted. +This can be triggered simply by the downstream cancelling, or externally by introducing a @apidoc[KillSwitch] right +after this @apidoc[Source] in the graph. -This uses the same exponential backoff algorithm as @unidoc[Backoff]. +This uses the same exponential backoff algorithm as @apidoc[Backoff]. ## Reactive Streams semantics diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/ask.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/ask.md index f2072651ae..8ef622bf60 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/ask.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/ask.md @@ -13,7 +13,7 @@ Use the `ask` pattern to send a request-reply message to the target `ref` actor. ## Description Use the `ask` pattern to send a request-reply message to the target `ref` actor. -If any of the asks times out it will fail the stream with a @unidoc[AskTimeoutException]. +If any of the asks times out, it will fail the stream with an @apidoc[AskTimeoutException]. The `mapTo` class parameter is used to cast the incoming responses to the expected response type.
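For illustration, a minimal sketch of the `ask` operator described above, against the Akka 2.5-era Scala API; the `EchoActor` is a hypothetical stand-in for a real request-reply actor:

```scala
import akka.actor.{ Actor, ActorSystem, Props }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import akka.util.Timeout
import scala.concurrent.duration._

// Hypothetical actor that replies to every request it receives.
class EchoActor extends Actor {
  def receive = { case msg => sender() ! s"echo: $msg" }
}

implicit val system = ActorSystem("ask-sample")
implicit val materializer = ActorMaterializer()
// A reply that does not arrive within this timeout fails the
// stream with an AskTimeoutException.
implicit val timeout: Timeout = 3.seconds

val echo = system.actorOf(Props[EchoActor], "echo")

Source(1 to 3)
  .ask[String](parallelism = 4)(echo) // replies are cast to String
  .runWith(Sink.foreach(println))
```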
diff --git a/akka-docs/src/main/paradox/stream/operators/index.md b/akka-docs/src/main/paradox/stream/operators/index.md index b3b2cdc805..ea5f587d39 100644 --- a/akka-docs/src/main/paradox/stream/operators/index.md +++ b/akka-docs/src/main/paradox/stream/operators/index.md @@ -278,11 +278,11 @@ For more background see the @ref[Error Handling in Streams](../stream-error.md) | |Operator|Description| |--|--|--| -|RestartSource|@ref[onFailuresWithBackoff](RestartSource/onFailuresWithBackoff.md)|Wrap the given @unidoc[Source] with a @unidoc[Source] that will restart it when it fails using an exponential backoff.| -|RestartFlow|@ref[onFailuresWithBackoff](RestartFlow/onFailuresWithBackoff.md)|Wrap the given @unidoc[Flow] with a @unidoc[Flow] that will restart it when it fails using an exponential backoff. Notice that this @unidoc[Flow] will not restart on completion of the wrapped flow.| -|RestartSource|@ref[withBackoff](RestartSource/withBackoff.md)|Wrap the given @unidoc[Source] with a @unidoc[Source] that will restart it when it fails or complete using an exponential backoff.| -|RestartFlow|@ref[withBackoff](RestartFlow/withBackoff.md)|Wrap the given @unidoc[Flow] with a @unidoc[Flow] that will restart it when it fails or complete using an exponential backoff.| -|RestartSink|@ref[withBackoff](RestartSink/withBackoff.md)|Wrap the given @unidoc[Sink] with a @unidoc[Sink] that will restart it when it fails or complete using an exponential backoff.| +|RestartSource|@ref[onFailuresWithBackoff](RestartSource/onFailuresWithBackoff.md)|Wrap the given @apidoc[Source] with a @apidoc[Source] that will restart it when it fails using an exponential backoff.| +|RestartFlow|@ref[onFailuresWithBackoff](RestartFlow/onFailuresWithBackoff.md)|Wrap the given @apidoc[Flow] with a @apidoc[Flow] that will restart it when it fails using an exponential backoff. Notice that this @apidoc[Flow] will not restart on completion of the wrapped flow.| +|RestartSource|@ref[withBackoff](RestartSource/withBackoff.md)|Wrap the given @apidoc[Source] with a @apidoc[Source] that will restart it when it fails or completes using an exponential backoff.| +|RestartFlow|@ref[withBackoff](RestartFlow/withBackoff.md)|Wrap the given @apidoc[Flow] with a @apidoc[Flow] that will restart it when it fails or completes using an exponential backoff.| +|RestartSink|@ref[withBackoff](RestartSink/withBackoff.md)|Wrap the given @apidoc[Sink] with a @apidoc[Sink] that will restart it when it fails or completes using an exponential backoff.| @@@ index diff --git a/akka-docs/src/main/paradox/stream/stream-customize.md b/akka-docs/src/main/paradox/stream/stream-customize.md index 330702f422..9381c4571c 100644 --- a/akka-docs/src/main/paradox/stream/stream-customize.md +++ b/akka-docs/src/main/paradox/stream/stream-customize.md @@ -310,7 +310,7 @@ In that sense, it serves a very similar purpose as `ActorLogging` does for Actor @@@ div { .group-java } -You can extend the @unidoc[GraphStageLogicWithLogging] or @unidoc[TimerGraphStageLogicWithLogging] classes +You can extend the @apidoc[GraphStageLogicWithLogging] or @apidoc[TimerGraphStageLogicWithLogging] classes instead of the usual `GraphStageLogic` to enable you to obtain a `LoggingAdapter` inside your operator as long as the `Materializer` you're using is able to provide you with a logger.
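For illustration, a minimal sketch of such a logging stage, against the Akka 2.5-era Scala API; the `LoggingIdentity` stage itself is hypothetical:

```scala
import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
import akka.stream.stage.{ GraphStage, GraphStageLogicWithLogging, InHandler, OutHandler }

// Hypothetical pass-through stage; extending GraphStageLogicWithLogging
// instead of GraphStageLogic is what makes the `log` field available.
final class LoggingIdentity[T] extends GraphStage[FlowShape[T, T]] {
  val in: Inlet[T] = Inlet("LoggingIdentity.in")
  val out: Outlet[T] = Outlet("LoggingIdentity.out")
  override val shape: FlowShape[T, T] = FlowShape(in, out)

  override def createLogic(inheritedAttributes: Attributes) =
    new GraphStageLogicWithLogging(shape) {
      setHandler(in, new InHandler {
        override def onPush(): Unit = {
          val elem = grab(in)
          // LoggingAdapter provided by the materializer
          log.debug("Passing through element: {}", elem)
          push(out, elem)
        }
      })
      setHandler(out, new OutHandler {
        override def onPull(): Unit = pull(in)
      })
    }
}
```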
diff --git a/akka-docs/src/main/paradox/typed/actor-lifecycle.md b/akka-docs/src/main/paradox/typed/actor-lifecycle.md index cb80b0ca6d..c75511352d 100644 --- a/akka-docs/src/main/paradox/typed/actor-lifecycle.md +++ b/akka-docs/src/main/paradox/typed/actor-lifecycle.md @@ -17,7 +17,7 @@ TODO intro ## Creating Actors An actor can create, or _spawn_, an arbitrary number of child actors, which in turn can spawn children of their own, thus -forming an actor hierarchy. @unidoc[akka.actor.typed.ActorSystem] hosts the hierarchy and there can be only one _root actor_, +forming an actor hierarchy. @apidoc[akka.actor.typed.ActorSystem] hosts the hierarchy and there can be only one _root actor_, the actor at the top of the hierarchy of the `ActorSystem`. The lifecycle of a child actor is tied to the parent -- a child can stop itself or be stopped at any time but it can never outlive its parent. @@ -37,7 +37,7 @@ Java @@@ Note -In the untyped counter part, the @unidoc[akka.actor.ActorSystem], the root actor was provided out of the box and you +In the untyped counterpart, the @apidoc[akka.actor.ActorSystem], the root actor was provided out of the box and you could spawn top-level actors from the outside of the `ActorSystem` using `actorOf`. @ref:[SpawnProtocol](#spawnprotocol) is a tool that mimics the old style of starting up actors. @@@ ### Spawning Children -Child actors are spawned with @scala[@unidoc[akka.actor.typed.scaladsl.ActorContext]]@java[@unidoc[akka.actor.typed.javadsl.ActorContext]]'s `spawn`. +Child actors are spawned with @scala[@apidoc[akka.actor.typed.scaladsl.ActorContext]]@java[@apidoc[akka.actor.typed.javadsl.ActorContext]]'s `spawn`. In the example below, when the root actor is started, it spawns a child actor described by the behavior `HelloWorld.greeter`. Additionally, when the root actor receives a `Start` message, it creates a child actor defined by the behavior `HelloWorldBot.bot`: @@ -57,7 +57,7 @@ Scala Java : @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-main } -To specify a dispatcher when spawning an actor use @unidoc[DispatcherSelector]. If not specified, the actor will +To specify a dispatcher when spawning an actor use @apidoc[DispatcherSelector]. If not specified, the actor will use the default dispatcher, see @ref:[Default dispatcher](../dispatchers.md#default-dispatcher) for details. Scala diff --git a/akka-docs/src/main/paradox/typed/distributed-data.md b/akka-docs/src/main/paradox/typed/distributed-data.md index 5d9d363c10..fe128ebfc9 100644 --- a/akka-docs/src/main/paradox/typed/distributed-data.md +++ b/akka-docs/src/main/paradox/typed/distributed-data.md @@ -41,9 +41,9 @@ out-of-date value. ## Using the Replicator -The @scala[@unidoc[akka.cluster.ddata.typed.scaladsl.Replicator]]@java[@unidoc[akka.cluster.ddata.typed.javadsl.Replicator]] +The @scala[@apidoc[akka.cluster.ddata.typed.scaladsl.Replicator]]@java[@apidoc[akka.cluster.ddata.typed.javadsl.Replicator]] actor provides the API for interacting with the data and is accessed through the extension -@scala[@unidoc[akka.cluster.ddata.typed.scaladsl.DistributedData]]@java[@unidoc[akka.cluster.ddata.typed.javadsl.DistributedData]]. +@scala[@apidoc[akka.cluster.ddata.typed.scaladsl.DistributedData]]@java[@apidoc[akka.cluster.ddata.typed.javadsl.DistributedData]].
The messages for the replicator, such as `Replicator.Update`, are defined in @scala[`akka.cluster.ddata.typed.scaladsl.Replicator`] @java[`akka.cluster.ddata.typed.scaladsl.Replicator`] but the actual CRDTs are the diff --git a/build.sbt b/build.sbt index 0ea1a34493..a07fa4eadb 100644 --- a/build.sbt +++ b/build.sbt @@ -270,6 +270,7 @@ lazy val docs = akkaModule("akka-docs") ), Compile / paradoxGroups := Map("Language" -> Seq("Scala", "Java")), resolvers += Resolver.jcenterRepo, + apidocRootPackage := "akka", deployRsyncArtifact := List((paradox in Compile).value -> s"www/docs/akka/${version.value}") ) .enablePlugins( diff --git a/project/ParadoxSupport.scala b/project/ParadoxSupport.scala index 2ba0869505..665569b4cb 100644 --- a/project/ParadoxSupport.scala +++ b/project/ParadoxSupport.scala @@ -32,69 +32,10 @@ object ParadoxSupport { { context: Writer.Context => new SignatureDirective(context.location.tree.label, context.properties, msg => log.warn(msg)) }, - { _: Writer.Context => new UnidocDirective(allClasses) } )} }.value ) - class UnidocDirective(allClasses: IndexedSeq[String]) extends InlineDirective("unidoc") { - def render(node: DirectiveNode, visitor: Visitor, printer: Printer): Unit = { - if (node.label.split('[')(0).contains('.')) { - val fqcn = node.label - if (allClasses.contains(fqcn)) { - val label = fqcn.split('.').last - syntheticNode("java", javaLabel(label), fqcn, node).accept(visitor) - syntheticNode("scala", label, fqcn, node).accept(visitor) - } else { - throw new java.lang.IllegalStateException(s"fqcn not found by @unidoc[$fqcn]") - } - } - else { - renderByClassName(node.label, node, visitor, printer) - } - } - def javaLabel(label: String): String = - label.replaceAll("\\[", "<").replaceAll("\\]", ">").replace('_', '?') - def syntheticNode(group: String, label: String, fqcn: String, node: DirectiveNode): DirectiveNode = { - val syntheticSource = new DirectiveNode.Source.Direct(fqcn) - val attributes = new org.pegdown.ast.DirectiveAttributes.AttributeMap() - new DirectiveNode(DirectiveNode.Format.Inline, group, null, null, attributes, null, - new DirectiveNode(DirectiveNode.Format.Inline, group + "doc", label, syntheticSource, node.attributes, fqcn, - new TextNode(label) - )) - } - def renderByClassName(label: String, node: DirectiveNode, visitor: Visitor, printer: Printer): Unit = { - val label = node.label.replaceAll("\\\\_", "_") - val labelWithoutGenericParameters = label.split("\\[")(0) - val labelWithJavaGenerics = javaLabel(label) - val matches = allClasses.filter(_.endsWith('.' + labelWithoutGenericParameters)) - matches.size match { - case 0 => - throw new java.lang.IllegalStateException(s"No matches found for $label") - case 1 if matches(0).contains("adsl") => - throw new java.lang.IllegalStateException(s"Match for $label only found in one language: ${matches(0)}") - case 1 => - syntheticNode("scala", label, matches(0), node).accept(visitor) - syntheticNode("java", labelWithJavaGenerics, matches(0), node).accept(visitor) - case 2 if matches.forall(_.contains("adsl")) => - matches.foreach(m => { - if (!m.contains("javadsl")) - syntheticNode("scala", label, m, node).accept(visitor) - if (!m.contains("scaladsl")) - syntheticNode("java", labelWithJavaGenerics, m, node).accept(visitor) - }) - case n => - throw new java.lang.IllegalStateException( - s"$n matches found for @unidoc[$label], but not javadsl/scaladsl: ${matches.mkString(", ")}. " + - s"You may want to use the fully qualified class name as @unidoc[fqcn] instead of @unidoc[${label}]."
- ) - } - } - } - class SignatureDirective(page: Page, variables: Map[String, String], logWarn: String => Unit) extends LeafBlockDirective("signature") { def render(node: DirectiveNode, visitor: Visitor, printer: Printer): Unit = try { diff --git a/project/plugins.sbt b/project/plugins.sbt index b10289690d..a8eabfea88 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -16,6 +16,7 @@ addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.15") addSbtPlugin("io.spray" % "sbt-boilerplate" % "0.6.1") addSbtPlugin("com.timushev.sbt" % "sbt-updates" % "0.3.4") addSbtPlugin("com.lightbend.akka" % "sbt-paradox-akka" % "0.18") +addSbtPlugin("com.lightbend.paradox" % "sbt-paradox-apidoc" % "0.1+9-d846d815") addSbtPlugin("com.lightbend" % "sbt-whitesource" % "0.1.13") addSbtPlugin("com.typesafe.sbt" % "sbt-git" % "1.0.0") addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.0.0") // for maintenance of copyright file header