From 3eb9b3a1a6cc9060c8fe455c70aa48aa99e91716 Mon Sep 17 00:00:00 2001 From: Enno <458526+ennru@users.noreply.github.com> Date: Thu, 13 Sep 2018 10:49:51 +0200 Subject: [PATCH] Use absolute snippet paths (#25607) * Support absolute snippet path in signature directive * Removed $akka$ from snippet paths * Remove $code$ snippet alias * Remove $code$ snippet prefix --- akka-docs/src/main/paradox/actors.md | 186 ++++------ akka-docs/src/main/paradox/additional/faq.md | 2 +- akka-docs/src/main/paradox/additional/osgi.md | 4 +- akka-docs/src/main/paradox/agents.md | 30 +- akka-docs/src/main/paradox/camel.md | 92 +++--- akka-docs/src/main/paradox/cluster-client.md | 24 +- akka-docs/src/main/paradox/cluster-dc.md | 12 +- akka-docs/src/main/paradox/cluster-metrics.md | 18 +- akka-docs/src/main/paradox/cluster-routing.md | 26 +- .../src/main/paradox/cluster-sharding.md | 30 +- .../src/main/paradox/cluster-singleton.md | 30 +- akka-docs/src/main/paradox/cluster-usage.md | 50 ++-- .../src/main/paradox/common/circuitbreaker.md | 16 +- akka-docs/src/main/paradox/common/duration.md | 10 +- akka-docs/src/main/paradox/dispatchers.md | 56 ++-- .../src/main/paradox/distributed-data.md | 118 ++++---- .../src/main/paradox/distributed-pub-sub.md | 34 +-- akka-docs/src/main/paradox/event-bus.md | 56 ++-- akka-docs/src/main/paradox/extending-akka.md | 32 +- .../main/paradox/fault-tolerance-sample.md | 4 +- akka-docs/src/main/paradox/fault-tolerance.md | 46 +-- akka-docs/src/main/paradox/fsm.md | 76 ++--- akka-docs/src/main/paradox/futures.md | 138 ++++----- .../src/main/paradox/general/configuration.md | 32 +- akka-docs/src/main/paradox/general/jmm.md | 4 +- .../general/stream/stream-configuration.md | 2 +- .../src/main/paradox/general/supervision.md | 16 +- .../src/main/paradox/guide/tutorial_1.md | 20 +- .../src/main/paradox/guide/tutorial_2.md | 8 +- .../src/main/paradox/guide/tutorial_3.md | 28 +- .../src/main/paradox/guide/tutorial_4.md | 40 +-- .../src/main/paradox/guide/tutorial_5.md | 48 +-- akka-docs/src/main/paradox/howto.md | 4 +- akka-docs/src/main/paradox/io-tcp.md | 64 ++-- akka-docs/src/main/paradox/io-udp.md | 24 +- akka-docs/src/main/paradox/io.md | 6 +- akka-docs/src/main/paradox/logging.md | 26 +- akka-docs/src/main/paradox/mailboxes.md | 52 ++-- .../src/main/paradox/multi-jvm-testing.md | 2 +- .../src/main/paradox/multi-node-testing.md | 6 +- akka-docs/src/main/paradox/persistence-fsm.md | 32 +- .../src/main/paradox/persistence-journals.md | 32 +- .../main/paradox/persistence-query-leveldb.md | 22 +- .../src/main/paradox/persistence-query.md | 64 ++-- .../paradox/persistence-schema-evolution.md | 50 ++-- akka-docs/src/main/paradox/persistence.md | 148 +++++----- akka-docs/src/main/paradox/remoting-artery.md | 32 +- akka-docs/src/main/paradox/remoting.md | 24 +- akka-docs/src/main/paradox/routing.md | 274 +++++++++--------- akka-docs/src/main/paradox/scheduler.md | 24 +- akka-docs/src/main/paradox/serialization.md | 44 +-- .../paradox/stream/operators/ActorFlow/ask.md | 6 +- .../stream/operators/ActorSink/actorRef.md | 2 +- .../stream/operators/FileIO/fromPath.md | 2 +- .../paradox/stream/operators/FileIO/toPath.md | 2 +- .../operators/Flow/fromSinkAndSource.md | 2 +- .../Flow/fromSinkAndSourceCoupled.md | 2 +- .../stream/operators/Flow/lazyInitAsync.md | 2 +- .../paradox/stream/operators/Sink/actorRef.md | 2 +- .../stream/operators/Sink/actorRefWithAck.md | 8 +- .../stream/operators/Sink/asPublisher.md | 2 +- .../stream/operators/Sink/cancelled.md | 2 +- 
.../paradox/stream/operators/Sink/combine.md | 2 +- .../paradox/stream/operators/Sink/fold.md | 2 +- .../paradox/stream/operators/Sink/foreach.md | 2 +- .../stream/operators/Sink/foreachParallel.md | 2 +- .../stream/operators/Sink/fromSubscriber.md | 2 +- .../paradox/stream/operators/Sink/head.md | 2 +- .../stream/operators/Sink/headOption.md | 2 +- .../paradox/stream/operators/Sink/ignore.md | 2 +- .../paradox/stream/operators/Sink/last.md | 2 +- .../stream/operators/Sink/lastOption.md | 2 +- .../stream/operators/Sink/lazyInitAsync.md | 2 +- .../stream/operators/Sink/onComplete.md | 2 +- .../stream/operators/Sink/preMaterialize.md | 2 +- .../paradox/stream/operators/Sink/queue.md | 2 +- .../paradox/stream/operators/Sink/reduce.md | 2 +- .../main/paradox/stream/operators/Sink/seq.md | 2 +- .../paradox/stream/operators/Sink/takeLast.md | 2 +- .../stream/operators/Source-or-Flow/alsoTo.md | 2 +- .../stream/operators/Source-or-Flow/apply.md | 2 +- .../stream/operators/Source-or-Flow/ask.md | 2 +- .../Source-or-Flow/backpressureTimeout.md | 2 +- .../stream/operators/Source-or-Flow/batch.md | 2 +- .../operators/Source-or-Flow/batchWeighted.md | 2 +- .../stream/operators/Source-or-Flow/buffer.md | 2 +- .../operators/Source-or-Flow/collect.md | 2 +- .../operators/Source-or-Flow/collectType.md | 2 +- .../Source-or-Flow/completionTimeout.md | 2 +- .../stream/operators/Source-or-Flow/concat.md | 2 +- .../operators/Source-or-Flow/conflate.md | 2 +- .../Source-or-Flow/conflateWithSeed.md | 2 +- .../stream/operators/Source-or-Flow/delay.md | 2 +- .../stream/operators/Source-or-Flow/detach.md | 2 +- .../operators/Source-or-Flow/divertTo.md | 2 +- .../stream/operators/Source-or-Flow/drop.md | 2 +- .../operators/Source-or-Flow/dropWhile.md | 2 +- .../operators/Source-or-Flow/dropWithin.md | 2 +- .../stream/operators/Source-or-Flow/expand.md | 2 +- .../operators/Source-or-Flow/extrapolate.md | 2 +- .../stream/operators/Source-or-Flow/filter.md | 2 +- .../operators/Source-or-Flow/filterNot.md | 2 +- .../operators/Source-or-Flow/flatMapConcat.md | 2 +- .../operators/Source-or-Flow/flatMapMerge.md | 2 +- .../stream/operators/Source-or-Flow/fold.md | 2 +- .../operators/Source-or-Flow/foldAsync.md | 2 +- .../operators/Source-or-Flow/groupBy.md | 2 +- .../operators/Source-or-Flow/grouped.md | 2 +- .../Source-or-Flow/groupedWeightedWithin.md | 2 +- .../operators/Source-or-Flow/groupedWithin.md | 2 +- .../operators/Source-or-Flow/idleTimeout.md | 2 +- .../operators/Source-or-Flow/initialDelay.md | 2 +- .../Source-or-Flow/initialTimeout.md | 2 +- .../operators/Source-or-Flow/interleave.md | 2 +- .../operators/Source-or-Flow/intersperse.md | 2 +- .../operators/Source-or-Flow/keepAlive.md | 2 +- .../stream/operators/Source-or-Flow/limit.md | 2 +- .../operators/Source-or-Flow/limitWeighted.md | 2 +- .../stream/operators/Source-or-Flow/log.md | 6 +- .../stream/operators/Source-or-Flow/map.md | 4 +- .../operators/Source-or-Flow/mapAsync.md | 2 +- .../Source-or-Flow/mapAsyncUnordered.md | 2 +- .../operators/Source-or-Flow/mapConcat.md | 2 +- .../operators/Source-or-Flow/mapError.md | 2 +- .../stream/operators/Source-or-Flow/merge.md | 2 +- .../operators/Source-or-Flow/mergeSorted.md | 2 +- .../operators/Source-or-Flow/monitor.md | 2 +- .../stream/operators/Source-or-Flow/orElse.md | 2 +- .../operators/Source-or-Flow/prefixAndTail.md | 2 +- .../operators/Source-or-Flow/prepend.md | 2 +- .../operators/Source-or-Flow/recover.md | 2 +- .../operators/Source-or-Flow/recoverWith.md | 2 +- 
.../Source-or-Flow/recoverWithRetries.md | 2 +- .../stream/operators/Source-or-Flow/reduce.md | 2 +- .../stream/operators/Source-or-Flow/scan.md | 2 +- .../operators/Source-or-Flow/scanAsync.md | 2 +- .../operators/Source-or-Flow/sliding.md | 2 +- .../operators/Source-or-Flow/splitAfter.md | 2 +- .../operators/Source-or-Flow/splitWhen.md | 2 +- .../Source-or-Flow/statefulMapConcat.md | 2 +- .../stream/operators/Source-or-Flow/take.md | 2 +- .../operators/Source-or-Flow/takeWhile.md | 2 +- .../operators/Source-or-Flow/takeWithin.md | 2 +- .../operators/Source-or-Flow/throttle.md | 2 +- .../stream/operators/Source-or-Flow/watch.md | 2 +- .../Source-or-Flow/watchTermination.md | 2 +- .../operators/Source-or-Flow/wireTap.md | 2 +- .../stream/operators/Source-or-Flow/zip.md | 2 +- .../operators/Source-or-Flow/zipWith.md | 2 +- .../operators/Source-or-Flow/zipWithIndex.md | 2 +- .../stream/operators/Source/actorRef.md | 2 +- .../stream/operators/Source/asSubscriber.md | 2 +- .../stream/operators/Source/combine.md | 4 +- .../paradox/stream/operators/Source/cycle.md | 2 +- .../paradox/stream/operators/Source/empty.md | 2 +- .../paradox/stream/operators/Source/failed.md | 2 +- .../paradox/stream/operators/Source/from.md | 4 +- .../operators/Source/fromCompletionStage.md | 2 +- .../stream/operators/Source/fromFuture.md | 4 +- .../operators/Source/fromFutureSource.md | 2 +- .../stream/operators/Source/fromIterator.md | 2 +- .../stream/operators/Source/fromPublisher.md | 2 +- .../paradox/stream/operators/Source/lazily.md | 2 +- .../paradox/stream/operators/Source/maybe.md | 2 +- .../paradox/stream/operators/Source/queue.md | 6 +- .../paradox/stream/operators/Source/range.md | 4 +- .../paradox/stream/operators/Source/repeat.md | 2 +- .../paradox/stream/operators/Source/single.md | 6 +- .../paradox/stream/operators/Source/tick.md | 2 +- .../paradox/stream/operators/Source/unfold.md | 2 +- .../stream/operators/Source/unfoldAsync.md | 2 +- .../stream/operators/Source/unfoldResource.md | 2 +- .../operators/Source/unfoldResourceAsync.md | 2 +- .../paradox/stream/operators/Source/zipN.md | 2 +- .../stream/operators/Source/zipWithN.md | 2 +- .../StreamConverters/asInputStream.md | 2 +- .../StreamConverters/asJavaStream.md | 2 +- .../StreamConverters/asOutputStream.md | 2 +- .../StreamConverters/fromInputStream.md | 2 +- .../StreamConverters/fromJavaStream.md | 2 +- .../StreamConverters/fromOutputStream.md | 2 +- .../StreamConverters/javaCollector.md | 2 +- .../javaCollectorParallelUnordered.md | 2 +- .../main/paradox/stream/stream-composition.md | 58 ++-- .../main/paradox/stream/stream-cookbook.md | 120 ++++---- .../main/paradox/stream/stream-customize.md | 58 ++-- .../src/main/paradox/stream/stream-dynamic.md | 56 ++-- .../src/main/paradox/stream/stream-error.md | 56 ++-- .../paradox/stream/stream-flows-and-basics.md | 48 +-- .../src/main/paradox/stream/stream-graphs.md | 86 +++--- .../paradox/stream/stream-integrations.md | 126 ++++---- .../src/main/paradox/stream/stream-io.md | 32 +- .../main/paradox/stream/stream-parallelism.md | 16 +- .../main/paradox/stream/stream-quickstart.md | 98 +++---- .../src/main/paradox/stream/stream-rate.md | 66 ++--- .../src/main/paradox/stream/stream-refs.md | 22 +- .../main/paradox/stream/stream-substream.md | 32 +- .../src/main/paradox/stream/stream-testkit.md | 40 +-- akka-docs/src/main/paradox/testing.md | 124 ++++---- akka-docs/src/main/paradox/typed-actors.md | 76 ++--- .../src/main/paradox/typed/actor-discovery.md | 12 +- .../src/main/paradox/typed/actor-lifecycle.md | 
24 +- akka-docs/src/main/paradox/typed/actors.md | 36 +-- .../main/paradox/typed/cluster-sharding.md | 20 +- .../main/paradox/typed/cluster-singleton.md | 8 +- akka-docs/src/main/paradox/typed/cluster.md | 30 +- .../src/main/paradox/typed/coexisting.md | 34 +-- .../src/main/paradox/typed/dispatchers.md | 8 +- .../src/main/paradox/typed/fault-tolerance.md | 28 +- akka-docs/src/main/paradox/typed/fsm.md | 12 +- .../paradox/typed/interaction-patterns.md | 40 +-- .../src/main/paradox/typed/persistence.md | 80 ++--- akka-docs/src/main/paradox/typed/stash.md | 4 +- akka-docs/src/main/paradox/typed/stream.md | 12 +- akka-docs/src/main/paradox/typed/testing.md | 60 ++-- build.sbt | 2 - project/ParadoxSupport.scala | 9 +- 217 files changed, 2022 insertions(+), 2025 deletions(-) diff --git a/akka-docs/src/main/paradox/actors.md b/akka-docs/src/main/paradox/actors.md index 7011e81410..7f2901d471 100644 --- a/akka-docs/src/main/paradox/actors.md +++ b/akka-docs/src/main/paradox/actors.md @@ -61,10 +61,10 @@ the messages should be processed. You can build such behavior with a builder nam Here is an example: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #imports1 #my-actor } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #imports1 #my-actor } Java -: @@snip [MyActor.java]($code$/java/jdocs/actor/MyActor.java) { #imports #my-actor } +: @@snip [MyActor.java](/akka-docs/src/test/java/jdocs/actor/MyActor.java) { #imports #my-actor } Please note that the Akka Actor @scala[`receive`] message loop is exhaustive, which is different compared to Erlang and the late Scala Actors. This means that you @@ -89,7 +89,7 @@ construction. #### Here is another example that you can edit and run in the browser: -@@fiddle [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #fiddle_code template=Akka layout=v75 minheight=400px } +@@fiddle [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #fiddle_code template=Akka layout=v75 minheight=400px } @@@ @@ -102,10 +102,10 @@ dispatcher to use, see more below). Here are some examples of how to create a `Props` instance. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #creating-props } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #creating-props } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-props #creating-props } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #import-props #creating-props } The second variant shows how to pass constructor arguments to the @@ -127,10 +127,10 @@ for cases when the actor constructor takes value classes as arguments. #### Dangerous Variants Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #creating-props-deprecated } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #creating-props-deprecated } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #creating-props-deprecated } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #creating-props-deprecated } This method is not recommended to be used within another actor because it encourages to close over the enclosing scope, resulting in non-serializable @@ -162,13 +162,13 @@ There are two edge cases in actor creation with `Props`: * An actor with `AnyVal` arguments. 
-@@snip [PropsEdgeCaseSpec.scala]($code$/scala/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-value-class } +@@snip [PropsEdgeCaseSpec.scala](/akka-docs/src/test/scala/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-value-class } -@@snip [PropsEdgeCaseSpec.scala]($code$/scala/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-value-class-example } +@@snip [PropsEdgeCaseSpec.scala](/akka-docs/src/test/scala/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-value-class-example } * An actor with default constructor values. -@@snip [PropsEdgeCaseSpec.scala]($code$/scala/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-default-values } +@@snip [PropsEdgeCaseSpec.scala](/akka-docs/src/test/scala/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-default-values } In both cases an `IllegalArgumentException` will be thrown stating no matching constructor could be found. @@ -189,10 +189,10 @@ arguments as constructor parameters, since within static method] the given code block will not retain a reference to its enclosing scope: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #props-factory } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #props-factory } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #props-factory } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #props-factory } Another good practice is to declare what messages an Actor can receive @scala[in the companion object of the Actor] @@ -200,10 +200,10 @@ Another good practice is to declare what messages an Actor can receive which makes easier to know what it can receive: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #messages-in-companion } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #messages-in-companion } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #messages-in-companion } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #messages-in-companion } ### Creating Actors with Props @@ -212,20 +212,20 @@ Actors are created by passing a `Props` instance into the `ActorContext`. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #system-actorOf } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #system-actorOf } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-actorRef } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #import-actorRef } Using the `ActorSystem` will create top-level actors, supervised by the actor system’s provided guardian actor, while using an actor’s context will create a child actor. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #context-actorOf } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #context-actorOf } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #context-actorOf } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #context-actorOf } It is recommended to create a hierarchy of children, grand-children and so on such that it fits the logical failure-handling structure of the application, @@ -258,7 +258,7 @@ value classes. 
In these cases you should either unpack the arguments or create the props by calling the constructor manually: -@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #actor-with-value-class-argument } +@@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #actor-with-value-class-argument } @@@ @@ -270,10 +270,10 @@ are cases when a factory method must be used, for example when the actual constructor arguments are determined by a dependency injection framework. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #creating-indirectly } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #creating-indirectly } Java -: @@snip [DependencyInjectionDocTest.java]($code$/java/jdocs/actor/DependencyInjectionDocTest.java) { #import #creating-indirectly } +: @@snip [DependencyInjectionDocTest.java](/akka-docs/src/test/java/jdocs/actor/DependencyInjectionDocTest.java) { #import #creating-indirectly } @@@ warning @@ -301,10 +301,10 @@ to a notification service) and watching other actors’ lifecycle. For these purposes there is the `Inbox` class: Scala -: @@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #inbox } +: @@snip [ActorDSLSpec.scala](/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #inbox } Java -: @@snip [InboxDocTest.java]($code$/java/jdocs/actor/InboxDocTest.java) { #inbox } +: @@snip [InboxDocTest.java](/akka-docs/src/test/java/jdocs/actor/InboxDocTest.java) { #inbox } @@@ div { .group-scala } @@ -314,7 +314,7 @@ in this example the sender reference will be that of the actor hidden away within the inbox. This allows the reply to be received on the last line. Watching an actor is quite simple as well: -@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #watch } +@@snip [ActorDSLSpec.scala](/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #watch } @@@ @@ -324,7 +324,7 @@ The `send` method wraps a normal `tell` and supplies the internal actor’s reference as the sender. This allows the reply to be received on the last line. Watching an actor is quite simple as well: -@@snip [InboxDocTest.java]($code$/java/jdocs/actor/InboxDocTest.java) { #watch } +@@snip [InboxDocTest.java](/akka-docs/src/test/java/jdocs/actor/InboxDocTest.java) { #watch } @@@ @@ -371,7 +371,7 @@ time). You can import the members in the `context` to avoid prefixing access with `context.` -@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #import-context } +@@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #import-context } @@@ @@ -379,10 +379,10 @@ The remaining visible methods are user-overridable life-cycle hooks which are described in the following: Scala -: @@snip [Actor.scala]($akka$/akka-actor/src/main/scala/akka/actor/Actor.scala) { #lifecycle-hooks } +: @@snip [Actor.scala](/akka-actor/src/main/scala/akka/actor/Actor.scala) { #lifecycle-hooks } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #lifecycle-callbacks } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #lifecycle-callbacks } The implementations shown above are the defaults provided by the @scala[`Actor` trait.] @java[`AbstractActor` class.] @@ -455,10 +455,10 @@ termination (see [Stopping Actors](#stopping-actors)). 
This service is provided Registering a monitor is easy: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #watch } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #watch } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-terminated #watch } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #import-terminated #watch } It should be noted that the `Terminated` message is generated independent of the order in which registration and termination occur. @@ -484,10 +484,10 @@ no `Terminated` message for that actor will be processed anymore. Right after starting the actor, its `preStart` method is invoked. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #preStart } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #preStart } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #preStart } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #preStart } This method is called when the actor is first created. During restarts it is called by the default implementation of `postRestart`, which means that @@ -561,10 +561,10 @@ paths—logical or physical—and receive back an `ActorSelection` with the result: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #selection-local } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #selection-local } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #selection-local } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #selection-local } @@@ note @@ -593,10 +593,10 @@ The path elements of an actor selection may contain wildcard patterns allowing f broadcasting of messages to that section: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #selection-wildcard } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #selection-wildcard } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #selection-wildcard } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #selection-wildcard } Messages can be sent via the `ActorSelection` and the path of the `ActorSelection` is looked up when delivering each message. If the selection @@ -613,10 +613,10 @@ negative result is generated. Please note that this does not mean that delivery of that reply is guaranteed, it still is a normal message. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #identify } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #identify } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-identify #identify } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #import-identify #identify } You can also acquire an `ActorRef` for an `ActorSelection` with the `resolveOne` method of the `ActorSelection`. It returns a `Future` @@ -628,10 +628,10 @@ didn't complete within the supplied `timeout`. 
Remote actor addresses may also be looked up, if @ref:[remoting](remoting.md) is enabled: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #selection-remote } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #selection-remote } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #selection-remote } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #selection-remote } An example demonstrating actor look-up is given in @ref:[Remoting Sample](remoting.md#remote-sample). @@ -650,10 +650,10 @@ state) and works great with pattern matching at the receiver side.] Here is an @scala[example:] @java[example of an immutable message:] Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #immutable-message-definition #immutable-message-instantiation } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #immutable-message-definition #immutable-message-instantiation } Java -: @@snip [ImmutableMessage.java]($code$/java/jdocs/actor/ImmutableMessage.java) { #immutable-message } +: @@snip [ImmutableMessage.java](/akka-docs/src/test/java/jdocs/actor/ImmutableMessage.java) { #immutable-message } ## Send messages @@ -691,10 +691,10 @@ This is the preferred way of sending messages. No blocking waiting for a message. This gives the best concurrency and scalability characteristics. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #tell } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #tell } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #tell } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #tell } @@@ div { .group-scala } @@ -728,10 +728,10 @@ The `ask` pattern involves actors as well as futures, hence it is offered as a use pattern rather than a method on `ActorRef`: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #ask-pipeTo } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #ask-pipeTo } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-ask #ask-pipe } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #import-ask #ask-pipe } This example demonstrates `ask` together with the `pipeTo` pattern on @@ -767,10 +767,10 @@ are treated specially by the ask pattern. @@@ Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #reply-exception } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #reply-exception } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #reply-exception } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #reply-exception } If the actor does not complete the future, it will expire after the timeout period, @scala[completing it with an `AskTimeoutException`. The timeout is taken from one of the following locations in order of precedence:] @@ -780,11 +780,11 @@ If the actor does not complete the future, it will expire after the timeout peri 1. 
explicitly given timeout as in: - @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #using-explicit-timeout } + @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #using-explicit-timeout } 2. implicit argument of type `akka.util.Timeout`, e.g. - @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #using-implicit-timeout } + @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #using-implicit-timeout } @@@ @@ -816,10 +816,10 @@ through a 'mediator'. This can be useful when writing actors that work as routers, load-balancers, replicators etc. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #forward } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #forward } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #forward } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #forward } ## Receive messages @@ -829,10 +829,10 @@ An Actor has to @java[define its initial receive behavior by implementing the `createReceive` method in the `AbstractActor`:] Scala -: @@snip [Actor.scala]($akka$/akka-actor/src/main/scala/akka/actor/Actor.scala) { #receive } +: @@snip [Actor.scala](/akka-actor/src/main/scala/akka/actor/Actor.scala) { #receive } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #createReceive } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #createReceive } @@@ div { .group-scala } @@ -851,23 +851,23 @@ You can build such behavior with a builder named `ReceiveBuilder`. Here is an ex @@@ Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #imports1 #my-actor } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #imports1 #my-actor } Java -: @@snip [MyActor.java]($code$/java/jdocs/actor/MyActor.java) { #imports #my-actor } +: @@snip [MyActor.java](/akka-docs/src/test/java/jdocs/actor/MyActor.java) { #imports #my-actor } @@@ div { .group-java } In case you want to provide many `match` cases but want to avoid creating a long call trail, you can split the creation of the builder into multiple statements as in the example: -@@snip [GraduallyBuiltActor.java]($code$/java/jdocs/actor/GraduallyBuiltActor.java) { #imports #actor } +@@snip [GraduallyBuiltActor.java](/akka-docs/src/test/java/jdocs/actor/GraduallyBuiltActor.java) { #imports #actor } Using small methods is a good practice, also in actors. It's recommended to delegate the actual work of the message processing to methods instead of defining a huge `ReceiveBuilder` with lots of code in each lambda. A well structured actor can look like this: -@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #well-structured } +@@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #well-structured } That has benefits such as: @@ -889,7 +889,7 @@ that the JVM can have problems optimizing and the resulting code might not be as untyped version. 
When extending `UntypedAbstractActor` each message is received as an untyped `Object` and you have to inspect and cast it to the actual message type in other ways, like this: -@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #optimized } +@@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #optimized } @@@ @@ -904,10 +904,10 @@ message was sent without an actor or future context) then the sender defaults to a 'dead-letter' actor ref. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #reply-without-sender } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #reply-without-sender } Java -: @@snip [MyActor.java]($code$/java/jdocs/actor/MyActor.java) { #reply } +: @@snip [MyActor.java](/akka-docs/src/test/java/jdocs/actor/MyActor.java) { #reply } ## Receive timeout @@ -924,10 +924,10 @@ Once set, the receive timeout stays in effect (i.e. continues firing repeatedly periods). Pass in `Duration.Undefined` to switch off this feature. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #receive-timeout } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #receive-timeout } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #receive-timeout } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #receive-timeout } Messages marked with `NotInfluenceReceiveTimeout` will not reset the timer. This can be useful when `ReceiveTimeout` should be fired by external inactivity but not influenced by internal activity, @@ -943,10 +943,10 @@ to use the support for named timers. The lifecycle of scheduled messages can be when the actor is restarted and that is taken care of by the timers. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/TimerDocSpec.scala) { #timers } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala) { #timers } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/TimerDocTest.java) { #timers } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TimerDocTest.java) { #timers } Each timer has a key and can be replaced or cancelled. It's guaranteed that a message from the previous incarnation of the timer with the same key is not received, even though it might already @@ -966,10 +966,10 @@ termination of the actor is performed asynchronously, i.e. `stop` may return bef the actor is stopped. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #stoppingActors-actor } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #stoppingActors-actor } Java -: @@snip [MyStoppingActor.java]($code$/java/jdocs/actor/MyStoppingActor.java) { #my-stopping-actor } +: @@snip [MyStoppingActor.java](/akka-docs/src/test/java/jdocs/actor/MyStoppingActor.java) { #my-stopping-actor } Processing of the current message, if any, will continue before the actor is stopped, @@ -997,10 +997,10 @@ The `postStop()` hook is invoked after an actor is fully stopped. 
This enables cleaning up of resources: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #postStop } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #postStop } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #postStop } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #postStop } @@@ note @@ -1021,10 +1021,10 @@ ordinary messages and will be handled after messages that were already queued in the mailbox. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #poison-pill } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #poison-pill } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #poison-pill } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #poison-pill } ### Killing an Actor @@ -1038,10 +1038,10 @@ See @ref:[What Supervision Means](general/supervision.md#supervision-directives) Use `Kill` like this: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #kill } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #kill } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #kill } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #kill } In general though it is not recommended to overly rely on either `PoisonPill` or `Kill` in designing your actor interactions, as often times a protocol-level message like `PleaseCleanupAndStop` @@ -1054,10 +1054,10 @@ over which design you do not have control over. termination of several actors: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #gracefulStop} +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #gracefulStop} Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-gracefulStop #gracefulStop} +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #import-gracefulStop #gracefulStop} When `gracefulStop()` returns successfully, the actor’s `postStop()` hook will have been executed: there exists a happens-before edge between the end of @@ -1088,7 +1088,7 @@ services in a specific order and perform registered tasks during the shutdown pr The order of the shutdown phases is defined in configuration `akka.coordinated-shutdown.phases`. The default phases are defined as: -@@snip [reference.conf]($akka$/akka-actor/src/main/resources/reference.conf) { #coordinated-shutdown-phases } +@@snip [reference.conf](/akka-actor/src/main/resources/reference.conf) { #coordinated-shutdown-phases } More phases can be added in the application's configuration if needed by overriding a phase with an additional `depends-on`. 
Especially the phases `before-service-unbind`, `before-cluster-shutdown` and @@ -1101,10 +1101,10 @@ The phases are ordered with [topological](https://en.wikipedia.org/wiki/Topologi Tasks can be added to a phase with: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-addTask } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-addTask } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-addTask } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-addTask } The returned @scala[`Future[Done]`] @java[`CompletionStage`] should be completed when the task is completed. The task name parameter is only used for debugging/logging. @@ -1124,10 +1124,10 @@ To start the coordinated shutdown process you can invoke @scala[`run`] @java[`ru extension: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-run } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-run } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-run } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-run } It's safe to call the @scala[`run`] @java[`runAll`] method multiple times. It will only run once. @@ -1157,10 +1157,10 @@ If you have application specific JVM shutdown hooks it's recommended that you re those shutting down Akka Remoting (Artery). Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-jvm-hook } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-jvm-hook } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-jvm-hook } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-jvm-hook } For some tests it might be undesired to terminate the `ActorSystem` via `CoordinatedShutdown`. You can disable that by adding the following to the configuration of the `ActorSystem` that is @@ -1193,10 +1193,10 @@ Please note that the actor will revert to its original behavior when restarted b To hotswap the Actor behavior using `become`: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #hot-swap-actor } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #hot-swap-actor } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #hot-swap-actor } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #hot-swap-actor } This variant of the `become` method is useful for many different things, such as to implement a Finite State Machine (FSM, for an example see @scala[[Dining @@ -1212,10 +1212,10 @@ in the long run, otherwise this amounts to a memory leak (which is why this behavior is not the default). 
Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #swapper } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #swapper } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #swapper } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #swapper } ### Encoding Scala Actors nested receives without accidentally leaking memory @@ -1257,10 +1257,10 @@ control over the mailbox, see the documentation on mailboxes: @ref:[Mailboxes](m Here is an example of the @scala[`Stash`] @java[`AbstractActorWithStash` class] in action: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #stash } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #stash } Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #stash } +: @@snip [ActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java) { #stash } Invoking `stash()` adds the current message (the message that the actor received last) to the actor's stash. It is typically invoked @@ -1348,7 +1348,7 @@ For example, imagine you have a set of actors which are either `Producers` or `C have an actor share both behaviors. This can be achieved without having to duplicate code by extracting the behaviors to traits and implementing the actor's `receive` as combination of these partial functions. -@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #receive-orElse } +@@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #receive-orElse } Instead of inheritance the same pattern can be applied via composition - compose the receive method using partial functions from delegates. @@ -1384,10 +1384,10 @@ One useful usage of this pattern is to disable creation of new `ActorRefs` for c achieved by overriding `preRestart()`. Below is the default implementation of these lifecycle hooks: Scala -: @@snip [InitializationDocSpec.scala]($code$/scala/docs/actor/InitializationDocSpec.scala) { #preStartInit } +: @@snip [InitializationDocSpec.scala](/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala) { #preStartInit } Java -: @@snip [InitializationDocTest.java]($code$/java/jdocs/actor/InitializationDocTest.java) { #preStartInit } +: @@snip [InitializationDocTest.java](/akka-docs/src/test/java/jdocs/actor/InitializationDocTest.java) { #preStartInit } Please note, that the child actors are *still restarted*, but no new `ActorRef` is created. One can recursively apply @@ -1404,10 +1404,10 @@ and use `become()` or a finite state-machine state transition to encode the init of the actor. Scala -: @@snip [InitializationDocSpec.scala]($code$/scala/docs/actor/InitializationDocSpec.scala) { #messageInit } +: @@snip [InitializationDocSpec.scala](/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala) { #messageInit } Java -: @@snip [InitializationDocTest.java]($code$/java/jdocs/actor/InitializationDocTest.java) { #messageInit } +: @@snip [InitializationDocTest.java](/akka-docs/src/test/java/jdocs/actor/InitializationDocTest.java) { #messageInit } If the actor may receive messages before it has been initialized, a useful tool can be the `Stash` to save messages until the initialization finishes, and replaying them after the actor became initialized. 
diff --git a/akka-docs/src/main/paradox/additional/faq.md b/akka-docs/src/main/paradox/additional/faq.md index 8602e9e9bb..95e19b9883 100644 --- a/akka-docs/src/main/paradox/additional/faq.md +++ b/akka-docs/src/main/paradox/additional/faq.md @@ -79,7 +79,7 @@ exhaustiveness. Here is an example where the compiler will warn you that the match in receive isn't exhaustive: -@@snip [Faq.scala]($code$/scala/docs/faq/Faq.scala) { #exhaustiveness-check } +@@snip [Faq.scala](/akka-docs/src/test/scala/docs/faq/Faq.scala) { #exhaustiveness-check } ## Remoting diff --git a/akka-docs/src/main/paradox/additional/osgi.md b/akka-docs/src/main/paradox/additional/osgi.md index fc4f9788f3..3d6a4bd1eb 100644 --- a/akka-docs/src/main/paradox/additional/osgi.md +++ b/akka-docs/src/main/paradox/additional/osgi.md @@ -112,8 +112,8 @@ dynamic in this way. ActorRefs may safely be exposed to other bundles. To bootstrap Akka inside an OSGi environment, you can use the `akka.osgi.ActorSystemActivator` class to conveniently set up the ActorSystem. -@@snip [Activator.scala]($akka$/akka-osgi/src/test/scala/docs/osgi/Activator.scala) { #Activator } +@@snip [Activator.scala](/akka-osgi/src/test/scala/docs/osgi/Activator.scala) { #Activator } The goal here is to map the OSGi lifecycle more directly to the Akka lifecycle. The `ActorSystemActivator` creates the actor system with a class loader that finds resources (`application.conf` and `reference.conf` files) and classes -from the application bundle and all transitive dependencies. \ No newline at end of file +from the application bundle and all transitive dependencies. diff --git a/akka-docs/src/main/paradox/agents.md b/akka-docs/src/main/paradox/agents.md index ef3b34607e..4e05c236e4 100644 --- a/akka-docs/src/main/paradox/agents.md +++ b/akka-docs/src/main/paradox/agents.md @@ -55,10 +55,10 @@ value and providing an @scala[implicit] `ExecutionContext` to be used for it, @scala[for these examples we're going to use the default global one, but YMMV:] Scala -: @@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #create } +: @@snip [AgentDocSpec.scala](/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala) { #create } Java -: @@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #import-agent #create type=java } +: @@snip [AgentDocTest.java](/akka-docs/src/test/java/jdocs/agent/AgentDocTest.java) { #import-agent #create type=java } ## Reading an Agent's value @@ -66,10 +66,10 @@ Agents can be dereferenced (you can get an Agent's value) by invoking the Agent with @scala[parentheses] @java[`get()`] like this: Scala -: @@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #read-apply #read-get } +: @@snip [AgentDocSpec.scala](/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala) { #read-apply #read-get } Java -: @@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #read-get type=java } +: @@snip [AgentDocTest.java](/akka-docs/src/test/java/jdocs/agent/AgentDocTest.java) { #read-get type=java } Reading an Agent's current value does not involve any message passing and happens immediately. So while updates to an Agent are asynchronous, reading the @@ -80,7 +80,7 @@ state of an Agent is synchronous. 
You can also get a `Future` to the Agents value, that will be completed after the currently queued updates have completed: -@@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #import-future #read-future type=java } +@@snip [AgentDocTest.java](/akka-docs/src/test/java/jdocs/agent/AgentDocTest.java) { #import-future #read-future type=java } See @ref:[Futures](futures.md) for more information on `Futures`. @@ -97,10 +97,10 @@ occur in order. You apply a value or a function by invoking the `send` function. Scala -: @@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #send } +: @@snip [AgentDocSpec.scala](/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala) { #send } Java -: @@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #import-function #send type=java } +: @@snip [AgentDocTest.java](/akka-docs/src/test/java/jdocs/agent/AgentDocTest.java) { #import-function #send type=java } You can also dispatch a function to update the internal state but on its own thread. This does not use the reactive thread pool and can be used for @@ -109,19 +109,19 @@ method. Dispatches using either `sendOff` or `send` will still be executed in order. Scala -: @@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #send-off } +: @@snip [AgentDocSpec.scala](/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala) { #send-off } Java -: @@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #import-function #send-off type=java } +: @@snip [AgentDocTest.java](/akka-docs/src/test/java/jdocs/agent/AgentDocTest.java) { #import-function #send-off type=java } All `send` methods also have a corresponding `alter` method that returns a `Future`. See @ref:[`Future`s](futures.md) for more information on `Future`s. Scala -: @@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #alter #alter-off } +: @@snip [AgentDocSpec.scala](/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala) { #alter #alter-off } Java -: @@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #import-future #import-function #alter #alter-off type=java } +: @@snip [AgentDocTest.java](/akka-docs/src/test/java/jdocs/agent/AgentDocTest.java) { #import-future #import-function #alter #alter-off type=java } @@@ div { .group-scala } @@ -130,7 +130,7 @@ Java You can also get a `Future` to the Agents value, that will be completed after the currently queued updates have completed: -@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #read-future } +@@snip [AgentDocSpec.scala](/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala) { #read-future } See @ref:[`Future`s](futures.md) for more information on `Future`s. @@ -143,7 +143,7 @@ as-is. They are so-called 'persistent'. Example of monadic usage: -@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #monadic-example } +@@snip [AgentDocSpec.scala](/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala) { #monadic-example } @@@ @@ -163,6 +163,6 @@ transaction is aborted. 
@scala[Here's an example:] @@@ div { .group-scala } -@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #transfer-example } +@@snip [AgentDocSpec.scala](/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala) { #transfer-example } -@@@ \ No newline at end of file +@@@ diff --git a/akka-docs/src/main/paradox/camel.md b/akka-docs/src/main/paradox/camel.md index 312a1072d1..490c9df14b 100644 --- a/akka-docs/src/main/paradox/camel.md +++ b/akka-docs/src/main/paradox/camel.md @@ -54,10 +54,10 @@ APIs. The [camel-extra](http://code.google.com/p/camel-extra/) project provides Here's an example of using Camel's integration components in Akka. Scala -: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #Consumer-mina } +: @@snip [Introduction.scala](/akka-docs/src/test/scala/docs/camel/Introduction.scala) { #Consumer-mina } Java -: @@snip [MyEndpoint.java]($code$/java/jdocs/camel/MyEndpoint.java) { #Consumer-mina } +: @@snip [MyEndpoint.java](/akka-docs/src/test/java/jdocs/camel/MyEndpoint.java) { #Consumer-mina } The above example exposes an actor over a TCP endpoint via Apache Camel's [Mina component](http://camel.apache.org/mina2.html). The actor implements the @scala[`endpointUri`]@java[`getEndpointUri`] method to define @@ -68,7 +68,7 @@ component), the actor's @scala[`endpointUri`]@java[`getEndpointUri`] method shou @@@ div { .group-scala } -@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #Consumer } +@@snip [Introduction.scala](/akka-docs/src/test/scala/docs/camel/Introduction.scala) { #Consumer } @@@ @@ -85,10 +85,10 @@ Actors can also trigger message exchanges with external systems i.e. produce to Camel endpoints. Scala -: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #imports #Producer } +: @@snip [Introduction.scala](/akka-docs/src/test/scala/docs/camel/Introduction.scala) { #imports #Producer } Java -: @@snip [Orders.java]($code$/java/jdocs/camel/Orders.java) { #Producer } +: @@snip [Orders.java](/akka-docs/src/test/java/jdocs/camel/Orders.java) { #Producer } In the above example, any message sent to this actor will be sent to the JMS queue @scala[`orders`]@java[`Orders`]. Producer actors may choose from the same set of Camel @@ -98,7 +98,7 @@ components as Consumer actors do. Below an example of how to send a message to the `Orders` producer. -@@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #TellProducer } +@@snip [ProducerTestBase.java](/akka-docs/src/test/java/jdocs/camel/ProducerTestBase.java) { #TellProducer } @@@ @@ -127,10 +127,10 @@ The @extref[Camel](github:akka-camel/src/main/scala/akka/camel/Camel.scala) @sca Below you can see how you can get access to these Apache Camel objects. Scala -: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelExtension } +: @@snip [Introduction.scala](/akka-docs/src/test/scala/docs/camel/Introduction.scala) { #CamelExtension } Java -: @@snip [CamelExtensionTest.java]($code$/java/jdocs/camel/CamelExtensionTest.java) { #CamelExtension } +: @@snip [CamelExtensionTest.java](/akka-docs/src/test/java/jdocs/camel/CamelExtensionTest.java) { #CamelExtension } One `CamelExtension` is only loaded once for every one `ActorSystem`, which makes it safe to call the `CamelExtension` at any point in your code to get to the Apache Camel objects associated with it. 
There is one [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) and one `ProducerTemplate` for every one `ActorSystem` that uses a `CamelExtension`. @@ -141,10 +141,10 @@ This interface define a single method `getContext()` used to load the [CamelCont Below an example on how to add the ActiveMQ component to the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java), which is required when you would like to use the ActiveMQ component. Scala -: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelExtensionAddComponent } +: @@snip [Introduction.scala](/akka-docs/src/test/scala/docs/camel/Introduction.scala) { #CamelExtensionAddComponent } Java -: @@snip [CamelExtensionTest.java]($code$/java/jdocs/camel/CamelExtensionTest.java) { #CamelExtensionAddComponent } +: @@snip [CamelExtensionTest.java](/akka-docs/src/test/java/jdocs/camel/CamelExtensionTest.java) { #CamelExtensionAddComponent } The [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) joins the lifecycle of the `ActorSystem` and `CamelExtension` it is associated with; the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) is started when the `CamelExtension` is created, and it is shut down when the associated `ActorSystem` is shut down. The same is true for the `ProducerTemplate`. @@ -159,19 +159,19 @@ requested the actor to be created. Some Camel components can take a while to sta The @extref[Camel](github:akka-camel/src/main/scala/akka/camel/Camel.scala) @scala[trait]@java[interface] allows you to find out when the endpoint is activated or deactivated. Scala -: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelActivation } +: @@snip [Introduction.scala](/akka-docs/src/test/scala/docs/camel/Introduction.scala) { #CamelActivation } Java -: @@snip [ActivationTestBase.java]($code$/java/jdocs/camel/ActivationTestBase.java) { #CamelActivation } +: @@snip [ActivationTestBase.java](/akka-docs/src/test/java/jdocs/camel/ActivationTestBase.java) { #CamelActivation } The above code shows that you can get a `Future` to the activation of the route from the endpoint to the actor, or you can wait in a blocking fashion on the activation of the route. An `ActivationTimeoutException` is thrown if the endpoint could not be activated within the specified timeout. Deactivation works in a similar fashion: Scala -: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelDeactivation } +: @@snip [Introduction.scala](/akka-docs/src/test/scala/docs/camel/Introduction.scala) { #CamelDeactivation } Java -: @@snip [ActivationTestBase.java]($code$/java/jdocs/camel/ActivationTestBase.java) { #CamelDeactivation } +: @@snip [ActivationTestBase.java](/akka-docs/src/test/java/jdocs/camel/ActivationTestBase.java) { #CamelDeactivation } Deactivation of a Consumer or a Producer actor happens when the actor is terminated. For a Consumer, the route to the actor is stopped. For a Producer, the [SendProcessor](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java) is stopped. A `DeActivationTimeoutException` is thrown if the associated camel objects could not be deactivated within the specified timeout. 
@@ -184,10 +184,10 @@ For example, the following actor class (Consumer1) implements the messages from the `file:data/input/actor` Camel endpoint. Scala -: @@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer1 } +: @@snip [Consumers.scala](/akka-docs/src/test/scala/docs/camel/Consumers.scala) { #Consumer1 } Java -: @@snip [Consumer1.java]($code$/java/jdocs/camel/Consumer1.java) { #Consumer1 } +: @@snip [Consumer1.java](/akka-docs/src/test/java/jdocs/camel/Consumer1.java) { #Consumer1 } Whenever a file is put into the data/input/actor directory, its content is picked up by the Camel [file component](http://camel.apache.org/file2.html) and sent as message to the @@ -200,10 +200,10 @@ component to start an embedded [Jetty](http://www.eclipse.org/jetty/) server, ac from localhost on port 8877. Scala -: @@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer2 } +: @@snip [Consumers.scala](/akka-docs/src/test/scala/docs/camel/Consumers.scala) { #Consumer2 } Java -: @@snip [Consumer2.java]($code$/java/jdocs/camel/Consumer2.java) { #Consumer2 } +: @@snip [Consumer2.java](/akka-docs/src/test/java/jdocs/camel/Consumer2.java) { #Consumer2 } After starting the actor, clients can send messages to that actor by POSTing to `http://localhost:8877/camel/default`. The actor sends a response by using the @@ -231,10 +231,10 @@ special akka.camel.Ack message (positive acknowledgement) or a akka.actor.Status acknowledgement). Scala -: @@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer3 } +: @@snip [Consumers.scala](/akka-docs/src/test/scala/docs/camel/Consumers.scala) { #Consumer3 } Java -: @@snip [Consumer3.java]($code$/java/jdocs/camel/Consumer3.java) { #Consumer3 } +: @@snip [Consumer3.java](/akka-docs/src/test/java/jdocs/camel/Consumer3.java) { #Consumer3 } ### Consumer timeout @@ -252,10 +252,10 @@ result in the [Exchange](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0 The timeout on the consumer actor can be overridden with the `replyTimeout`, as shown below. Scala -: @@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer4 } +: @@snip [Consumers.scala](/akka-docs/src/test/scala/docs/camel/Consumers.scala) { #Consumer4 } Java -: @@snip [Consumer4.java]($code$/java/jdocs/camel/Consumer4.java) { #Consumer4 } +: @@snip [Consumer4.java](/akka-docs/src/test/java/jdocs/camel/Consumer4.java) { #Consumer4 } ## Producer Actors @@ -263,10 +263,10 @@ For sending messages to Camel endpoints, actors need to @scala[mixin the @extref @java[inherit from the @extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala) class] and implement the `getEndpointUri` method. Scala -: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Producer1 } +: @@snip [Producers.scala](/akka-docs/src/test/scala/docs/camel/Producers.scala) { #Producer1 } Java -: @@snip [Producer1.java]($code$/java/jdocs/camel/Producer1.java) { #Producer1 } +: @@snip [Producer1.java](/akka-docs/src/test/java/jdocs/camel/Producer1.java) { #Producer1 } Producer1 inherits a default implementation of the @scala[`receive`]@java[`onReceive`] method from the @scala[Producer trait]@java[@extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala)] class. To customize a producer actor's default behavior you must override the @@ -282,10 +282,10 @@ following example uses the ask pattern to send a message to a Producer actor and waits for a response. 
Scala -: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #AskProducer } +: @@snip [Producers.scala](/akka-docs/src/test/scala/docs/camel/Producers.scala) { #AskProducer } Java -: @@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #AskProducer } +: @@snip [ProducerTestBase.java](/akka-docs/src/test/java/jdocs/camel/ProducerTestBase.java) { #AskProducer } The future contains the response `CamelMessage`, or an `AkkaCamelException` if an error occurred; the exception contains the headers of the response. @@ -298,22 +298,22 @@ message is forwarded to a target actor instead of being replied to the original sender. Scala -: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #RouteResponse } +: @@snip [Producers.scala](/akka-docs/src/test/scala/docs/camel/Producers.scala) { #RouteResponse } Java -: @@snip [ResponseReceiver.java]($code$/java/jdocs/camel/ResponseReceiver.java) { #RouteResponse } - @@snip [Forwarder.java]($code$/java/jdocs/camel/Forwarder.java) { #RouteResponse } - @@snip [OnRouteResponseTestBase.java]($code$/java/jdocs/camel/OnRouteResponseTestBase.java) { #RouteResponse } +: @@snip [ResponseReceiver.java](/akka-docs/src/test/java/jdocs/camel/ResponseReceiver.java) { #RouteResponse } + @@snip [Forwarder.java](/akka-docs/src/test/java/jdocs/camel/Forwarder.java) { #RouteResponse } + @@snip [OnRouteResponseTestBase.java](/akka-docs/src/test/java/jdocs/camel/OnRouteResponseTestBase.java) { #RouteResponse } Before producing messages to endpoints, producer actors can pre-process them by overriding the @scala[@extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala).transformOutgoingMessage] @java[@extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala).onTransformOutgoingMessage] method. Scala -: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #TransformOutgoingMessage } +: @@snip [Producers.scala](/akka-docs/src/test/scala/docs/camel/Producers.scala) { #TransformOutgoingMessage } Java -: @@snip [Transformer.java]($code$/java/jdocs/camel/Transformer.java) { #TransformOutgoingMessage } +: @@snip [Transformer.java](/akka-docs/src/test/java/jdocs/camel/Transformer.java) { #TransformOutgoingMessage } ### Producer configuration options @@ -323,10 +323,10 @@ respectively). By default, the producer initiates an in-out message exchange with the endpoint. For initiating an in-only exchange, producer actors have to override the @scala[`oneway`]@java[`isOneway`] method to return true. Scala -: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Oneway } +: @@snip [Producers.scala](/akka-docs/src/test/scala/docs/camel/Producers.scala) { #Oneway } Java -: @@snip [OnewaySender.java]($code$/java/jdocs/camel/OnewaySender.java) { #Oneway } +: @@snip [OnewaySender.java](/akka-docs/src/test/java/jdocs/camel/OnewaySender.java) { #Oneway } ### Message correlation @@ -334,10 +334,10 @@ To correlate request with response messages, applications can set the `Message.MessageExchangeId` message header.
Scala -: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Correlate } +: @@snip [Producers.scala](/akka-docs/src/test/scala/docs/camel/Producers.scala) { #Correlate } Java -: @@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #Correlate } +: @@snip [ProducerTestBase.java](/akka-docs/src/test/java/jdocs/camel/ProducerTestBase.java) { #Correlate } ### ProducerTemplate @@ -346,19 +346,19 @@ convenient way for actors to produce messages to Camel endpoints. Actors may als `ProducerTemplate` for producing messages to endpoints. Scala -: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #ProducerTemplate } +: @@snip [Producers.scala](/akka-docs/src/test/scala/docs/camel/Producers.scala) { #ProducerTemplate } Java -: @@snip [MyActor.java]($code$/java/jdocs/camel/MyActor.java) { #ProducerTemplate } +: @@snip [MyActor.java](/akka-docs/src/test/java/jdocs/camel/MyActor.java) { #ProducerTemplate } For initiating a two-way message exchange, one of the `ProducerTemplate.request*` methods must be used. Scala -: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #RequestProducerTemplate } +: @@snip [Producers.scala](/akka-docs/src/test/scala/docs/camel/Producers.scala) { #RequestProducerTemplate } Java -: @@snip [RequestBodyActor.java]($code$/java/jdocs/camel/RequestBodyActor.java) { #RequestProducerTemplate } +: @@snip [RequestBodyActor.java](/akka-docs/src/test/java/jdocs/camel/RequestBodyActor.java) { #RequestProducerTemplate } ## Asynchronous routing @@ -463,12 +463,12 @@ reference an `ActorRef` directly as shown in the below example, The route starts ends at the target actor. Scala -: @@snip [CustomRoute.scala]($code$/scala/docs/camel/CustomRoute.scala) { #CustomRoute } +: @@snip [CustomRoute.scala](/akka-docs/src/test/scala/docs/camel/CustomRoute.scala) { #CustomRoute } Java -: @@snip [Responder.java]($code$/java/jdocs/camel/Responder.java) { #CustomRoute } - @@snip [CustomRouteBuilder.java]($code$/java/jdocs/camel/CustomRouteBuilder.java) { #CustomRoute } - @@snip [CustomRouteTestBase.java]($code$/java/jdocs/camel/CustomRouteTestBase.java) { #CustomRoute } +: @@snip [Responder.java](/akka-docs/src/test/java/jdocs/camel/Responder.java) { #CustomRoute } + @@snip [CustomRouteBuilder.java](/akka-docs/src/test/java/jdocs/camel/CustomRouteBuilder.java) { #CustomRoute } + @@snip [CustomRouteTestBase.java](/akka-docs/src/test/java/jdocs/camel/CustomRouteTestBase.java) { #CustomRoute } @java[The `CamelPath.toCamelUri` converts the `ActorRef` to the Camel actor component URI format which points to the actor endpoint as described above.] When a message is received on the jetty endpoint, it is routed to the `Responder` actor, which in return replies back to the client of @@ -487,10 +487,10 @@ The following examples demonstrate how to extend a route to a consumer actor for handling exceptions thrown by that actor. 
Scala -: @@snip [CustomRoute.scala]($code$/scala/docs/camel/CustomRoute.scala) { #ErrorThrowingConsumer } +: @@snip [CustomRoute.scala](/akka-docs/src/test/scala/docs/camel/CustomRoute.scala) { #ErrorThrowingConsumer } Java -: @@snip [ErrorThrowingConsumer.java]($code$/java/jdocs/camel/ErrorThrowingConsumer.java) { #ErrorThrowingConsumer } +: @@snip [ErrorThrowingConsumer.java](/akka-docs/src/test/java/jdocs/camel/ErrorThrowingConsumer.java) { #ErrorThrowingConsumer } The above ErrorThrowingConsumer sends the Failure back to the sender in preRestart because the Exception that is thrown in the actor would diff --git a/akka-docs/src/main/paradox/cluster-client.md b/akka-docs/src/main/paradox/cluster-client.md index b8393e9a1e..5c0bcf2998 100644 --- a/akka-docs/src/main/paradox/cluster-client.md +++ b/akka-docs/src/main/paradox/cluster-client.md @@ -105,28 +105,28 @@ akka.extensions = ["akka.cluster.client.ClusterClientReceptionist"] Next, register the actors that should be available for the client. Scala -: @@snip [ClusterClientSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #server } +: @@snip [ClusterClientSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #server } Java -: @@snip [ClusterClientTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #server } +: @@snip [ClusterClientTest.java](/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #server } On the client you create the @unidoc[ClusterClient] actor and use it as a gateway for sending messages to the actors identified by their path (without address information) somewhere in the cluster. Scala -: @@snip [ClusterClientSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #client } +: @@snip [ClusterClientSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #client } Java -: @@snip [ClusterClientTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #client } +: @@snip [ClusterClientTest.java](/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #client } The `initialContacts` parameter is a @scala[`Set[ActorPath]`]@java[`Set`], which can be created like this: Scala -: @@snip [ClusterClientSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #initialContacts } +: @@snip [ClusterClientSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #initialContacts } Java -: @@snip [ClusterClientTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #initialContacts } +: @@snip [ClusterClientTest.java](/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #initialContacts } You will probably define the address information of the initial contact points in configuration or system property. See also [Configuration](#cluster-client-config). @@ -160,18 +160,18 @@ receptionists), as they become available. The code illustrates subscribing to th initial state. 
Scala -: @@snip [ClusterClientSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #clientEventsListener } +: @@snip [ClusterClientSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #clientEventsListener } Java -: @@snip [ClusterClientTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #clientEventsListener } +: @@snip [ClusterClientTest.java](/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #clientEventsListener } Similarly, we can have an actor that learns what cluster clients are connected to a @unidoc[ClusterClientReceptionist]: Scala -: @@snip [ClusterClientSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #receptionistEventsListener } +: @@snip [ClusterClientSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #receptionistEventsListener } Java -: @@snip [ClusterClientTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #receptionistEventsListener } +: @@snip [ClusterClientTest.java](/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #receptionistEventsListener } ## Configuration @@ -179,7 +179,7 @@ Java The @unidoc[ClusterClientReceptionist] extension (or @unidoc[akka.cluster.client.ClusterReceptionistSettings]) can be configured with the following properties: -@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #receptionist-ext-config } +@@snip [reference.conf](/akka-cluster-tools/src/main/resources/reference.conf) { #receptionist-ext-config } The following configuration properties are read by the @unidoc[ClusterClientSettings] when created with an @scala[@scaladoc[`ActorSystem`](akka.actor.ActorSystem)]@java[@javadoc[`ActorSystem`](akka.actor.ActorSystem)] parameter. It is also possible to amend the @unidoc[ClusterClientSettings] @@ -187,7 +187,7 @@ or create it from another config section with the same layout as below. @unidoc[ a parameter to the @scala[@scaladoc[`ClusterClient.props`](akka.cluster.client.ClusterClient$)]@java[@javadoc[`ClusterClient.props`](akka.cluster.client.ClusterClient$)] factory method, i.e. each client can be configured with different settings if needed. -@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #cluster-client-config } +@@snip [reference.conf](/akka-cluster-tools/src/main/resources/reference.conf) { #cluster-client-config } ## Failure handling diff --git a/akka-docs/src/main/paradox/cluster-dc.md b/akka-docs/src/main/paradox/cluster-dc.md index 3bf0e5e21c..e47874d5ef 100644 --- a/akka-docs/src/main/paradox/cluster-dc.md +++ b/akka-docs/src/main/paradox/cluster-dc.md @@ -98,10 +98,10 @@ if you see this in log messages.
You can retrieve information about what data center a member belongs to: Scala -: @@snip [ClusterDocSpec.scala]($code$/scala/docs/cluster/ClusterDocSpec.scala) { #dcAccess } +: @@snip [ClusterDocSpec.scala](/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala) { #dcAccess } Java -: @@snip [ClusterDocTest.java]($code$/java/jdocs/cluster/ClusterDocTest.java) { #dcAccess } +: @@snip [ClusterDocTest.java](/akka-docs/src/test/java/jdocs/cluster/ClusterDocTest.java) { #dcAccess } ## Failure Detection @@ -156,10 +156,10 @@ having a global singleton in one data center and accessing it from other data ce This is how to create a singleton proxy for a specific data center: Scala -: @@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-proxy-dc } +: @@snip [ClusterSingletonManagerSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-proxy-dc } Java -: @@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-proxy-dc } +: @@snip [ClusterSingletonManagerTest.java](/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-proxy-dc } If the own data center is given as the `withDataCenter` parameter, the proxy targets the singleton in the own data center; that is also the default if `withDataCenter` is not given. @@ -193,10 +193,10 @@ accessing them from other data centers. This is how to create a sharding proxy for a specific data center: Scala -: @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #proxy-dc } +: @@snip [ClusterShardingSpec.scala](/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #proxy-dc } Java -: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #proxy-dc } +: @@snip [ClusterShardingTest.java](/akka-docs/src/test/java/jdocs/sharding/ClusterShardingTest.java) { #proxy-dc } Another way to manage global entities is to make sure that certain entity ids are located in only one data center by routing the messages to the right region. For example, the routing function diff --git a/akka-docs/src/main/paradox/cluster-metrics.md b/akka-docs/src/main/paradox/cluster-metrics.md index 1d39e69f60..0aca5c42cf 100644 --- a/akka-docs/src/main/paradox/cluster-metrics.md +++ b/akka-docs/src/main/paradox/cluster-metrics.md @@ -138,18 +138,18 @@ Let's take a look at this router in action. What can be more demanding than calc
What can be more demanding than calc The backend worker that performs the factorial calculation: Scala -: @@snip [FactorialBackend.scala]($code$/scala/docs/cluster/FactorialBackend.scala) { #backend } +: @@snip [FactorialBackend.scala](/akka-docs/src/test/scala/docs/cluster/FactorialBackend.scala) { #backend } Java -: @@snip [FactorialBackend.java]($code$/java/jdocs/cluster/FactorialBackend.java) { #backend } +: @@snip [FactorialBackend.java](/akka-docs/src/test/java/jdocs/cluster/FactorialBackend.java) { #backend } The frontend that receives user jobs and delegates to the backends via the router: Scala -: @@snip [FactorialFrontend.scala]($code$/scala/docs/cluster/FactorialFrontend.scala) { #frontend } +: @@snip [FactorialFrontend.scala](/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala) { #frontend } Java -: @@snip [FactorialFrontend.java]($code$/java/jdocs/cluster/FactorialFrontend.java) { #frontend } +: @@snip [FactorialFrontend.java](/akka-docs/src/test/java/jdocs/cluster/FactorialFrontend.java) { #frontend } As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows: @@ -180,10 +180,10 @@ other things work in the same way as other routers. The same type of router could also have been defined in code: Scala -: @@snip [FactorialFrontend.scala]($code$/scala/docs/cluster/FactorialFrontend.scala) { #router-lookup-in-code #router-deploy-in-code } +: @@snip [FactorialFrontend.scala](/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala) { #router-lookup-in-code #router-deploy-in-code } Java -: @@snip [FactorialFrontend.java]($code$/java/jdocs/cluster/FactorialFrontend.java) { #router-lookup-in-code #router-deploy-in-code } +: @@snip [FactorialFrontend.java](/akka-docs/src/test/java/jdocs/cluster/FactorialFrontend.java) { #router-lookup-in-code #router-deploy-in-code } The easiest way to run **Adaptive Load Balancing** example yourself is to download the ready to run @scala[@extref[Akka Cluster Sample with Scala](ecs:akka-samples-cluster-scala)] @java[@extref[Akka Cluster Sample with Java](ecs:akka-samples-cluster-java)] @@ -196,10 +196,10 @@ The source code of this sample can be found in the It is possible to subscribe to the metrics events directly to implement other functionality. Scala -: @@snip [MetricsListener.scala]($code$/scala/docs/cluster/MetricsListener.scala) { #metrics-listener } +: @@snip [MetricsListener.scala](/akka-docs/src/test/scala/docs/cluster/MetricsListener.scala) { #metrics-listener } Java -: @@snip [MetricsListener.java]($code$/java/jdocs/cluster/MetricsListener.java) { #metrics-listener } +: @@snip [MetricsListener.java](/akka-docs/src/test/java/jdocs/cluster/MetricsListener.java) { #metrics-listener } ## Custom Metrics Collector @@ -217,4 +217,4 @@ Custom metrics collector implementation class must be specified in the The Cluster metrics extension can be configured with the following properties: -@@snip [reference.conf]($akka$/akka-cluster-metrics/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-cluster-metrics/src/main/resources/reference.conf) diff --git a/akka-docs/src/main/paradox/cluster-routing.md b/akka-docs/src/main/paradox/cluster-routing.md index f2fef7baf9..eb8f8bef96 100644 --- a/akka-docs/src/main/paradox/cluster-routing.md +++ b/akka-docs/src/main/paradox/cluster-routing.md @@ -72,10 +72,10 @@ Set it to a lower value if you want to limit total number of routees. 
The same type of router could also have been defined in code: Scala -: @@snip [StatsService.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala) { #router-lookup-in-code } +: @@snip [StatsService.scala](/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala) { #router-lookup-in-code } Java -: @@snip [StatsService.java]($code$/java/jdocs/cluster/StatsService.java) { #router-lookup-in-code } +: @@snip [StatsService.java](/akka-docs/src/test/java/jdocs/cluster/StatsService.java) { #router-lookup-in-code } See the [configuration](#cluster-configuration) section for further descriptions of the settings. @@ -93,31 +93,31 @@ the average number of characters per word when all results have been collected. Messages: Scala -: @@snip [StatsMessages.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsMessages.scala) { #messages } +: @@snip [StatsMessages.scala](/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsMessages.scala) { #messages } Java -: @@snip [StatsMessages.java]($code$/java/jdocs/cluster/StatsMessages.java) { #messages } +: @@snip [StatsMessages.java](/akka-docs/src/test/java/jdocs/cluster/StatsMessages.java) { #messages } The worker that counts the number of characters in each word: Scala -: @@snip [StatsWorker.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala) { #worker } +: @@snip [StatsWorker.scala](/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala) { #worker } Java -: @@snip [StatsWorker.java]($code$/java/jdocs/cluster/StatsWorker.java) { #worker } +: @@snip [StatsWorker.java](/akka-docs/src/test/java/jdocs/cluster/StatsWorker.java) { #worker } The service that receives text from users and splits it up into words, delegates to workers and aggregates: @@@ div { .group-scala } -@@snip [StatsService.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala) { #service } +@@snip [StatsService.scala](/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala) { #service } @@@ @@@ div { .group-java } -@@snip [StatsService.java]($code$/java/jdocs/cluster/StatsService.java) { #service } -@@snip [StatsAggregator.java]($code$/java/jdocs/cluster/StatsAggregator.java) { #aggregator } +@@snip [StatsService.java](/akka-docs/src/test/java/jdocs/cluster/StatsService.java) { #service } +@@snip [StatsAggregator.java](/akka-docs/src/test/java/jdocs/cluster/StatsAggregator.java) { #aggregator } @@@
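To make the shape of the sample concrete, here is a stripped-down Scala sketch of such a word-length worker (hypothetical names; the actual implementation lives in the StatsWorker snippets referenced above):

```scala
import akka.actor.Actor

// Counts characters per word, caching the length of words it has
// already seen, and replies to the requester with the result.
class WordLengthWorker extends Actor {
  private var cache = Map.empty[String, Int]

  def receive = {
    case word: String =>
      val length = cache.getOrElse(word, {
        val n = word.length
        cache += word -> n
        n
      })
      sender() ! length
  }
}
```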
@@ -206,7 +206,7 @@ Scala @@@ Java -: @@snip [StatsSampleOneMasterMain.java]($code$/java/jdocs/cluster/StatsSampleOneMasterMain.java) { #create-singleton-manager } +: @@snip [StatsSampleOneMasterMain.java](/akka-docs/src/test/java/jdocs/cluster/StatsSampleOneMasterMain.java) { #create-singleton-manager } We also need an actor on each node that keeps track of where current single master exists and delegates jobs to the `StatsService`. That is provided by the `ClusterSingletonProxy`: @@ -223,7 +223,7 @@ Scala @@@ Java -: @@snip [StatsSampleOneMasterMain.java]($code$/java/jdocs/cluster/StatsSampleOneMasterMain.java) { #singleton-proxy } +: @@snip [StatsSampleOneMasterMain.java](/akka-docs/src/test/java/jdocs/cluster/StatsSampleOneMasterMain.java) { #singleton-proxy } The `ClusterSingletonProxy` receives text from users and delegates to the current `StatsService`, the single master. It listens to cluster events to lookup the `StatsService` on the oldest node. diff --git a/akka-docs/src/main/paradox/cluster-sharding.md b/akka-docs/src/main/paradox/cluster-sharding.md index 27965f98da..afba0174c5 100644 --- a/akka-docs/src/main/paradox/cluster-sharding.md +++ b/akka-docs/src/main/paradox/cluster-sharding.md @@ -55,10 +55,10 @@ See @ref:[Downing](cluster-usage.md#automatic-vs-manual-downing). This is how an entity actor may look like: Scala -: @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-actor } +: @@snip [ClusterShardingSpec.scala](/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-actor } Java -: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #counter-actor } +: @@snip [ClusterShardingTest.java](/akka-docs/src/test/java/jdocs/sharding/ClusterShardingTest.java) { #counter-actor } The above actor uses event sourcing and the support provided in @scala[`PersistentActor`] @java[`AbstractPersistentActor`] to store its state. It does not have to be a persistent actor, but in case of failure or migration of entities between nodes it must be able to recover @@ -75,19 +75,19 @@ in case if there is no match between the roles of the current cluster node and t `ClusterShardingSettings`. Scala -: @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-start } +: @@snip [ClusterShardingSpec.scala](/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-start } Java -: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #counter-start } +: @@snip [ClusterShardingTest.java](/akka-docs/src/test/java/jdocs/sharding/ClusterShardingTest.java) { #counter-start } The @scala[`extractEntityId` and `extractShardId` are two] @java[`messageExtractor` defines] application specific @scala[functions] @java[methods] to extract the entity identifier and the shard identifier from incoming messages. 
Scala -: @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-extractor } +: @@snip [ClusterShardingSpec.scala](/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-extractor } Java -: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #counter-extractor } +: @@snip [ClusterShardingTest.java](/akka-docs/src/test/java/jdocs/sharding/ClusterShardingTest.java) { #counter-extractor } This example illustrates two different ways to define the entity identifier in the messages: @@ -122,10 +122,10 @@ delegate the message to the right node and it will create the entity actor on de first message for a specific entity is delivered. Scala -: @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-usage } +: @@snip [ClusterShardingSpec.scala](/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-usage } Java -: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #counter-usage } +: @@snip [ClusterShardingTest.java](/akka-docs/src/test/java/jdocs/sharding/ClusterShardingTest.java) { #counter-usage } @@@ div { .group-scala } @@ -344,10 +344,10 @@ the `rememberEntities` flag to true in `ClusterShardingSettings` when calling extract from the `EntityId`. Scala -: @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #extractShardId-StartEntity } +: @@snip [ClusterShardingSpec.scala](/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #extractShardId-StartEntity } Java -: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #extractShardId-StartEntity } +: @@snip [ClusterShardingTest.java](/akka-docs/src/test/java/jdocs/sharding/ClusterShardingTest.java) { #extractShardId-StartEntity } When configured to remember entities, whenever a `Shard` is rebalanced onto another node or recovers after a crash it will recreate all the entities which were previously @@ -381,18 +381,18 @@ you need to create an intermediate parent actor that defines the `supervisorStra child entity actor. Scala -: @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #supervisor } +: @@snip [ClusterShardingSpec.scala](/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #supervisor } Java -: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #supervisor } +: @@snip [ClusterShardingTest.java](/akka-docs/src/test/java/jdocs/sharding/ClusterShardingTest.java) { #supervisor } You start such a supervisor in the same way as if it was the entity actor. 
Scala -: @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-supervisor-start } +: @@snip [ClusterShardingSpec.scala](/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-supervisor-start } Java -: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #counter-supervisor-start } +: @@snip [ClusterShardingTest.java](/akka-docs/src/test/java/jdocs/sharding/ClusterShardingTest.java) { #counter-supervisor-start } Note that stopped entities will be started again when a new message is targeted to the entity. @@ -466,7 +466,7 @@ with the same layout as below. `ClusterShardingSettings` is a parameter to the ` the `ClusterSharding` extension, i.e. each entity type can be configured with different settings if needed. -@@snip [reference.conf]($akka$/akka-cluster-sharding/src/main/resources/reference.conf) { #sharding-ext-config } +@@snip [reference.conf](/akka-cluster-sharding/src/main/resources/reference.conf) { #sharding-ext-config } A custom shard allocation strategy can be defined in an optional parameter to `ClusterSharding.start`. See the API documentation of @scala[`ShardAllocationStrategy`] @java[`AbstractShardAllocationStrategy`] for details diff --git a/akka-docs/src/main/paradox/cluster-singleton.md b/akka-docs/src/main/paradox/cluster-singleton.md index 7a33d53eda..08a1a4f1ea 100644 --- a/akka-docs/src/main/paradox/cluster-singleton.md +++ b/akka-docs/src/main/paradox/cluster-singleton.md @@ -99,19 +99,19 @@ Before explaining how to create a cluster singleton actor, let's define message which will be used by the singleton. Scala -: @@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #singleton-message-classes } +: @@snip [ClusterSingletonManagerSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #singleton-message-classes } Java -: @@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/TestSingletonMessages.java) { #singleton-message-classes } +: @@snip [ClusterSingletonManagerTest.java](/akka-cluster-tools/src/test/java/akka/cluster/singleton/TestSingletonMessages.java) { #singleton-message-classes } On each node in the cluster you need to start the `ClusterSingletonManager` and supply the `Props` of the singleton actor, in this case the JMS queue consumer. Scala -: @@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-manager } +: @@snip [ClusterSingletonManagerSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-manager } Java -: @@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-manager } +: @@snip [ClusterSingletonManagerTest.java](/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-manager } Here we limit the singleton to nodes tagged with the `"worker"` role, but all nodes, independent of role, can be used by not specifying `withRole`. @@ -123,19 +123,19 @@ perfectly fine `terminationMessage` if you only need to stop the actor.
Here is how the singleton actor handles the `terminationMessage` in this example. Scala -: @@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #consumer-end } +: @@snip [ClusterSingletonManagerSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #consumer-end } Java -: @@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/Consumer.java) { #consumer-end } +: @@snip [ClusterSingletonManagerTest.java](/akka-cluster-tools/src/test/java/akka/cluster/singleton/Consumer.java) { #consumer-end } With the names given above, access to the singleton can be obtained from any cluster node using a properly configured proxy. Scala -: @@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-proxy } +: @@snip [ClusterSingletonManagerSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-proxy } Java -: @@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-proxy } +: @@snip [ClusterSingletonManagerTest.java](/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-proxy } A more comprehensive sample is available in the tutorial named @scala[[Distributed workers with Akka and Scala!](https://github.com/typesafehub/activator-akka-distributed-workers)]@java[[Distributed workers with Akka and Java!](https://github.com/typesafehub/activator-akka-distributed-workers-java)]. @@ -148,7 +148,7 @@ or create it from another config section with the same layout as below. `Cluster a parameter to the `ClusterSingletonManager.props` factory method, i.e. each singleton can be configured with different settings if needed. -@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #singleton-config } +@@snip [reference.conf](/akka-cluster-tools/src/main/resources/reference.conf) { #singleton-config } The following configuration properties are read by the `ClusterSingletonProxySettings` when created with an `ActorSystem` parameter. It is also possible to amend the `ClusterSingletonProxySettings` @@ -156,23 +156,23 @@ or create it from another config section with the same layout as below. `Cluster a parameter to the `ClusterSingletonProxy.props` factory method, i.e. each singleton proxy can be configured with different settings if needed. -@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #singleton-proxy-config } +@@snip [reference.conf](/akka-cluster-tools/src/main/resources/reference.conf) { #singleton-proxy-config } ## Supervision Sometimes it is useful to add supervision for the Cluster Singleton itself. To accomplish this you need to add a parent supervisor actor which will be used to create the 'real' singleton instance.
Below is an example implementation (credit to [this StackOverflow answer](https://stackoverflow.com/a/36716708/779513)) Scala -: @@snip [ClusterSingletonSupervision.scala]($akka$/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala) { #singleton-supervisor-actor } +: @@snip [ClusterSingletonSupervision.scala](/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala) { #singleton-supervisor-actor } Java -: @@snip [SupervisorActor.java]($akka$/akka-docs/src/test/java/jdocs/cluster/singleton/SupervisorActor.java) { #singleton-supervisor-actor } +: @@snip [SupervisorActor.java](/akka-docs/src/test/java/jdocs/cluster/singleton/SupervisorActor.java) { #singleton-supervisor-actor } And used here Scala -: @@snip [ClusterSingletonSupervision.scala]($akka$/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala) { #singleton-supervisor-actor-usage } +: @@snip [ClusterSingletonSupervision.scala](/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala) { #singleton-supervisor-actor-usage } Java -: @@snip [ClusterSingletonSupervision.java]($akka$/akka-docs/src/test/java/jdocs/cluster/singleton/ClusterSingletonSupervision.java) { #singleton-supervisor-actor-usage-imports } -@@snip [ClusterSingletonSupervision.java]($akka$/akka-docs/src/test/java/jdocs/cluster/singleton/ClusterSingletonSupervision.java) { #singleton-supervisor-actor-usage } +: @@snip [ClusterSingletonSupervision.java](/akka-docs/src/test/java/jdocs/cluster/singleton/ClusterSingletonSupervision.java) { #singleton-supervisor-actor-usage-imports } +@@snip [ClusterSingletonSupervision.java](/akka-docs/src/test/java/jdocs/cluster/singleton/ClusterSingletonSupervision.java) { #singleton-supervisor-actor-usage } diff --git a/akka-docs/src/main/paradox/cluster-usage.md b/akka-docs/src/main/paradox/cluster-usage.md index 724a967130..5187e6dab4 100644 --- a/akka-docs/src/main/paradox/cluster-usage.md +++ b/akka-docs/src/main/paradox/cluster-usage.md @@ -155,10 +155,10 @@ ip-addresses or host names of the machines in `application.conf` instead of `127 An actor that uses the cluster extension may look like this: Scala -: @@snip [SimpleClusterListener.scala]($code$/scala/docs/cluster/SimpleClusterListener.scala) { type=scala } +: @@snip [SimpleClusterListener.scala](/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala) { type=scala } Java -: @@snip [SimpleClusterListener.java]($code$/java/jdocs/cluster/SimpleClusterListener.java) { type=java } +: @@snip [SimpleClusterListener.java](/akka-docs/src/test/java/jdocs/cluster/SimpleClusterListener.java) { type=java } The actor registers itself as subscriber of certain cluster events. It receives events corresponding to the current state of the cluster when the subscription starts and then it receives events for changes that happen in the cluster. @@ -239,10 +239,10 @@ supposed to be the first seed node, and that should be placed first in the param `joinSeedNodes`. 
Scala -: @@snip [ClusterDocSpec.scala]($code$/scala/docs/cluster/ClusterDocSpec.scala) { #join-seed-nodes } +: @@snip [ClusterDocSpec.scala](/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala) { #join-seed-nodes } Java -: @@snip [ClusterDocTest.java]($code$/java/jdocs/cluster/ClusterDocTest.java) { #join-seed-nodes-imports #join-seed-nodes } +: @@snip [ClusterDocTest.java](/akka-docs/src/test/java/jdocs/cluster/ClusterDocTest.java) { #join-seed-nodes-imports #join-seed-nodes } Unsuccessful attempts to contact seed nodes are automatically retried after the time period defined in configuration property `seed-node-timeout`. Unsuccessful attempt to join a specific seed node is @@ -367,10 +367,10 @@ This can be performed using [JMX](#cluster-jmx) or [HTTP](#cluster-http). It can also be performed programmatically with: Scala -: @@snip [ClusterDocSpec.scala]($code$/scala/docs/cluster/ClusterDocSpec.scala) { #leave } +: @@snip [ClusterDocSpec.scala](/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala) { #leave } Java -: @@snip [ClusterDocTest.java]($code$/java/jdocs/cluster/ClusterDocTest.java) { #leave } +: @@snip [ClusterDocTest.java](/akka-docs/src/test/java/jdocs/cluster/ClusterDocTest.java) { #leave } Note that this command can be issued to any member in the cluster, not necessarily the one that is leaving. @@ -413,10 +413,10 @@ You can subscribe to change notifications of the cluster membership by using @scala[`Cluster(system).subscribe`]@java[`Cluster.get(system).subscribe`]. Scala -: @@snip [SimpleClusterListener2.scala]($code$/scala/docs/cluster/SimpleClusterListener2.scala) { #subscribe } +: @@snip [SimpleClusterListener2.scala](/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala) { #subscribe } Java -: @@snip [SimpleClusterListener2.java]($code$/java/jdocs/cluster/SimpleClusterListener2.java) { #subscribe } +: @@snip [SimpleClusterListener2.java](/akka-docs/src/test/java/jdocs/cluster/SimpleClusterListener2.java) { #subscribe } A snapshot of the full state, `akka.cluster.ClusterEvent.CurrentClusterState`, is sent to the subscriber as the first message, followed by events for incremental updates. @@ -434,10 +434,10 @@ listening to the events when they occurred in the past. Note that those initial to the current state and it is not the full history of all changes that actually has occurred in the cluster. Scala -: @@snip [SimpleClusterListener.scala]($code$/scala/docs/cluster/SimpleClusterListener.scala) { #subscribe } +: @@snip [SimpleClusterListener.scala](/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala) { #subscribe } Java -: @@snip [SimpleClusterListener.java]($code$/java/jdocs/cluster/SimpleClusterListener.java) { #subscribe } +: @@snip [SimpleClusterListener.java](/akka-docs/src/test/java/jdocs/cluster/SimpleClusterListener.java) { #subscribe } The events to track the life-cycle of members are: @@ -473,18 +473,18 @@ added or removed to the cluster dynamically. 
Messages: Scala -: @@snip [TransformationMessages.scala]($code$/scala/docs/cluster/TransformationMessages.scala) { #messages } +: @@snip [TransformationMessages.scala](/akka-docs/src/test/scala/docs/cluster/TransformationMessages.scala) { #messages } Java -: @@snip [TransformationMessages.java]($code$/java/jdocs/cluster/TransformationMessages.java) { #messages } +: @@snip [TransformationMessages.java](/akka-docs/src/test/java/jdocs/cluster/TransformationMessages.java) { #messages } The backend worker that performs the transformation job: Scala -: @@snip [TransformationBackend.scala]($code$/scala/docs/cluster/TransformationBackend.scala) { #backend } +: @@snip [TransformationBackend.scala](/akka-docs/src/test/scala/docs/cluster/TransformationBackend.scala) { #backend } Java -: @@snip [TransformationBackend.java]($code$/java/jdocs/cluster/TransformationBackend.java) { #backend } +: @@snip [TransformationBackend.java](/akka-docs/src/test/java/jdocs/cluster/TransformationBackend.java) { #backend } Note that the `TransformationBackend` actor subscribes to cluster events to detect new, potential frontend nodes, and send them a registration message so that they know @@ -493,10 +493,10 @@ that they can use the backend worker. The frontend that receives user jobs and delegates to one of the registered backend workers: Scala -: @@snip [TransformationFrontend.scala]($code$/scala/docs/cluster/TransformationFrontend.scala) { #frontend } +: @@snip [TransformationFrontend.scala](/akka-docs/src/test/scala/docs/cluster/TransformationFrontend.scala) { #frontend } Java -: @@snip [TransformationFrontend.java]($code$/java/jdocs/cluster/TransformationFrontend.java) { #frontend } +: @@snip [TransformationFrontend.java](/akka-docs/src/test/java/jdocs/cluster/TransformationFrontend.java) { #frontend } Note that the `TransformationFrontend` actor watches the registered backend to be able to remove it from its list of available backend workers. @@ -551,10 +551,10 @@ be invoked when the current member status is changed to 'Up', i.e. the cluster has at least the defined number of members. Scala -: @@snip [FactorialFrontend.scala]($code$/scala/docs/cluster/FactorialFrontend.scala) { #registerOnUp } +: @@snip [FactorialFrontend.scala](/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala) { #registerOnUp } Java -: @@snip [FactorialFrontendMain.java]($code$/java/jdocs/cluster/FactorialFrontendMain.java) { #registerOnUp } +: @@snip [FactorialFrontendMain.java](/akka-docs/src/test/java/jdocs/cluster/FactorialFrontendMain.java) { #registerOnUp } This callback can be used for things other than starting actors. @@ -721,12 +721,12 @@ add the `sbt-multi-jvm` plugin and the dependency to `akka-multi-node-testkit`. First, as described in @ref:[Multi Node Testing](multi-node-testing.md), we need some scaffolding to configure the `MultiNodeSpec`. Define the participating roles and their [configuration](#cluster-configuration) in an object extending `MultiNodeConfig`: -@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #MultiNodeConfig } +@@snip [StatsSampleSpec.scala](/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #MultiNodeConfig } Define one concrete test class for each role/node. These will be instantiated on the different nodes (JVMs). They can be implemented differently, but often they are the same and extend an abstract test class, as illustrated here.
-@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #concrete-tests } +@@snip [StatsSampleSpec.scala](/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #concrete-tests } Note the naming convention of these classes. The name of the classes must end with `MultiJvmNode1`, `MultiJvmNode2` and so on. It is possible to define another suffix to be used by the `sbt-multi-jvm` plugin, but the default should be @@ -734,18 +734,18 @@ fine in most cases. Then the abstract `MultiNodeSpec`, which takes the `MultiNodeConfig` as a constructor parameter. -@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #abstract-test } +@@snip [StatsSampleSpec.scala](/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #abstract-test } Most of this can be extracted to a separate trait to avoid repeating this in all your tests. Typically you begin your test by starting up the cluster, letting the members join, and creating some actors. That can be done like this: -@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #startup-cluster } +@@snip [StatsSampleSpec.scala](/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #startup-cluster } From the test you interact with the cluster using the `Cluster` extension, e.g. `join`. -@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #join } +@@snip [StatsSampleSpec.scala](/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #join } Notice how the *testActor* from @ref:[testkit](testing.md) is added as a [subscriber](#cluster-subscriber) to cluster changes and then waits for certain events, such as, in this case, all members becoming 'Up'. @@ -753,7 +753,7 @@ to cluster changes and then waiting for certain events, such as in this case all The above code runs on all roles (JVMs). `runOn` is a convenient utility to declare that a certain block of code should only run for a specific role. -@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #test-statsService } +@@snip [StatsSampleSpec.scala](/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #test-statsService } Once again we take advantage of the facilities in @ref:[testkit](testing.md) to verify expected behavior. Here we use `testActor` as sender (via `ImplicitSender`) and verify the reply with `expectMsgPF`. In the above code you can see `node(third)`, which is a useful facility to get the root actor reference of the actor system for a specific role. This can also be used to grab the `akka.actor.Address` of that node.
-@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #addresses } +@@snip [StatsSampleSpec.scala](/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #addresses } @@@ diff --git a/akka-docs/src/main/paradox/common/circuitbreaker.md b/akka-docs/src/main/paradox/common/circuitbreaker.md index 4da9c7a8bc..85e8b65719 100644 --- a/akka-docs/src/main/paradox/common/circuitbreaker.md +++ b/akka-docs/src/main/paradox/common/circuitbreaker.md @@ -73,10 +73,10 @@ Here's how a `CircuitBreaker` would be configured for: Scala -: @@snip [CircuitBreakerDocSpec.scala]($code$/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #imports1 #circuit-breaker-initialization } +: @@snip [CircuitBreakerDocSpec.scala](/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #imports1 #circuit-breaker-initialization } Java -: @@snip [DangerousJavaActor.java]($code$/java/jdocs/circuitbreaker/DangerousJavaActor.java) { #imports1 #circuit-breaker-initialization } +: @@snip [DangerousJavaActor.java](/akka-docs/src/test/java/jdocs/circuitbreaker/DangerousJavaActor.java) { #imports1 #circuit-breaker-initialization } ### Future & Synchronous based API @@ -85,10 +85,10 @@ Once a circuit breaker actor has been initialized, interacting with that actor i The Synchronous API would also wrap your call with the circuit breaker logic, however, it uses the `withSyncCircuitBreaker` and receives a method that is not wrapped in a `Future`. Scala -: @@snip [CircuitBreakerDocSpec.scala]($code$/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #circuit-breaker-usage } +: @@snip [CircuitBreakerDocSpec.scala](/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #circuit-breaker-usage } Java -: @@snip [DangerousJavaActor.java]($code$/java/jdocs/circuitbreaker/DangerousJavaActor.java) { #circuit-breaker-usage } +: @@snip [DangerousJavaActor.java](/akka-docs/src/test/java/jdocs/circuitbreaker/DangerousJavaActor.java) { #circuit-breaker-usage } @@@ note @@ -121,10 +121,10 @@ Type of `defineFailureFn`: @scala[`Try[T] ⇒ Boolean`]@java[`BiFunction[Optiona @java[The response of a protected call is modelled using `Optional[T]` for a successful return value and `Optional[Throwable]` for exceptions.] This function should return `true` if the call should increase failure count, else false. Scala -: @@snip [CircuitBreakerDocSpec.scala]($code$/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #even-no-as-failure } +: @@snip [CircuitBreakerDocSpec.scala](/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #even-no-as-failure } Java -: @@snip [EvenNoFailureJavaExample.java]($code$/java/jdocs/circuitbreaker/EvenNoFailureJavaExample.java) { #even-no-as-failure } +: @@snip [EvenNoFailureJavaExample.java](/akka-docs/src/test/java/jdocs/circuitbreaker/EvenNoFailureJavaExample.java) { #even-no-as-failure } ### Low level API @@ -139,7 +139,7 @@ The below example doesn't make a remote call when the state is *HalfOpen*. 
Using @@@ Scala -: @@snip [CircuitBreakerDocSpec.scala]($code$/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #circuit-breaker-tell-pattern } +: @@snip [CircuitBreakerDocSpec.scala](/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #circuit-breaker-tell-pattern } Java -: @@snip [TellPatternJavaActor.java]($code$/java/jdocs/circuitbreaker/TellPatternJavaActor.java) { #circuit-breaker-tell-pattern } +: @@snip [TellPatternJavaActor.java](/akka-docs/src/test/java/jdocs/circuitbreaker/TellPatternJavaActor.java) { #circuit-breaker-tell-pattern } diff --git a/akka-docs/src/main/paradox/common/duration.md b/akka-docs/src/main/paradox/common/duration.md index 3ccf0adc25..19d20f1a77 100644 --- a/akka-docs/src/main/paradox/common/duration.md +++ b/akka-docs/src/main/paradox/common/duration.md @@ -21,7 +21,7 @@ when finite-ness does not matter; this is a supertype of `FiniteDuration` In Scala durations are constructable using a mini-DSL and support all expected arithmetic operations: -@@snip [Sample.scala]($code$/scala/docs/duration/Sample.scala) { #dsl } +@@snip [Sample.scala](/akka-docs/src/test/scala/docs/duration/Sample.scala) { #dsl } @@@ note @@ -37,9 +37,9 @@ might go wrong, depending on what starts the next line. Java provides less syntactic sugar, so you have to spell out the operations as method calls instead: -@@snip [Java.java]($code$/java/jdocs/duration/Java.java) { #import } +@@snip [Java.java](/akka-docs/src/test/java/jdocs/duration/Java.java) { #import } -@@snip [Java.java]($code$/java/jdocs/duration/Java.java) { #dsl } +@@snip [Java.java](/akka-docs/src/test/java/jdocs/duration/Java.java) { #dsl } ## Deadline @@ -48,8 +48,8 @@ of an absolute point in time, and support deriving a duration from this by calcu difference between now and the deadline. This is useful when you want to keep one overall deadline without having to take care of the book-keeping wrt. the passing of time yourself: -@@snip [Sample.scala]($code$/scala/docs/duration/Sample.scala) { #deadline } +@@snip [Sample.scala](/akka-docs/src/test/scala/docs/duration/Sample.scala) { #deadline } In Java you create these from durations: -@@snip [Java.java]($code$/java/jdocs/duration/Java.java) { #deadline } \ No newline at end of file +@@snip [Java.java](/akka-docs/src/test/java/jdocs/duration/Java.java) { #deadline } diff --git a/akka-docs/src/main/paradox/dispatchers.md b/akka-docs/src/main/paradox/dispatchers.md index 824e01e9ba..392ee5f009 100644 --- a/akka-docs/src/main/paradox/dispatchers.md +++ b/akka-docs/src/main/paradox/dispatchers.md @@ -31,17 +31,17 @@ gives excellent performance in most cases. Dispatchers implement the `ExecutionContext` interface and can thus be used to run `Future` invocations etc. 
Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #lookup } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #lookup } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #lookup } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #lookup } ## Setting the dispatcher for an Actor So in case you want to give your `Actor` a dispatcher other than the default, you need to do two things, of which the first is to configure the dispatcher: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #my-dispatcher-config } +@@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #my-dispatcher-config } @@@ note @@ -55,7 +55,7 @@ You can read more about parallelism in the JDK's [ForkJoinPool documentation](ht Another example that uses the "thread-pool-executor": -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #fixed-pool-size-dispatcher-config } +@@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #fixed-pool-size-dispatcher-config } @@@ note @@ -69,23 +69,23 @@ For more options, see the default-dispatcher section of the @ref:[configuration] Then you create the actor as usual and define the dispatcher in the deployment configuration. Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-dispatcher-in-config } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-dispatcher-in-config } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-dispatcher-in-config } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-dispatcher-in-config } -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #dispatcher-deployment-config } +@@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #dispatcher-deployment-config } An alternative to the deployment configuration is to define the dispatcher in code. If you define the `dispatcher` in the deployment configuration then this value will be used instead of the programmatically provided parameter. Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-dispatcher-in-code } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-dispatcher-in-code } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-dispatcher-in-code } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-dispatcher-in-code } @@@ note @@ -140,40 +140,40 @@ There are 3 different types of message dispatchers: Configuring a dispatcher with fixed thread pool size, e.g.
for actors that perform blocking IO: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #fixed-pool-size-dispatcher-config } +@@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #fixed-pool-size-dispatcher-config } And then using it: Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-fixed-pool-size-dispatcher } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-fixed-pool-size-dispatcher } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-fixed-pool-size-dispatcher } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-fixed-pool-size-dispatcher } Another example that uses the thread pool based on the number of cores (e.g. for CPU bound tasks): -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) {#my-thread-pool-dispatcher-config } +@@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) {#my-thread-pool-dispatcher-config } A different kind of dispatcher that uses an affinity pool may increase throughput in cases where there is a relatively small number of actors that maintain some internal state. The affinity pool tries its best to ensure that an actor is always scheduled to run on the same thread. This actor-to-thread pinning aims to decrease CPU cache misses which can result in significant throughput improvement. -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #affinity-pool-dispatcher-config } +@@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #affinity-pool-dispatcher-config } Configuring a `PinnedDispatcher`: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) {#my-pinned-dispatcher-config } +@@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) {#my-pinned-dispatcher-config } And then using it: Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-pinned-dispatcher } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-pinned-dispatcher } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-pinned-dispatcher } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-pinned-dispatcher } Note that `thread-pool-executor` configuration as per the above `my-thread-pool-dispatcher` example is NOT applicable. This is because every actor will have its own thread pool when using `PinnedDispatcher`,
Scala -: @@snip [BlockingDispatcherSample.scala]($akka$/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #blocking-in-actor } +: @@snip [BlockingDispatcherSample.scala](/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #blocking-in-actor } Java -: @@snip [BlockingDispatcherSample.java]($akka$/akka-docs/src/test/java/jdocs/actor/BlockingActor.java) { #blocking-in-actor } +: @@snip [BlockingDispatcherSample.java](/akka-docs/src/test/java/jdocs/actor/BlockingActor.java) { #blocking-in-actor } When facing this, you @@ -206,10 +206,10 @@ find bottlenecks or run out of memory or threads when the application runs under increased load. Scala -: @@snip [BlockingDispatcherSample.scala]($akka$/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #blocking-in-future } +: @@snip [BlockingDispatcherSample.scala](/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #blocking-in-future } Java -: @@snip [BlockingDispatcherSample.java]($akka$/akka-docs/src/test/java/jdocs/actor/BlockingFutureActor.java) { #blocking-in-future } +: @@snip [BlockingDispatcherSample.java](/akka-docs/src/test/java/jdocs/actor/BlockingFutureActor.java) { #blocking-in-future } ### Problem: Blocking on default dispatcher @@ -256,17 +256,17 @@ including Streams, Http and other reactive libraries built on top of it. Let's set up an application with the above `BlockingFutureActor` and the following `PrintActor`. Scala -: @@snip [BlockingDispatcherSample.scala]($akka$/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #print-actor } +: @@snip [BlockingDispatcherSample.scala](/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #print-actor } Java -: @@snip [BlockingDispatcherSample.java]($akka$/akka-docs/src/test/java/jdocs/actor/PrintActor.java) { #print-actor } +: @@snip [BlockingDispatcherSample.java](/akka-docs/src/test/java/jdocs/actor/PrintActor.java) { #print-actor } Scala -: @@snip [BlockingDispatcherSample.scala]($akka$/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #blocking-main } +: @@snip [BlockingDispatcherSample.scala](/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #blocking-main } Java -: @@snip [BlockingDispatcherSample.java]($akka$/akka-docs/src/test/java/jdocs/actor/BlockingDispatcherTest.java) { #blocking-main } +: @@snip [BlockingDispatcherSample.java](/akka-docs/src/test/java/jdocs/actor/BlockingDispatcherTest.java) { #blocking-main } Here the app is sending 100 messages to `BlockingFutureActor` and `PrintActor` and large numbers @@ -326,7 +326,7 @@ In `application.conf`, the dispatcher dedicated to blocking behavior should be configured as follows: -@@snip [BlockingDispatcherSample.scala]($akka$/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #my-blocking-dispatcher-config } +@@snip [BlockingDispatcherSample.scala](/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #my-blocking-dispatcher-config } A `thread-pool-executor` based dispatcher allows us to set a limit on the number of threads it will host, and this way we gain tight control over at most how many blocked threads will be in the system.
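As a minimal sketch of the pattern the following snippets demonstrate, blocking work is wrapped in a `Future` that runs on the dedicated dispatcher (assuming `my-blocking-dispatcher` is defined in `application.conf` as shown above; the sleep stands in for real blocking I/O):

```scala
import akka.actor.ActorSystem
import scala.concurrent.{ ExecutionContext, Future }

object BlockingIsolatedDemo extends App {
  val system = ActorSystem("blocking-demo")

  // looked up from the dedicated pool configured above,
  // NOT the default dispatcher
  implicit val blockingEc: ExecutionContext =
    system.dispatchers.lookup("my-blocking-dispatcher")

  Future {
    Thread.sleep(5000) // stand-in for a blocking call
    println("blocking work finished without starving the default dispatcher")
  }
}
```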
@@ -339,10 +339,10 @@ Whenever blocking has to be done, use the above configured dispatcher instead of the default one: Scala -: @@snip [BlockingDispatcherSample.scala]($akka$/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #separate-dispatcher } +: @@snip [BlockingDispatcherSample.scala](/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala) { #separate-dispatcher } Java -: @@snip [BlockingDispatcherSample.java]($akka$/akka-docs/src/test/java/jdocs/actor/SeparateDispatcherFutureActor.java) { #separate-dispatcher } +: @@snip [BlockingDispatcherSample.java](/akka-docs/src/test/java/jdocs/actor/SeparateDispatcherFutureActor.java) { #separate-dispatcher } The thread pool behavior is shown in the below diagram. diff --git a/akka-docs/src/main/paradox/distributed-data.md b/akka-docs/src/main/paradox/distributed-data.md index 6b20c4374e..1fb57040af 100644 --- a/akka-docs/src/main/paradox/distributed-data.md +++ b/akka-docs/src/main/paradox/distributed-data.md @@ -61,10 +61,10 @@ adds or removes elements from a `ORSet` (observed-remove set). It also subscribe changes of this. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #data-bot } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #data-bot } Java -: @@snip [DataBot.java]($code$/java/jdocs/ddata/DataBot.java) { #data-bot } +: @@snip [DataBot.java](/akka-docs/src/test/java/jdocs/ddata/DataBot.java) { #data-bot } ### Update @@ -104,10 +104,10 @@ are preferred over unreachable nodes. Note that `WriteMajority` has a `minCap` parameter that is useful to specify to achieve better safety for small clusters. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #update } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #update } As reply of the `Update` a `Replicator.UpdateSuccess` is sent to the sender of the `Update` if the value was successfully replicated according to the supplied consistency @@ -117,17 +117,17 @@ or was rolled back. It may still have been replicated to some nodes, and will ev be replicated to all nodes with the gossip protocol. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-response1 } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-response1 } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update-response1 } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #update-response1 } Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-response2 } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-response2 } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update-response2 } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #update-response2 } You will always see your own writes. 
For example if you send two `Update` messages changing the value of the same `key`, the `modify` function of the second message will @@ -139,10 +139,10 @@ way to pass contextual information (e.g. original sender) without having to use or maintain local correlation data structures. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-request-context } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-request-context } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update-request-context } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #update-request-context } ### Get @@ -162,10 +162,10 @@ at least **N/2 + 1** replicas, where N is the number of nodes in the cluster Note that `ReadMajority` has a `minCap` parameter that is useful to specify to achieve better safety for small clusters. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #get } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #get } As reply of the `Get` a `Replicator.GetSuccess` is sent to the sender of the `Get` if the value was successfully retrieved according to the supplied consistency @@ -173,17 +173,17 @@ level within the supplied timeout. Otherwise a `Replicator.GetFailure` is sent. If the key does not exist the reply will be `Replicator.NotFound`. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-response1 } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-response1 } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get-response1 } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #get-response1 } Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-response2 } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-response2 } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get-response2 } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #get-response2 } You will always read your own writes. For example if you send a `Update` message followed by a `Get` of the same `key` the `Get` will retrieve the change that was @@ -196,10 +196,10 @@ In the `Get` message you can pass an optional request context in the same way as to after receiving and transforming `GetSuccess`. 
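Taken together, the `Update` and `Get` flows described above can be sketched in a single client actor. This is our own rough example, assuming the Akka 2.5-era classic Distributed Data API; the key name and the trigger messages are hypothetical:

```scala
import akka.actor.Actor
import akka.cluster.Cluster
import akka.cluster.ddata._
import akka.cluster.ddata.Replicator._

class CounterClient extends Actor {
  implicit val node: Cluster = Cluster(context.system)
  private val replicator = DistributedData(context.system).replicator
  private val CounterKey = PNCounterKey("visit-counter")

  def receive = {
    case "increment" =>
      // the modify function is applied to the local replica's current value
      replicator ! Update(CounterKey, PNCounter.empty, WriteLocal)(_ + 1)
    case "read" =>
      replicator ! Get(CounterKey, ReadLocal)
    case g @ GetSuccess(CounterKey, _) =>
      println(s"current value: ${g.get(CounterKey).value}")
    case NotFound(CounterKey, _) =>
      println("counter does not exist yet")
    case _: UpdateResponse[_] => // success/failure handling elided
  }
}
```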
Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-request-context } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-request-context } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get-request-context } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #get-request-context } ### Consistency @@ -252,24 +252,24 @@ the total size of the cluster. Here is an example of using `WriteMajority` and `ReadMajority`: Scala -: @@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #read-write-majority } +: @@snip [ShoppingCart.scala](/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala) { #read-write-majority } Java -: @@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #read-write-majority } +: @@snip [ShoppingCart.java](/akka-docs/src/test/java/jdocs/ddata/ShoppingCart.java) { #read-write-majority } Scala -: @@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #get-cart } +: @@snip [ShoppingCart.scala](/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala) { #get-cart } Java -: @@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #get-cart } +: @@snip [ShoppingCart.java](/akka-docs/src/test/java/jdocs/ddata/ShoppingCart.java) { #get-cart } Scala -: @@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #add-item } +: @@snip [ShoppingCart.scala](/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala) { #add-item } Java -: @@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #add-item } +: @@snip [ShoppingCart.java](/akka-docs/src/test/java/jdocs/ddata/ShoppingCart.java) { #add-item } In some rare cases, when performing an `Update` it is needed to first try to fetch latest data from other nodes. That can be done by first sending a `Get` with `ReadMajority` and then continue with @@ -282,10 +282,10 @@ performed (hence the name observed-removed set). The following example illustrates how to do that: Scala -: @@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #remove-item } +: @@snip [ShoppingCart.scala](/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala) { #remove-item } Java -: @@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #remove-item } +: @@snip [ShoppingCart.java](/akka-docs/src/test/java/jdocs/ddata/ShoppingCart.java) { #remove-item } @@@ warning @@ -311,10 +311,10 @@ The subscriber is automatically removed if the subscriber is terminated. A subsc also be deregistered with the `Replicator.Unsubscribe` message. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #subscribe } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #subscribe } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #subscribe } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #subscribe } ### Delete @@ -336,10 +336,10 @@ In the *Delete* message you can pass an optional request context in the same way to after receiving and transforming *DeleteSuccess*. 
Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #delete } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #delete } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #delete } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #delete } @@@ warning @@ -406,10 +406,10 @@ as two internal `GCounter`s. Merge is handled by merging the internal P and N co The value of the counter is the value of the P counter minus the value of the N counter. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #pncounter } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #pncounter } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #pncounter } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #pncounter } `GCounter` and `PNCounter` have support for [delta-CRDT](#delta-crdt) and don't need causal delivery of deltas. @@ -420,10 +420,10 @@ values they are guaranteed to be replicated together as one unit, which is somet related data. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #pncountermap } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #pncountermap } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #pncountermap } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #pncountermap } ### Sets @@ -432,10 +432,10 @@ the data type to use. The elements can be any type of values that can be seriali Merge is the union of the two sets. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #gset } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #gset } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #gset } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #gset } `GSet` has support for [delta-CRDT](#delta-crdt) and it doesn't require causal delivery of deltas. @@ -449,10 +449,10 @@ called "birth dot". The version vector and the dots are used by the `merge` func track causality of the operations and resolve concurrent updates. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #orset } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #orset } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #orset } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #orset } `ORSet` has support for [delta-CRDT](#delta-crdt) and it requires causal delivery of deltas. @@ -486,10 +486,10 @@ uses delta propagation to deliver updates. Effectively, the update for map is th being the key and full update for the respective value (`ORSet`, `PNCounter` or `LWWRegister`) kept in the map. 
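To make the merge semantics of the counter and set types concrete, here is a toy, state-based G-Counter. It is not the Akka implementation, but it shows why replicas converge: each node increments its own slot, and merge takes the per-node maximum, which is commutative, associative and idempotent:

```scala
final case class ToyGCounter(perNode: Map[String, Long] = Map.empty) {
  def increment(node: String, delta: Long = 1L): ToyGCounter =
    copy(perNode.updated(node, perNode.getOrElse(node, 0L) + delta))

  def value: Long = perNode.values.sum

  // merging in any order, any number of times, yields the same result
  def merge(that: ToyGCounter): ToyGCounter =
    ToyGCounter((perNode.keySet ++ that.perNode.keySet).iterator.map { n =>
      n -> (perNode.getOrElse(n, 0L) max that.perNode.getOrElse(n, 0L))
    }.toMap)
}

object ToyGCounterDemo extends App {
  val a = ToyGCounter().increment("nodeA")
  val b = ToyGCounter().increment("nodeB", 2L)
  assert(a.merge(b).value == 3L)
  assert(a.merge(b) == b.merge(a)) // merge order does not matter
}
```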
Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #ormultimap } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #ormultimap } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #ormultimap } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #ormultimap } When a data entry is changed the full state of that entry is replicated to other nodes, i.e. when you update a map, the whole map is replicated. Therefore, instead of using one `ORMap` @@ -525,10 +525,10 @@ in the below section about `LWWRegister`. to `true`. Thereafter it cannot be changed. `true` wins over `false` in merge. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #flag } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #flag } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #flag } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #flag } `LWWRegister` (last writer wins register) can hold any (serializable) value. @@ -540,20 +540,20 @@ Merge takes the register updated by the node with lowest address (`UniqueAddress if the timestamps are exactly the same. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #lwwregister } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #lwwregister } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #lwwregister } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #lwwregister } Instead of using timestamps based on `System.currentTimeMillis()` time it is possible to use a timestamp value based on something else, for example an increasing version number from a database record that is used for optimistic concurrency control. Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #lwwregister-custom-clock } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #lwwregister-custom-clock } Java -: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #lwwregister-custom-clock } +: @@snip [DistributedDataDocTest.java](/akka-docs/src/test/java/jdocs/ddata/DistributedDataDocTest.java) { #lwwregister-custom-clock } For first-write-wins semantics you can use the `LWWRegister#reverseClock` instead of the `LWWRegister#defaultClock`. @@ -579,10 +579,10 @@ to keep track of addition and removals. A `TwoPhaseSet` is a set where an eleme removed, but never added again thereafter. Scala -: @@snip [TwoPhaseSet.scala]($code$/scala/docs/ddata/TwoPhaseSet.scala) { #twophaseset } +: @@snip [TwoPhaseSet.scala](/akka-docs/src/test/scala/docs/ddata/TwoPhaseSet.scala) { #twophaseset } Java -: @@snip [TwoPhaseSet.java]($code$/java/jdocs/ddata/TwoPhaseSet.java) { #twophaseset } +: @@snip [TwoPhaseSet.java](/akka-docs/src/test/java/jdocs/ddata/TwoPhaseSet.java) { #twophaseset } Data types should be immutable, i.e. "modifying" methods should return a new instance. @@ -602,15 +602,15 @@ deterministically in the serialization. 
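That merge rule fits in a few lines; a toy model of a last-writer-wins register (ours, not Akka's `LWWRegister`):

```scala
final case class ToyLww[A](value: A, timestamp: Long, nodeAddress: String) {
  def merge(that: ToyLww[A]): ToyLww[A] =
    if (timestamp > that.timestamp) this
    else if (timestamp < that.timestamp) that
    else if (nodeAddress <= that.nodeAddress) this // tie: lowest address wins
    else that
}
```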
This is a protobuf representation of the above `TwoPhaseSet`: -@@snip [TwoPhaseSetMessages.proto]($code$/../main/protobuf/TwoPhaseSetMessages.proto) { #twophaseset } +@@snip [TwoPhaseSetMessages.proto](/akka-docs/src/test/../main/protobuf/TwoPhaseSetMessages.proto) { #twophaseset } The serializer for the `TwoPhaseSet`: Scala -: @@snip [TwoPhaseSetSerializer.scala]($code$/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala) { #serializer } +: @@snip [TwoPhaseSetSerializer.scala](/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala) { #serializer } Java -: @@snip [TwoPhaseSetSerializer.java]($code$/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer.java) { #serializer } +: @@snip [TwoPhaseSetSerializer.java](/akka-docs/src/test/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer.java) { #serializer } Note that the elements of the sets are sorted so the SHA-1 digests are the same for the same elements. @@ -618,25 +618,25 @@ for the same elements. You register the serializer in configuration: Scala -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #serializer-config } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #serializer-config } Java -: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #japi-serializer-config } +: @@snip [DistributedDataDocSpec.scala](/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala) { #japi-serializer-config } Using compression can sometimes be a good idea to reduce the data size. Gzip compression is provided by the @scala[`akka.cluster.ddata.protobuf.SerializationSupport` trait]@java[`akka.cluster.ddata.protobuf.AbstractSerializationSupport` interface]: Scala -: @@snip [TwoPhaseSetSerializer.scala]($code$/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala) { #compression } +: @@snip [TwoPhaseSetSerializer.scala](/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala) { #compression } Java -: @@snip [TwoPhaseSetSerializerWithCompression.java]($code$/java/jdocs/ddata/protobuf/TwoPhaseSetSerializerWithCompression.java) { #compression } +: @@snip [TwoPhaseSetSerializerWithCompression.java](/akka-docs/src/test/java/jdocs/ddata/protobuf/TwoPhaseSetSerializerWithCompression.java) { #compression } The two embedded `GSet` can be serialized as illustrated above, but in general when composing new data types from the existing built in types it is better to make use of the existing serializer for those types. This can be done by declaring those as bytes fields in protobuf: -@@snip [TwoPhaseSetMessages.proto]($code$/../main/protobuf/TwoPhaseSetMessages.proto) { #twophaseset2 } +@@snip [TwoPhaseSetMessages.proto](/akka-docs/src/test/../main/protobuf/TwoPhaseSetMessages.proto) { #twophaseset2 } and use the methods `otherMessageToProto` and `otherMessageFromBinary` that are provided by the `SerializationSupport` trait to serialize and deserialize the `GSet` instances. This @@ -644,10 +644,10 @@ works with any type that has a registered Akka serializer. 
This is how such a serializer would look for the `TwoPhaseSet`: Scala -: @@snip [TwoPhaseSetSerializer2.scala]($code$/scala/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala) { #serializer } +: @@snip [TwoPhaseSetSerializer2.scala](/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala) { #serializer } Java -: @@snip [TwoPhaseSetSerializer2.java]($code$/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer2.java) { #serializer } +: @@snip [TwoPhaseSetSerializer2.java](/akka-docs/src/test/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer2.java) { #serializer } ### Durable Storage @@ -792,4 +792,4 @@ paper by Mark Shapiro et al. The `DistributedData` extension can be configured with the following properties: -@@snip [reference.conf]($akka$/akka-distributed-data/src/main/resources/reference.conf) { #distributed-data } +@@snip [reference.conf](/akka-distributed-data/src/main/resources/reference.conf) { #distributed-data } diff --git a/akka-docs/src/main/paradox/distributed-pub-sub.md b/akka-docs/src/main/paradox/distributed-pub-sub.md index 8af39d6882..bf54cb3414 100644 --- a/akka-docs/src/main/paradox/distributed-pub-sub.md +++ b/akka-docs/src/main/paradox/distributed-pub-sub.md @@ -76,35 +76,35 @@ can explicitly remove entries with `DistributedPubSubMediator.Unsubscribe`. An example of a subscriber actor: Scala -: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #subscriber } +: @@snip [DistributedPubSubMediatorSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #subscriber } Java -: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #subscriber } +: @@snip [DistributedPubSubMediatorTest.java](/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #subscriber } Subscriber actors can be started on several nodes in the cluster, and all will receive messages published to the "content" topic.
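A condensed sketch of the subscribe side, for orientation (the actor is our own; `Subscribe`, `SubscribeAck` and `Publish` are the real mediator messages):

```scala
import akka.actor.Actor
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{ Subscribe, SubscribeAck }

class ContentSubscriber extends Actor {
  private val mediator = DistributedPubSub(context.system).mediator
  mediator ! Subscribe("content", self) // register for the "content" topic

  def receive = {
    case SubscribeAck(Subscribe("content", None, `self`)) =>
      // the subscription is now active on this node
    case msg: String =>
      println(s"received: $msg")
  }
}
```

Publishing is then a matter of `mediator ! Publish("content", "hello")` from any node.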
Scala -: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #start-subscribers } +: @@snip [DistributedPubSubMediatorSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #start-subscribers } Java -: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #start-subscribers } +: @@snip [DistributedPubSubMediatorTest.java](/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #start-subscribers } A simple actor that publishes to this "content" topic: Scala -: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #publisher } +: @@snip [DistributedPubSubMediatorSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #publisher } Java -: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #publisher } +: @@snip [DistributedPubSubMediatorTest.java](/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #publisher } It can publish messages to the topic from anywhere in the cluster: Scala -: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #publish-message } +: @@snip [DistributedPubSubMediatorSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #publish-message } Java -: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #publish-message } +: @@snip [DistributedPubSubMediatorTest.java](/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #publish-message } ### Topic Groups @@ -161,35 +161,35 @@ can explicitly remove entries with `DistributedPubSubMediator.Remove`. An example of a destination actor: Scala -: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #send-destination } +: @@snip [DistributedPubSubMediatorSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #send-destination } Java -: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #send-destination } +: @@snip [DistributedPubSubMediatorTest.java](/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #send-destination } Destination actors can be started on several nodes in the cluster, and all will receive messages sent to the path (without address information). 
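And a corresponding sketch for the point-to-point mode (the destination actor is ours; `Put` and `Send` are the real mediator messages):

```scala
import akka.actor.Actor
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{ Put, Send }

class Destination extends Actor {
  private val mediator = DistributedPubSub(context.system).mediator
  mediator ! Put(self) // register this actor's path with the local mediator

  def receive = {
    case msg => println(s"got: $msg")
  }
}

// from anywhere in the cluster; localAffinity prefers an instance
// registered on the sending node, if there is one:
//   mediator ! Send(path = "/user/destination", msg = "hello", localAffinity = true)
```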
Scala -: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #start-send-destinations } +: @@snip [DistributedPubSubMediatorSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #start-send-destinations } Java -: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #start-send-destinations } +: @@snip [DistributedPubSubMediatorTest.java](/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #start-send-destinations } A simple actor that sends to the path: Scala -: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #sender } +: @@snip [DistributedPubSubMediatorSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #sender } Java -: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #sender } +: @@snip [DistributedPubSubMediatorTest.java](/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #sender } It can send messages to the path from anywhere in the cluster: Scala -: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #send-message } +: @@snip [DistributedPubSubMediatorSpec.scala](/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #send-message } Java -: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #send-message } +: @@snip [DistributedPubSubMediatorTest.java](/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #send-message } It is also possible to broadcast messages to the actors that have been registered with `Put`. Send `DistributedPubSubMediator.SendToAll` message to the local mediator and the wrapped message @@ -213,7 +213,7 @@ want to use different cluster roles for different mediators. The `DistributedPubSub` extension can be configured with the following properties: -@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #pub-sub-ext-config } +@@snip [reference.conf](/akka-cluster-tools/src/main/resources/reference.conf) { #pub-sub-ext-config } It is recommended to load the extension when the actor system is started by defining it in `akka.extensions` configuration property. 
Otherwise it will be activated when first used diff --git a/akka-docs/src/main/paradox/event-bus.md b/akka-docs/src/main/paradox/event-bus.md index 34a3f52ce9..9d0ee53b9a 100644 --- a/akka-docs/src/main/paradox/event-bus.md +++ b/akka-docs/src/main/paradox/event-bus.md @@ -5,10 +5,10 @@ Originally conceived as a way to send messages to groups of actors, the implementing a simple interface: Scala -: @@snip [EventBus.scala]($akka$/akka-actor/src/main/scala/akka/event/EventBus.scala) { #event-bus-api } +: @@snip [EventBus.scala](/akka-actor/src/main/scala/akka/event/EventBus.scala) { #event-bus-api } Java -: @@snip [EventBusDocTest.java]($code$/java/jdocs/event/EventBusDocTest.java) { #event-bus-api } +: @@snip [EventBusDocTest.java](/akka-docs/src/test/java/jdocs/event/EventBusDocTest.java) { #event-bus-api } @@@ note @@ -48,18 +48,18 @@ compare subscribers and how exactly to classify. The necessary methods to be implemented are illustrated with the following example: Scala -: @@snip [EventBusDocSpec.scala]($code$/scala/docs/event/EventBusDocSpec.scala) { #lookup-bus } +: @@snip [EventBusDocSpec.scala](/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala) { #lookup-bus } Java -: @@snip [EventBusDocTest.java]($code$/java/jdocs/event/EventBusDocTest.java) { #lookup-bus } +: @@snip [EventBusDocTest.java](/akka-docs/src/test/java/jdocs/event/EventBusDocTest.java) { #lookup-bus } A test for this implementation may look like this: Scala -: @@snip [EventBusDocSpec.scala]($code$/scala/docs/event/EventBusDocSpec.scala) { #lookup-bus-test } +: @@snip [EventBusDocSpec.scala](/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala) { #lookup-bus-test } Java -: @@snip [EventBusDocTest.java]($code$/java/jdocs/event/EventBusDocTest.java) { #lookup-bus-test } +: @@snip [EventBusDocTest.java](/akka-docs/src/test/java/jdocs/event/EventBusDocTest.java) { #lookup-bus-test } This classifier is efficient in case no subscribers exist for a particular event. @@ -76,18 +76,18 @@ classifier hierarchy. The necessary methods to be implemented are illustrated with the following example: Scala -: @@snip [EventBusDocSpec.scala]($code$/scala/docs/event/EventBusDocSpec.scala) { #subchannel-bus } +: @@snip [EventBusDocSpec.scala](/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala) { #subchannel-bus } Java -: @@snip [EventBusDocTest.java]($code$/java/jdocs/event/EventBusDocTest.java) { #subchannel-bus } +: @@snip [EventBusDocTest.java](/akka-docs/src/test/java/jdocs/event/EventBusDocTest.java) { #subchannel-bus } A test for this implementation may look like this: Scala -: @@snip [EventBusDocSpec.scala]($code$/scala/docs/event/EventBusDocSpec.scala) { #subchannel-bus-test } +: @@snip [EventBusDocSpec.scala](/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala) { #subchannel-bus-test } Java -: @@snip [EventBusDocTest.java]($code$/java/jdocs/event/EventBusDocTest.java) { #subchannel-bus-test } +: @@snip [EventBusDocTest.java](/akka-docs/src/test/java/jdocs/event/EventBusDocTest.java) { #subchannel-bus-test } This classifier is also efficient in case no subscribers are found for an event, but it uses conventional locking to synchronize an internal classifier @@ -106,18 +106,18 @@ stations by geographical reachability (for old-school radio-wave transmission). 
The necessary methods to be implemented are illustrated with the following example: Scala -: @@snip [EventBusDocSpec.scala]($code$/scala/docs/event/EventBusDocSpec.scala) { #scanning-bus } +: @@snip [EventBusDocSpec.scala](/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala) { #scanning-bus } Java -: @@snip [EventBusDocTest.java]($code$/java/jdocs/event/EventBusDocTest.java) { #scanning-bus } +: @@snip [EventBusDocTest.java](/akka-docs/src/test/java/jdocs/event/EventBusDocTest.java) { #scanning-bus } A test for this implementation may look like this: Scala -: @@snip [EventBusDocSpec.scala]($code$/scala/docs/event/EventBusDocSpec.scala) { #scanning-bus-test } +: @@snip [EventBusDocSpec.scala](/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala) { #scanning-bus-test } Java -: @@snip [EventBusDocTest.java]($code$/java/jdocs/event/EventBusDocTest.java) { #scanning-bus-test } +: @@snip [EventBusDocTest.java](/akka-docs/src/test/java/jdocs/event/EventBusDocTest.java) { #scanning-bus-test } This classifier always takes time proportional to the number of subscriptions, independent of how many actually match. @@ -137,18 +137,18 @@ takes care of unsubscribing terminated actors automatically. The necessary methods to be implemented are illustrated with the following example: Scala -: @@snip [EventBusDocSpec.scala]($code$/scala/docs/event/EventBusDocSpec.scala) { #actor-bus } +: @@snip [EventBusDocSpec.scala](/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala) { #actor-bus } Java -: @@snip [EventBusDocTest.java]($code$/java/jdocs/event/EventBusDocTest.java) { #actor-bus } +: @@snip [EventBusDocTest.java](/akka-docs/src/test/java/jdocs/event/EventBusDocTest.java) { #actor-bus } A test for this implementation may look like this: Scala -: @@snip [EventBusDocSpec.scala]($code$/scala/docs/event/EventBusDocSpec.scala) { #actor-bus-test } +: @@snip [EventBusDocSpec.scala](/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala) { #actor-bus-test } Java -: @@snip [EventBusDocTest.java]($code$/java/jdocs/event/EventBusDocTest.java) { #actor-bus-test } +: @@snip [EventBusDocTest.java](/akka-docs/src/test/java/jdocs/event/EventBusDocTest.java) { #actor-bus-test } This classifier is still generic in the event type, and it is efficient for all use cases.
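Pulling the first of these classifiers together, a complete minimal bus based on `LookupClassification` might look like this (a sketch; the envelope type is our own):

```scala
import akka.actor.ActorRef
import akka.event.{ EventBus, LookupClassification }

final case class MsgEnvelope(topic: String, payload: Any)

class LookupBusImpl extends EventBus with LookupClassification {
  type Event = MsgEnvelope
  type Classifier = String
  type Subscriber = ActorRef

  // extract the classifier from an incoming event
  override protected def classify(event: Event): Classifier = event.topic

  // deliver the event to one subscriber
  override protected def publish(event: Event, subscriber: Subscriber): Unit =
    subscriber ! event.payload

  // subscribers are stored in ordered sets, so they must be comparable
  override protected def compareSubscribers(a: Subscriber, b: Subscriber): Int =
    a.compareTo(b)

  // expected number of distinct classifiers (sizes the internal index)
  override protected def mapSize: Int = 128
}
```

A subscriber then calls `bus.subscribe(someActorRef, "greetings")` and receives the payload of every `MsgEnvelope` published with that topic.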
Given a simple actor: @@@ div { .group-scala } -@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #deadletters } +@@snip [LoggingDocSpec.scala](/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala) { #deadletters } @@@ @@@ div { .group-java } -@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports-deadletter } +@@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #imports-deadletter } -@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #deadletter-actor } +@@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #deadletter-actor } it can be subscribed like this: It can be subscribed like this: -@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #deadletters } +@@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #deadletters } @@@ @@ -188,10 +188,10 @@ is implemented in the event stream, it is possible to subscribe to a group of ev subscribing to their common superclass as demonstrated in the following example: Scala -: @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #superclass-subscription-eventstream } +: @@snip [LoggingDocSpec.scala](/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala) { #superclass-subscription-eventstream } Java -: @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #superclass-subscription-eventstream } +: @@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #superclass-subscription-eventstream } Similarly to [Actor Classification](#actor-classification), `EventStream` will automatically remove subscribers when they terminate. 
@@ -253,18 +253,18 @@ However, in case you find yourself in need of debugging these kinds of low level it's still possible to subscribe to them explicitly: Scala -: @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #suppressed-deadletters } +: @@snip [LoggingDocSpec.scala](/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala) { #suppressed-deadletters } Java -: @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #suppressed-deadletters } +: @@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #suppressed-deadletters } or all dead letters (including the suppressed ones): Scala -: @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #all-deadletters } +: @@snip [LoggingDocSpec.scala](/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala) { #all-deadletters } Java -: @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #all-deadletters } +: @@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #all-deadletters } ### Other Uses diff --git a/akka-docs/src/main/paradox/extending-akka.md b/akka-docs/src/main/paradox/extending-akka.md index dd85375bae..382ec2a755 100644 --- a/akka-docs/src/main/paradox/extending-akka.md +++ b/akka-docs/src/main/paradox/extending-akka.md @@ -21,40 +21,40 @@ So let's create a sample extension that lets us count the number of times someth First, we define what our `Extension` should do: Scala -: @@snip [ExtensionDocSpec.scala]($code$/scala/docs/extension/ExtensionDocSpec.scala) { #extension } +: @@snip [ExtensionDocSpec.scala](/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala) { #extension } Java -: @@snip [ExtensionDocTest.java]($code$/java/jdocs/extension/ExtensionDocTest.java) { #imports #extension } +: @@snip [ExtensionDocTest.java](/akka-docs/src/test/java/jdocs/extension/ExtensionDocTest.java) { #imports #extension } Then we need to create an `ExtensionId` for our extension so we can grab a hold of it. Scala -: @@snip [ExtensionDocSpec.scala]($code$/scala/docs/extension/ExtensionDocSpec.scala) { #extensionid } +: @@snip [ExtensionDocSpec.scala](/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala) { #extensionid } Java -: @@snip [ExtensionDocTest.java]($code$/java/jdocs/extension/ExtensionDocTest.java) { #imports #extensionid } +: @@snip [ExtensionDocTest.java](/akka-docs/src/test/java/jdocs/extension/ExtensionDocTest.java) { #imports #extensionid } Wicked! 
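Assembled, a counting extension along those lines might look like this (a sketch mirroring the snippets above; the counter semantics are our own choice):

```scala
import java.util.concurrent.atomic.AtomicLong
import akka.actor._

// the Extension itself: one thread-safe counter per ActorSystem
class CountExtensionImpl extends Extension {
  private val counter = new AtomicLong(0L)
  def increment(): Long = counter.incrementAndGet()
}

// the ExtensionId is the handle used to look the extension up
object CountExtension extends ExtensionId[CountExtensionImpl] with ExtensionIdProvider {
  override def lookup = CountExtension
  override def createExtension(system: ExtendedActorSystem) = new CountExtensionImpl
}

// usage, e.g. inside an actor: CountExtension(context.system).increment()
```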
Now all we need to do is to actually use it: Scala -: @@snip [ExtensionDocSpec.scala]($code$/scala/docs/extension/ExtensionDocSpec.scala) { #extension-usage } +: @@snip [ExtensionDocSpec.scala](/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala) { #extension-usage } Java -: @@snip [ExtensionDocTest.java]($code$/java/jdocs/extension/ExtensionDocTest.java) { #extension-usage } +: @@snip [ExtensionDocTest.java](/akka-docs/src/test/java/jdocs/extension/ExtensionDocTest.java) { #extension-usage } Or from inside of an Akka Actor: Scala -: @@snip [ExtensionDocSpec.scala]($code$/scala/docs/extension/ExtensionDocSpec.scala) { #extension-usage-actor } +: @@snip [ExtensionDocSpec.scala](/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala) { #extension-usage-actor } Java -: @@snip [ExtensionDocTest.java]($code$/java/jdocs/extension/ExtensionDocTest.java) { #extension-usage-actor } +: @@snip [ExtensionDocTest.java](/akka-docs/src/test/java/jdocs/extension/ExtensionDocTest.java) { #extension-usage-actor } @@@ div { .group-scala } You can also hide extension behind traits: -@@snip [ExtensionDocSpec.scala]($code$/scala/docs/extension/ExtensionDocSpec.scala) { #extension-usage-actor-trait } +@@snip [ExtensionDocSpec.scala](/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala) { #extension-usage-actor-trait } @@@ @@ -66,7 +66,7 @@ To be able to load extensions from your Akka configuration you must add FQCNs of in the `akka.extensions` section of the config you provide to your `ActorSystem`. Scala -: @@snip [ExtensionDocSpec.scala]($code$/scala/docs/extension/ExtensionDocSpec.scala) { #config } +: @@snip [ExtensionDocSpec.scala](/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala) { #config } Java : @@@vars @@ -89,23 +89,23 @@ The @ref:[configuration](general/configuration.md) can be used for application s Sample configuration: -@@snip [SettingsExtensionDocSpec.scala]($code$/scala/docs/extension/SettingsExtensionDocSpec.scala) { #config } +@@snip [SettingsExtensionDocSpec.scala](/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala) { #config } The `Extension`: Scala -: @@snip [SettingsExtensionDocSpec.scala]($code$/scala/docs/extension/SettingsExtensionDocSpec.scala) { #imports #extension #extensionid } +: @@snip [SettingsExtensionDocSpec.scala](/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala) { #imports #extension #extensionid } Java -: @@snip [SettingsExtensionDocTest.java]($code$/java/jdocs/extension/SettingsExtensionDocTest.java) { #imports #extension #extensionid } +: @@snip [SettingsExtensionDocTest.java](/akka-docs/src/test/java/jdocs/extension/SettingsExtensionDocTest.java) { #imports #extension #extensionid } Use it: Scala -: @@snip [SettingsExtensionDocSpec.scala]($code$/scala/docs/extension/SettingsExtensionDocSpec.scala) { #extension-usage-actor } +: @@snip [SettingsExtensionDocSpec.scala](/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala) { #extension-usage-actor } Java -: @@snip [SettingsExtensionDocTest.java]($code$/java/jdocs/extension/SettingsExtensionDocTest.java) { #extension-usage-actor } +: @@snip [SettingsExtensionDocTest.java](/akka-docs/src/test/java/jdocs/extension/SettingsExtensionDocTest.java) { #extension-usage-actor } ## Library extensions @@ -125,4 +125,4 @@ this could be important is in tests. 
The ``akka.library-extensions`` must never be assigned (`= ["Extension"]`) instead of appending, as this will break the library-extension mechanism and make behavior depend on class path ordering. -@@@ \ No newline at end of file +@@@ diff --git a/akka-docs/src/main/paradox/fault-tolerance-sample.md b/akka-docs/src/main/paradox/fault-tolerance-sample.md index b526871dd6..c7a5562dbc 100644 --- a/akka-docs/src/main/paradox/fault-tolerance-sample.md +++ b/akka-docs/src/main/paradox/fault-tolerance-sample.md @@ -36,7 +36,7 @@ # Full Source Code of the Fault Tolerance Sample Scala -: @@snip [FaultHandlingDocSample.scala]($code$/scala/docs/actor/FaultHandlingDocSample.scala) { #all } +: @@snip [FaultHandlingDocSample.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala) { #all } Java -: @@snip [FaultHandlingDocSample.java]($code$/java/jdocs/actor/FaultHandlingDocSample.java) { #all } +: @@snip [FaultHandlingDocSample.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingDocSample.java) { #all } diff --git a/akka-docs/src/main/paradox/fault-tolerance.md b/akka-docs/src/main/paradox/fault-tolerance.md index 5f7838d3d6..fdd93cabcc 100644 --- a/akka-docs/src/main/paradox/fault-tolerance.md +++ b/akka-docs/src/main/paradox/fault-tolerance.md @@ -36,10 +36,10 @@ in more depth. For the sake of demonstration let us consider the following strategy: Scala -: @@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #strategy } +: @@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #strategy } Java -: @@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #strategy } +: @@snip [FaultHandlingTest.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingTest.java) { #strategy } We have chosen a few well-known exception types in order to demonstrate the application of the fault handling directives described in @ref:[supervision](general/supervision.md). @@ -94,7 +94,7 @@ in the same way as the default strategy defined above. You can combine your own strategy with the default strategy: -@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #default-strategy-fallback } +@@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #default-strategy-fallback } @@@ @@ -135,73 +135,73 @@ The following section shows the effects of the different directives in practice, where a test setup is needed.
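For reference while reading the walkthrough, the kind of strategy declaration discussed above can be condensed into a sketch like this (ours, abbreviating the official snippets):

```scala
import akka.actor.{ Actor, OneForOneStrategy, Props }
import akka.actor.SupervisorStrategy._
import scala.concurrent.duration._

class Supervisor extends Actor {
  override val supervisorStrategy =
    OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1.minute) {
      case _: ArithmeticException      => Resume
      case _: NullPointerException     => Restart
      case _: IllegalArgumentException => Stop
      case _: Exception                => Escalate
    }

  def receive = {
    case p: Props => sender() ! context.actorOf(p) // create children on request
  }
}
```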
First off, we need a suitable supervisor: Scala -: @@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #supervisor } +: @@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #supervisor } Java -: @@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #supervisor } +: @@snip [FaultHandlingTest.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingTest.java) { #supervisor } This supervisor will be used to create a child, with which we can experiment: Scala -: @@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #child } +: @@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #child } Java -: @@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #child } +: @@snip [FaultHandlingTest.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingTest.java) { #child } The test is easier by using the utilities described in @scala[@ref:[Testing Actor Systems](testing.md)]@java[@ref:[TestKit](testing.md)], where `TestProbe` provides an actor ref useful for receiving and inspecting replies. Scala -: @@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #testkit } +: @@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #testkit } Java -: @@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #testkit } +: @@snip [FaultHandlingTest.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingTest.java) { #testkit } Let us create actors: Scala -: @@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #create } +: @@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #create } Java -: @@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #create } +: @@snip [FaultHandlingTest.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingTest.java) { #create } The first test shall demonstrate the `Resume` directive, so we try it out by setting some non-initial state in the actor and have it fail: Scala -: @@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #resume } +: @@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #resume } Java -: @@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #resume } +: @@snip [FaultHandlingTest.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingTest.java) { #resume } As you can see the value 42 survives the fault handling directive. 
Now, if we change the failure to a more serious `NullPointerException`, that will no longer be the case: Scala -: @@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #restart } +: @@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #restart } Java -: @@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #restart } +: @@snip [FaultHandlingTest.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingTest.java) { #restart } And finally in case of the fatal `IllegalArgumentException` the child will be terminated by the supervisor: Scala -: @@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #stop } +: @@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #stop } Java -: @@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #stop } +: @@snip [FaultHandlingTest.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingTest.java) { #stop } Up to now the supervisor was completely unaffected by the child’s failure, because the directives set did handle it. In case of an `Exception`, this is not true anymore and the supervisor escalates the failure. Scala -: @@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #escalate-kill } +: @@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #escalate-kill } Java -: @@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #escalate-kill } +: @@snip [FaultHandlingTest.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingTest.java) { #escalate-kill } The supervisor itself is supervised by the top-level actor provided by the `ActorSystem`, which has the default policy to restart in case of all @@ -214,16 +214,16 @@ In case this is not desired (which depends on the use case), we need to use a different supervisor which overrides this behavior. Scala -: @@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #supervisor2 } +: @@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #supervisor2 } Java -: @@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #supervisor2 } +: @@snip [FaultHandlingTest.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingTest.java) { #supervisor2 } With this parent, the child survives the escalated restart, as demonstrated in the last test: Scala -: @@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #escalate-restart } +: @@snip [FaultHandlingDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala) { #escalate-restart } Java -: @@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #escalate-restart } +: @@snip [FaultHandlingTest.java](/akka-docs/src/test/java/jdocs/actor/FaultHandlingTest.java) { #escalate-restart } diff --git a/akka-docs/src/main/paradox/fsm.md b/akka-docs/src/main/paradox/fsm.md index 1d58891174..631729f609 100644 --- a/akka-docs/src/main/paradox/fsm.md +++ b/akka-docs/src/main/paradox/fsm.md @@ -40,28 +40,28 @@ send them on after the burst ended or a flush request is received. 
First, consider all of the below to use these import statements: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #simple-imports } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #simple-imports } Java -: @@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #simple-imports } +: @@snip [Buncher.java](/akka-docs/src/test/java/jdocs/actor/fsm/Buncher.java) { #simple-imports } The contract of our “Buncher” actor is that it accepts or produces the following messages: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #simple-events } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #simple-events } Java -: @@snip [Events.java]($code$/java/jdocs/actor/fsm/Events.java) { #simple-events } +: @@snip [Events.java](/akka-docs/src/test/java/jdocs/actor/fsm/Events.java) { #simple-events } `SetTarget` is needed for starting it up, setting the destination for the `Batches` to be passed on; `Queue` will add to the internal queue while `Flush` will mark the end of a burst. Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #simple-state } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #simple-state } Java -: @@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #simple-state } +: @@snip [Buncher.java](/akka-docs/src/test/java/jdocs/actor/fsm/Buncher.java) { #simple-state } The actor can be in two states: no message queued (aka `Idle`) or some message queued (aka `Active`). It will stay in the `Active` state as long as @@ -72,10 +72,10 @@ the actual queue of messages. Now let’s take a look at the skeleton for our FSM actor: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #simple-fsm } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #simple-fsm } Java -: @@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #simple-fsm } +: @@snip [Buncher.java](/akka-docs/src/test/java/jdocs/actor/fsm/Buncher.java) { #simple-fsm } The basic strategy is to declare the actor, @scala[mixing in the `FSM` trait]@java[by inheriting the `AbstractFSM` class] and specifying the possible states and data values as type parameters. Within @@ -103,10 +103,10 @@ which is not handled by the `when()` block is passed to the `whenUnhandled()` block: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #unhandled-elided } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #unhandled-elided } Java -: @@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #unhandled-elided } +: @@snip [Buncher.java](/akka-docs/src/test/java/jdocs/actor/fsm/Buncher.java) { #unhandled-elided } The first case handled here is adding `Queue()` requests to the internal queue and going to the `Active` state (this does the obvious thing of staying @@ -121,10 +121,10 @@ multiple such blocks and all of them will be tried for matching behavior in case a state transition occurs (i.e. only when the state actually changes). 
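Condensing the pieces discussed so far into one hedged sketch of the Buncher, including such a transition block (messages and state/data types as introduced above; details differ from the full snippets):

```scala
import akka.actor.{ ActorRef, FSM }

// messages and data as described for the Buncher above
final case class SetTarget(ref: ActorRef)
final case class Queue(obj: Any)
case object Flush
final case class Batch(obj: Vector[Any])

sealed trait State
case object Idle extends State
case object Active extends State

sealed trait Data
case object Uninitialized extends Data
final case class Todo(target: ActorRef, queue: Vector[Any]) extends Data

class Buncher extends FSM[State, Data] {
  startWith(Idle, Uninitialized)

  when(Idle) {
    case Event(SetTarget(ref), Uninitialized) =>
      stay using Todo(ref, Vector.empty)
  }

  when(Active) {
    case Event(Flush, t: Todo) =>
      goto(Idle) using t.copy(queue = Vector.empty)
  }

  // transition callback: deliver the batch whenever we fall back to Idle
  onTransition {
    case Active -> Idle =>
      stateData match {
        case Todo(ref, queue) => ref ! Batch(queue)
        case _                => // nothing to send
      }
  }

  whenUnhandled {
    case Event(Queue(obj), t @ Todo(_, v)) =>
      goto(Active) using t.copy(queue = v :+ obj)
    case Event(e, s) =>
      log.warning("unhandled request {} in state {}/{}", e, stateName, s)
      stay
  }

  initialize()
}
```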
Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #transition-elided } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #transition-elided } Java -: @@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #transition-elided } +: @@snip [Buncher.java](/akka-docs/src/test/java/jdocs/actor/fsm/Buncher.java) { #transition-elided } The transition callback is a @scala[partial function]@java[builder constructed by `matchState`, followed by zero or multiple `state`], which takes as input a pair of states—the current and the next state. @scala[The FSM trait includes a convenience @@ -146,10 +146,10 @@ To verify that this buncher actually works, it is quite easy to write a test using the @scala[@ref:[Testing Actor Systems which is conveniently bundled with ScalaTest traits into `AkkaSpec`](testing.md)]@java[@ref:[TestKit](testing.md), here using JUnit as an example]: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #test-code } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #test-code } Java -: @@snip [BuncherTest.java]($code$/java/jdocs/actor/fsm/BuncherTest.java) { #test-code } +: @@snip [BuncherTest.java](/akka-docs/src/test/java/jdocs/actor/fsm/BuncherTest.java) { #test-code } ## Reference @@ -165,10 +165,10 @@ Actor since an Actor is created to drive the FSM. ] Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #simple-fsm } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #simple-fsm } Java -: @@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #simple-fsm } +: @@snip [Buncher.java](/akka-docs/src/test/java/jdocs/actor/fsm/Buncher.java) { #simple-fsm } @@@ note @@ -222,10 +222,10 @@ which is conveniently given using the @scala[partial function literal]@java[stat demonstrated below: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #when-syntax } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #when-syntax } Java -: @@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #when-syntax } +: @@snip [Buncher.java](/akka-docs/src/test/java/jdocs/actor/fsm/Buncher.java) { #when-syntax } @@@ div { .group-scala } @@ -247,10 +247,10 @@ states. If you want to leave the handling of a state “unhandled” (more below it still needs to be declared like this: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #NullFunction } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #NullFunction } Java -: @@snip [FSMDocTest.java]($code$/java/jdocs/actor/fsm/FSMDocTest.java) { #NullFunction } +: @@snip [FSMDocTest.java](/akka-docs/src/test/java/jdocs/actor/fsm/FSMDocTest.java) { #NullFunction } ### Defining the Initial State @@ -271,10 +271,10 @@ do something else in this case you can specify that with `whenUnhandled(stateFunction)`: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #unhandled-syntax } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #unhandled-syntax } Java -: @@snip [FSMDocTest.java]($code$/java/jdocs/actor/fsm/FSMDocTest.java) { #unhandled-syntax } +: @@snip [FSMDocTest.java](/akka-docs/src/test/java/jdocs/actor/fsm/FSMDocTest.java) { #unhandled-syntax } Within this handler the state of the FSM may be queried using the `stateName` method. 
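For illustration, a minimal sketch of such a fallback handler (a hypothetical single-state FSM, not the Buncher example):

```scala
import akka.actor.FSM

sealed trait MyState
case object Idle extends MyState

// Logs anything the state functions did not handle, using stateName
class UnhandledDemo extends FSM[MyState, Unit] {
  startWith(Idle, ())
  when(Idle)(FSM.NullFunction) // handle nothing; everything falls through
  whenUnhandled {
    case Event(msg, _) =>
      log.warning("unhandled message {} in state {}", msg, stateName)
      stay()
  }
  initialize()
}
```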
@@ -314,10 +314,10 @@ does not modify the state transition. All modifiers can be chained to achieve a nice and concise description: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #modifier-syntax } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #modifier-syntax } Java -: @@snip [FSMDocTest.java]($code$/java/jdocs/actor/fsm/FSMDocTest.java) { #modifier-syntax } +: @@snip [FSMDocTest.java](/akka-docs/src/test/java/jdocs/actor/fsm/FSMDocTest.java) { #modifier-syntax } The parentheses are not actually needed in all cases, but they visually distinguish between modifiers and their arguments and therefore make the code @@ -356,10 +356,10 @@ resulting state is needed as it is not possible to modify the transition in progress. Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #transition-syntax } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #transition-syntax } Java -: @@snip [FSMDocTest.java]($code$/java/jdocs/actor/fsm/FSMDocTest.java) { #transition-syntax } +: @@snip [FSMDocTest.java](/akka-docs/src/test/java/jdocs/actor/fsm/FSMDocTest.java) { #transition-syntax } @@@ div { .group-scala } @@ -376,10 +376,10 @@ It is also possible to pass a function object accepting two states to a method: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #alt-transition-syntax } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #alt-transition-syntax } Java -: @@snip [FSMDocTest.java]($code$/java/jdocs/actor/fsm/FSMDocTest.java) { #alt-transition-syntax } +: @@snip [FSMDocTest.java](/akka-docs/src/test/java/jdocs/actor/fsm/FSMDocTest.java) { #alt-transition-syntax } The handlers registered with this method are stacked, so you can intersperse `onTransition` blocks with `when` blocks as suits your design. It @@ -431,13 +431,13 @@ transformed using Scala’s full supplement of functional programming tools. In order to retain type inference, there is a helper function which may be used in case some common handling logic shall be applied to different clauses: -@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #transform-syntax } +@@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #transform-syntax } It goes without saying that the arguments to this method may also be stored, to be used several times, e.g. when applying the same transformation to several `when()` blocks: -@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #alt-transform-syntax } +@@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #alt-transform-syntax } @@@ @@ -495,20 +495,20 @@ may not be used within a `when` block). @@@ Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #stop-syntax } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #stop-syntax } Java -: @@snip [FSMDocTest.java]($code$/java/jdocs/actor/fsm/FSMDocTest.java) { #stop-syntax } +: @@snip [FSMDocTest.java](/akka-docs/src/test/java/jdocs/actor/fsm/FSMDocTest.java) { #stop-syntax } You can use `onTermination(handler)` to specify custom code that is executed when the FSM is stopped. 
The handler is a partial function which takes a `StopEvent(reason, stateName, stateData)` as argument: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #termination-syntax } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #termination-syntax } Java -: @@snip [FSMDocTest.java]($code$/java/jdocs/actor/fsm/FSMDocTest.java) { #termination-syntax } +: @@snip [FSMDocTest.java](/akka-docs/src/test/java/jdocs/actor/fsm/FSMDocTest.java) { #termination-syntax } As for the `whenUnhandled` case, this handler is not stacked, so each invocation of `onTermination` replaces the previously installed handler. @@ -541,10 +541,10 @@ The setting `akka.actor.debug.fsm` in @ref:[configuration](general/configuration event trace by `LoggingFSM` instances: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #logging-fsm } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #logging-fsm } Java -: @@snip [FSMDocTest.java]($code$/java/jdocs/actor/fsm/FSMDocTest.java) { #logging-fsm } +: @@snip [FSMDocTest.java](/akka-docs/src/test/java/jdocs/actor/fsm/FSMDocTest.java) { #logging-fsm } This FSM will log at DEBUG level: @@ -563,10 +563,10 @@ log which may be used during debugging (for tracing how the FSM entered a certain failure state) or for other creative uses: Scala -: @@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #logging-fsm } +: @@snip [FSMDocSpec.scala](/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala) { #logging-fsm } Java -: @@snip [FSMDocTest.java]($code$/java/jdocs/actor/fsm/FSMDocTest.java) { #logging-fsm } +: @@snip [FSMDocTest.java](/akka-docs/src/test/java/jdocs/actor/fsm/FSMDocTest.java) { #logging-fsm } The `logDepth` defaults to zero, which turns off the event log. diff --git a/akka-docs/src/main/paradox/futures.md b/akka-docs/src/main/paradox/futures.md index a310994df2..32eecddec7 100644 --- a/akka-docs/src/main/paradox/futures.md +++ b/akka-docs/src/main/paradox/futures.md @@ -33,10 +33,10 @@ it will use its default dispatcher as the `ExecutionContext`, or you can use the by the @scala[`ExecutionContext` companion object]@java[`ExecutionContexts` class] to wrap `Executors` and `ExecutorServices`, or even create your own. Scala -: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #diy-execution-context } +: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #diy-execution-context } Java -: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports1 #diy-execution-context } +: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports1 #diy-execution-context } ### Within Actors @@ -48,10 +48,10 @@ to reuse the dispatcher for running the Futures by importing @scala[`context.dispatcher`]@java[`getContext().dispatcher()`]. 
Scala -: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #context-dispatcher } +: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #context-dispatcher } Java -: @@snip [ActorWithFuture.java]($code$/java/jdocs/future/ActorWithFuture.java) { #context-dispatcher } +: @@snip [ActorWithFuture.java](/akka-docs/src/test/java/jdocs/future/ActorWithFuture.java) { #context-dispatcher } ## Use with Actors @@ -62,10 +62,10 @@ Using @scala[an `Actor`'s `?`]@java[the `ActorRef`'s `ask`] method to send a mes To wait for and retrieve the actual result the simplest method is: Scala -: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #ask-blocking } +: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #ask-blocking } Java -: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports1 #ask-blocking } +: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports1 #ask-blocking } This will cause the current thread to block and wait for the @scala[`Actor`]@java[`AbstractActor`] to 'complete' the `Future` with its reply. Blocking is discouraged though as it will cause performance problems. @@ -86,7 +86,7 @@ asynchronous composition as described below. When using non-blocking it is better to use the `mapTo` method to safely try to cast a `Future` to an expected type: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #map-to } +@@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #map-to } The `mapTo` method will return a new `Future` that contains the result if the cast was successful, or a `ClassCastException` if not. Handling `Exception`s will be discussed further within this documentation. @@ -99,10 +99,10 @@ Another useful message-transfer pattern is "pipe", which is to send the result o The pipe pattern can be used by importing @java[`akka.pattern.PatternsCS.pipe`.]@scala[`akka.pattern.pipe`, and define or import an implicit instance of `ExecutionContext` in the scope.] Scala -: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #pipe-to-usage } +: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #pipe-to-usage } Java -: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports-ask #imports-pipe #pipe-to-usage } +: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports-ask #imports-pipe #pipe-to-usage } To see how this works in more detail, let's introduce a small example consisting of three different actors, `UserProxyActor`, `UserDataActor` and `UserActivityActor`. 
@@ -120,26 +120,26 @@ then it gets the corresponding result from the appropriate backend actor based o
The message types you send to `UserProxyActor` are `GetUserData` and `GetUserActivities`:

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #pipe-to-proxy-messages }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #pipe-to-proxy-messages }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #pipe-to-proxy-messages }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #pipe-to-proxy-messages }

and `UserData` and @scala[`List[UserActivity]`]@java[`ArrayList`] are returned to the original sender in the end.

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #pipe-to-returned-data }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #pipe-to-returned-data }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #pipe-to-returned-data }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #pipe-to-returned-data }

The backend `UserDataActor` and `UserActivityActor` are defined as follows:

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #pipe-to-user-data-actor }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #pipe-to-user-data-actor }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #pipe-to-user-data-actor }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #pipe-to-user-data-actor }

`UserDataActor` holds the data in memory, so that it can return the current state of the user data quickly upon a request.
@@ -147,10 +147,10 @@ On the other hand, `UserActivityActor` queries into a `repository` to retrieve h
sends the result to the `sender()` which is `UserProxy` in this case, with the pipe pattern.

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #pipe-to-user-activity-actor }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #pipe-to-user-activity-actor }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports-pipe #pipe-to-user-activity-actor }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports-pipe #pipe-to-user-activity-actor }

Since it needs to talk to the separate `repository`, it takes time to retrieve the list of `UserActivity`, hence the return type of `queryHistoricalActivities` is @scala[`Future`]@java[`CompletableFuture`].
@@ -160,10 +160,10 @@ so that the result of the @scala[`Future`]@java[`CompletableFuture`] is sent to
Finally, the definition of `UserProxyActor` is as below.

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #pipe-to-proxy-actor }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #pipe-to-proxy-actor }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports-ask #imports-pipe #pipe-to-proxy-actor }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports-ask #imports-pipe #pipe-to-proxy-actor }

Note that the @scala[`pipeTo`]@java[`pipe`] method is used with the @scala[`?`]@java[`ask`] method.
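Distilled to its essence, the combination at the heart of `UserProxyActor` can be sketched like this (a hypothetical `ProxySketch`; the timeout value and actor names are illustrative):

```scala
import akka.actor.{ Actor, ActorRef }
import akka.pattern.{ ask, pipe }
import akka.util.Timeout
import scala.concurrent.duration._

class ProxySketch(backend: ActorRef) extends Actor {
  implicit val timeout: Timeout = Timeout(3.seconds)
  import context.dispatcher // ExecutionContext for pipeTo

  def receive: Receive = {
    case query =>
      // ask returns a Future; pipeTo delivers its result to the original sender
      (backend ? query).pipeTo(sender())
  }
}
```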
Using @scala[`pipeTo`]@java[`pipe`] with the @scala[`?`]@java[`ask`] method is a common practice when you want to relay a message from one actor to another. @@ -175,10 +175,10 @@ If you find yourself creating a pool of @scala[`Actor`s]@java[`AbstractActor`s] there is an easier (and faster) way: Scala -: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #future-eval } +: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #future-eval } Java -: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports2 #future-eval } +: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports2 #future-eval } In the above code the block passed to `Future` will be executed by the default `Dispatcher`, with the return value of the block used to complete the `Future` (in this case, the result would be the string: "HelloWorld"). @@ -188,32 +188,32 @@ and we also avoid the overhead of managing an @scala[`Actor`]@java[`AbstractActo You can also create already completed Futures using the @scala[`Future` companion]@java[`Futures` class], which can be either successes: Scala -: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #successful } +: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #successful } Java -: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #successful } +: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #successful } Or failures: Scala -: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #failed } +: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #failed } Java -: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #failed } +: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #failed } It is also possible to create an empty `Promise`, to be filled later, and obtain the corresponding `Future`: Scala -: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #promise } +: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #promise } Java -: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #promise } +: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #promise } @@@ div { .group-java } For these examples `PrintResult` is defined as follows: -@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #print-result } +@@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #print-result } @@@ @@ -227,10 +227,10 @@ which performs some operation on the result of the `Future`, and returning a new The return value of the `map` method is another `Future` that will contain the new result: Scala -: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #map } +: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #map } Java -: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports2 #map } +: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports2 #map } In this example we are joining two strings together within a `Future`. 
Instead of waiting for @scala[`this`]@java[`f1`] to complete, we apply our function that calculates the length of the string using the `map` method.
@@ -247,24 +247,24 @@ the `Future` has already been completed, when one of these methods is called.
The `map` method is fine if we are modifying a single `Future`, but if 2 or more `Future`s are involved `map` will not allow you to combine them together:

-@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #wrong-nested-map }
+@@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #wrong-nested-map }

`f3` is a `Future[Future[Int]]` instead of the desired `Future[Int]`. Instead, the `flatMap` method should be used:

-@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #flat-map }
+@@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #flat-map }

Composing futures using nested combinators can sometimes become quite complicated and hard to read; in these cases, using Scala's 'for comprehensions' usually yields more readable code. See the next section for examples.

If you need to do conditional propagation, you can use `filter`:

-@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #filter }
+@@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #filter }

### For Comprehensions

Since `Future` has `map`, `filter` and `flatMap` methods, it can be used in a 'for comprehension':

-@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #for-comprehension }
+@@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #for-comprehension }

Something to keep in mind when doing this is that even though it looks like parts of the above example can run in parallel, each step of the for comprehension is run sequentially. This will happen on separate threads for each step but
@@ -282,13 +282,13 @@ A common use case for this is combining the replies of several `Actor`s into a s
without resorting to calling `Await.result` or `Await.ready` to block for each result. First an example of using `Await.result`:

-@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #composing-wrong }
+@@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #composing-wrong }

Here we wait for the results from the first 2 `Actor`s before sending that result to the third `Actor`. We called `Await.result` 3 times, which caused our little program to block 3 times before getting our final result. Now compare that to this example:

-@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #composing }
+@@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #composing }

Here we have 2 actors processing a single message each. Once the 2 results are available (note that we don't block to get these results!), they are added together and sent to a third `Actor`,
@@ -309,10 +309,10 @@ below are some examples on how that can be done in a non-blocking fashion.
@@@

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #sequence-ask }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #sequence-ask }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports3 #sequence }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports3 #sequence }

To better explain what happened in the example, `Future.sequence` is taking the @scala[`List[Future[Int]]`]@java[`Iterable<Future<Integer>>`] and turning it into a @scala[`Future[List[Int]]`]@java[`Future<Iterable<Integer>>`]. We can then use `map` to work with the @scala[`List[Int]`]@java[`Iterable<Integer>`] directly,
@@ -323,16 +323,16 @@ The `traverse` method is similar to `sequence`, but it takes a sequence of `A` a
@java[and returns a `Future<Iterable<B>>`, enabling parallel map over the sequence, if you use `Futures.future` to create the `Future`.]

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #traverse }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #traverse }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports4 #traverse }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports4 #traverse }

@@@ div { .group-scala }

This is the same result as this example:

-@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #sequence }
+@@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #sequence }

But it may be faster to use `traverse` as it doesn't have to create an intermediate `List[Future[Int]]`.
@@ -345,10 +345,10 @@ and then applies the function to all elements in the sequence of futures, non-bl
the execution will be started when the last of the Futures is completed.

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #fold }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #fold }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports5 #fold }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports5 #fold }

That's all it takes!
@@ -357,10 +357,10 @@ In some cases you don't have a start-value and you're able to use the value of t
as the start-value, you can use `reduce`; it works like this:

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #reduce }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #reduce }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports6 #reduce }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports6 #reduce }

Same as with `fold`, the execution will be done asynchronously when the last of the `Future`s is completed; you can also parallelize it by chunking your futures into sub-sequences and reducing them, and then reducing the reduced results again.
@@ -371,24 +371,24 @@ Sometimes you just want to listen to a `Future` being completed, and react to th
For this `Future` supports `onComplete`, `onSuccess` and `onFailure`, of which the last two are specializations of the first.
Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onSuccess }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #onSuccess }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onSuccess }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #onSuccess }

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onFailure }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #onFailure }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onFailure }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #onFailure }

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onComplete }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #onComplete }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onComplete }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #onComplete }

## Define Ordering
@@ -399,10 +399,10 @@ the specified callback, a `Future` that will have the same result as the `Future
which allows for ordering like in the following sample:

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #and-then }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #and-then }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #and-then }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #and-then }

## Auxiliary Methods
@@ -410,19 +410,19 @@ Java
if the first `Future` fails.

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #fallback-to }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #fallback-to }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #fallback-to }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #fallback-to }

You can also combine two Futures into a new `Future` that will hold a tuple of the two Futures' successful results, using the `zip` operation.

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #zip }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #zip }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #zip }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #zip }

## Exceptions
@@ -435,10 +435,10 @@ It is also possible to handle an `Exception` by returning a different result.
This is done with the `recover` method. For example:

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #recover }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #recover }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #recover }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #recover }

In this example, if the actor replied with an `akka.actor.Status.Failure` containing the `ArithmeticException`, our `Future` would have a result of 0.
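The same behaviour can be sketched without actors, as a minimal standalone example (assuming the global `ExecutionContext`):

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

// The division throws ArithmeticException, failing the Future;
// recover replaces the failure with the fallback value 0
val result: Future[Int] = Future(1 / 0).recover {
  case _: ArithmeticException => 0
}
```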
The `recover` method works very similarly to the standard try/catch blocks,
@@ -449,30 +449,30 @@ You can also use the `recoverWith` method, which has the same relationship to `r
and is used like this:

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #try-recover }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #try-recover }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #try-recover }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #try-recover }

## After

`akka.pattern.after` makes it easy to complete a `Future` with a value or exception after a timeout.

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #after }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #after }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports7 #after }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports7 #after }

## Retry

@scala[`akka.pattern.retry`]@java[`akka.pattern.PatternsCS.retry`] will retry a @scala[`Future`]@java[`CompletionStage`] some number of times with a delay between each attempt.

Scala
-: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #retry }
+: @@snip [FutureDocSpec.scala](/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala) { #retry }

Java
-: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports8 #retry }
+: @@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #imports8 #retry }

@@@ div { .group-java }
@@ -515,7 +515,7 @@ All *async* methods without an explicit Executor are performed using the `ForkJo
When non-async methods are applied on a not yet completed `CompletionStage`, they are completed by
the thread which completes the initial `CompletionStage`:

-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-completion-thread }
+@@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #apply-completion-thread }

In this example Scala `Future` is converted to `CompletionStage` just as Akka does. The completion is delayed: we are calling `thenApply` multiple times on a not yet complete `CompletionStage`, then
@@ -530,7 +530,7 @@ default `thenApply` breaks the chain and executes on `ForkJoinPool.commonPool()`
In the next example `thenApply` methods are executed on an already completed `Future`/`CompletionStage`:

-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-main-thread }
+@@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #apply-main-thread }

First `thenApply` is still executed on `ForkJoinPool.commonPool()` (because it is actually `thenApplyAsync`, which is always executed on the global Java pool).
@@ -546,11 +546,11 @@ and stages are executed on the current thread - the thread which called second a
As mentioned above, default *async* methods are always executed on `ForkJoinPool.commonPool()`:

-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-async-default }
+@@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #apply-async-default }

`CompletionStage` also has *async* methods which take `Executor` as a second parameter, just like `Future`:

-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-async-executor }
+@@snip [FutureDocTest.java](/akka-docs/src/test/java/jdocs/future/FutureDocTest.java) { #apply-async-executor }

This example behaves like `Future`: every stage is executed on an explicitly specified `Executor`.

diff --git a/akka-docs/src/main/paradox/general/configuration.md b/akka-docs/src/main/paradox/general/configuration.md
index a70d102e1a..1b1e7d60f4 100644
--- a/akka-docs/src/main/paradox/general/configuration.md
+++ b/akka-docs/src/main/paradox/general/configuration.md
@@ -310,7 +310,7 @@ substitutions.
You may also specify and parse the configuration programmatically in other ways when instantiating the `ActorSystem`.

-@@snip [ConfigDocSpec.scala]($code$/scala/docs/config/ConfigDocSpec.scala) { #imports #custom-config }
+@@snip [ConfigDocSpec.scala](/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala) { #imports #custom-config }

## Reading configuration from a custom location
@@ -353,7 +353,7 @@ you could put a config string in code using
You can also combine your custom config with the usual config; that might look like:

-@@snip [ConfigDoc.java]($code$/java/jdocs/config/ConfigDoc.java) { #java-custom-config }
+@@snip [ConfigDoc.java](/akka-docs/src/test/java/jdocs/config/ConfigDoc.java) { #java-custom-config }

When working with `Config` objects, keep in mind that there are three "layers" in the cake:
@@ -388,7 +388,7 @@ things like dispatcher, mailbox, router settings, and remote deployment.
Configuration of these features is described in the chapters detailing corresponding topics. An example may look like this:

-@@snip [ConfigDocSpec.scala]($code$/scala/docs/config/ConfigDocSpec.scala) { #deployment-section }
+@@snip [ConfigDocSpec.scala](/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala) { #deployment-section }

@@@ note
@@ -422,64 +422,64 @@ Each Akka module has a reference configuration file with the default values.
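These defaults are merged into the configuration returned by `ConfigFactory.load()`, so they are available unless overridden in your `application.conf`. A minimal sketch (the key shown is one of akka-actor's defaults):

```scala
import com.typesafe.config.ConfigFactory

object ReferenceDefaults extends App {
  val config = ConfigFactory.load()
  // Defined in akka-actor's reference.conf; "INFO" unless overridden
  println(config.getString("akka.loglevel"))
}
```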
### akka-actor -@@snip [reference.conf]($akka$/akka-actor/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-actor/src/main/resources/reference.conf) ### akka-agent -@@snip [reference.conf]($akka$/akka-agent/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-agent/src/main/resources/reference.conf) ### akka-camel -@@snip [reference.conf]($akka$/akka-camel/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-camel/src/main/resources/reference.conf) ### akka-cluster -@@snip [reference.conf]($akka$/akka-cluster/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-cluster/src/main/resources/reference.conf) ### akka-multi-node-testkit -@@snip [reference.conf]($akka$/akka-multi-node-testkit/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-multi-node-testkit/src/main/resources/reference.conf) ### akka-persistence -@@snip [reference.conf]($akka$/akka-persistence/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-persistence/src/main/resources/reference.conf) ### akka-remote -@@snip [reference.conf]($akka$/akka-remote/src/main/resources/reference.conf) { #shared #classic type=none } +@@snip [reference.conf](/akka-remote/src/main/resources/reference.conf) { #shared #classic type=none } ### akka-remote (artery) -@@snip [reference.conf]($akka$/akka-remote/src/main/resources/reference.conf) { #shared #artery type=none } +@@snip [reference.conf](/akka-remote/src/main/resources/reference.conf) { #shared #artery type=none } ### akka-testkit -@@snip [reference.conf]($akka$/akka-testkit/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-testkit/src/main/resources/reference.conf) ### akka-cluster-metrics -@@snip [reference.conf]($akka$/akka-cluster-metrics/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-cluster-metrics/src/main/resources/reference.conf) ### akka-cluster-tools -@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-cluster-tools/src/main/resources/reference.conf) ### akka-cluster-sharding -@@snip [reference.conf]($akka$/akka-cluster-sharding/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-cluster-sharding/src/main/resources/reference.conf) ### akka-distributed-data -@@snip [reference.conf]($akka$/akka-distributed-data/src/main/resources/reference.conf) +@@snip [reference.conf](/akka-distributed-data/src/main/resources/reference.conf) diff --git a/akka-docs/src/main/paradox/general/jmm.md b/akka-docs/src/main/paradox/general/jmm.md index 0afff787d6..9b91cd8483 100644 --- a/akka-docs/src/main/paradox/general/jmm.md +++ b/akka-docs/src/main/paradox/general/jmm.md @@ -67,6 +67,6 @@ Since Akka runs on the JVM there are still some rules to be followed. * Closing over internal Actor state and exposing it to other threads -@@snip [SharedMutableStateDocSpec.scala]($code$/scala/docs/actor/SharedMutableStateDocSpec.scala) { #mutable-state } +@@snip [SharedMutableStateDocSpec.scala](/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala) { #mutable-state } - * Messages **should** be immutable, this is to avoid the shared mutable state trap. \ No newline at end of file + * Messages **should** be immutable, this is to avoid the shared mutable state trap. 
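To make that last guideline concrete, a minimal sketch of an immutable message type (the names are illustrative):

```scala
// Immutable by construction: only vals, and an immutable collection type.
// Safe to share across threads and actors.
final case class RegisterDevice(deviceId: String, tags: List[String])
```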
diff --git a/akka-docs/src/main/paradox/general/stream/stream-configuration.md b/akka-docs/src/main/paradox/general/stream/stream-configuration.md
index c176d06dc0..9170e10443 100644
--- a/akka-docs/src/main/paradox/general/stream/stream-configuration.md
+++ b/akka-docs/src/main/paradox/general/stream/stream-configuration.md
@@ -1,3 +1,3 @@
# Configuration

-@@snip [reference.conf]($akka$/akka-stream/src/main/resources/reference.conf) \ No newline at end of file
+@@snip [reference.conf](/akka-stream/src/main/resources/reference.conf)
diff --git a/akka-docs/src/main/paradox/general/supervision.md b/akka-docs/src/main/paradox/general/supervision.md
index 8619537c7f..7f6c84daf9 100644
--- a/akka-docs/src/main/paradox/general/supervision.md
+++ b/akka-docs/src/main/paradox/general/supervision.md
@@ -210,13 +210,13 @@ to recover before the persistent actor is started.
The following Scala snippet shows how to create a backoff supervisor which will start the given echo actor after it has stopped because of a failure, in increasing intervals of 3, 6, 12, 24 and finally 30 seconds:

-@@snip [BackoffSupervisorDocSpec.scala]($code$/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-stop }
+@@snip [BackoffSupervisorDocSpec.scala](/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-stop }

The above is equivalent to this Java code:

-@@snip [BackoffSupervisorDocTest.java]($code$/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-imports }
+@@snip [BackoffSupervisorDocTest.java](/akka-docs/src/test/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-imports }

-@@snip [BackoffSupervisorDocTest.java]($code$/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-stop }
+@@snip [BackoffSupervisorDocTest.java](/akka-docs/src/test/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-stop }

Using a `randomFactor` to add a little bit of additional variance to the backoff intervals is highly recommended, in order to avoid multiple actors re-starting at the exact same point in time,
@@ -231,23 +231,23 @@ crashes and the supervision strategy decides that it should restart.
The following Scala snippet shows how to create a backoff supervisor which will start the given echo actor after it has crashed because of some exception, in increasing intervals of 3, 6, 12, 24 and finally 30 seconds:

-@@snip [BackoffSupervisorDocSpec.scala]($code$/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-fail }
+@@snip [BackoffSupervisorDocSpec.scala](/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-fail }

The above is equivalent to this Java code:

-@@snip [BackoffSupervisorDocTest.java]($code$/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-imports }
+@@snip [BackoffSupervisorDocTest.java](/akka-docs/src/test/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-imports }

-@@snip [BackoffSupervisorDocTest.java]($code$/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-fail }
+@@snip [BackoffSupervisorDocTest.java](/akka-docs/src/test/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-fail }

The `akka.pattern.BackoffOptions` can be used to customize the behavior of the back-off supervisor actor; below are some examples:

-@@snip [BackoffSupervisorDocSpec.scala]($code$/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-custom-stop }
+@@snip [BackoffSupervisorDocSpec.scala](/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-custom-stop }

The above code sets up a back-off supervisor that requires the child actor to send an `akka.pattern.BackoffSupervisor.Reset` message to its parent when a message is successfully processed, resetting the back-off. It also uses a default stopping strategy; any exception will cause the child to stop.

-@@snip [BackoffSupervisorDocSpec.scala]($code$/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-custom-fail }
+@@snip [BackoffSupervisorDocSpec.scala](/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-custom-fail }

The above code sets up a back-off supervisor that restarts the child after back-off if `MyException` is thrown; any other exception will be escalated. The back-off is automatically reset if the child does not throw any errors within 10 seconds.
diff --git a/akka-docs/src/main/paradox/guide/tutorial_1.md b/akka-docs/src/main/paradox/guide/tutorial_1.md
index fbd4787d1a..8412d8e98c 100644
--- a/akka-docs/src/main/paradox/guide/tutorial_1.md
+++ b/akka-docs/src/main/paradox/guide/tutorial_1.md
@@ -40,10 +40,10 @@ The easiest way to see the actor hierarchy in action is to print `ActorRef` inst
In your Hello World project, navigate to the `com.lightbend.akka.sample` package and create a new @scala[Scala file called `ActorHierarchyExperiments.scala`]@java[Java file called `ActorHierarchyExperiments.java`] here. Copy and paste the code from the snippet below to this new source file. Save your file and run `sbt "runMain com.lightbend.akka.sample.ActorHierarchyExperiments"` to observe the output.

Scala
-: @@snip [ActorHierarchyExperiments.scala]($code$/scala/tutorial_1/ActorHierarchyExperiments.scala) { #print-refs }
+: @@snip [ActorHierarchyExperiments.scala](/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala) { #print-refs }

Java
-: @@snip [ActorHierarchyExperiments.java]($code$/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #print-refs }
+: @@snip [ActorHierarchyExperiments.java](/akka-docs/src/test/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #print-refs }
We sent the message by using the parent's reference: @scala[`firstRef ! "printit"`]@java[`firstRef.tell("printit", ActorRef.noSender())`]. When the code executes, the output includes the references for the first actor and the child it created as part of the `printit` case. Your output should look similar to the following: @@ -79,18 +79,18 @@ The Akka actor API exposes many lifecycle hooks that you can override in an acto Let's use the `preStart()` and `postStop()` lifecycle hooks in a simple experiment to observe the behavior when we stop an actor. First, add the following 2 actor classes to your project: Scala -: @@snip [ActorHierarchyExperiments.scala]($code$/scala/tutorial_1/ActorHierarchyExperiments.scala) { #start-stop } +: @@snip [ActorHierarchyExperiments.scala](/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala) { #start-stop } Java -: @@snip [ActorHierarchyExperiments.java]($code$/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #start-stop } +: @@snip [ActorHierarchyExperiments.java](/akka-docs/src/test/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #start-stop } And create a 'main' class like above to start the actors and then send them a `"stop"` message: Scala -: @@snip [ActorHierarchyExperiments.scala]($code$/scala/tutorial_1/ActorHierarchyExperiments.scala) { #start-stop-main } +: @@snip [ActorHierarchyExperiments.scala](/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala) { #start-stop-main } Java -: @@snip [ActorHierarchyExperiments.java]($code$/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #start-stop-main } +: @@snip [ActorHierarchyExperiments.java](/akka-docs/src/test/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #start-stop-main } You can again use `sbt` to start this program. The output should look like this: @@ -115,18 +115,18 @@ stop and restart the child. If you don't change the default strategy all failure Let's observe the default strategy in a simple experiment. Add the following classes to your project, just as you did with the previous ones: Scala -: @@snip [ActorHierarchyExperiments.scala]($code$/scala/tutorial_1/ActorHierarchyExperiments.scala) { #supervise } +: @@snip [ActorHierarchyExperiments.scala](/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala) { #supervise } Java -: @@snip [ActorHierarchyExperiments.java]($code$/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #supervise } +: @@snip [ActorHierarchyExperiments.java](/akka-docs/src/test/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #supervise } And run with: Scala -: @@snip [ActorHierarchyExperiments.scala]($code$/scala/tutorial_1/ActorHierarchyExperiments.scala) { #supervise-main } +: @@snip [ActorHierarchyExperiments.scala](/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala) { #supervise-main } Java -: @@snip [ActorHierarchyExperiments.java]($code$/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #supervise-main } +: @@snip [ActorHierarchyExperiments.java](/akka-docs/src/test/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #supervise-main } You should see output similar to the following: diff --git a/akka-docs/src/main/paradox/guide/tutorial_2.md b/akka-docs/src/main/paradox/guide/tutorial_2.md index 0cc1acb451..88b27a0292 100644 --- a/akka-docs/src/main/paradox/guide/tutorial_2.md +++ b/akka-docs/src/main/paradox/guide/tutorial_2.md @@ -24,10 +24,10 @@ We can define the first actor, the IotSupervisor, with a few simple lines of cod 1. 
Paste the following code into the new file to define the IotSupervisor. Scala -: @@snip [IotSupervisor.scala]($code$/scala/tutorial_2/IotSupervisor.scala) { #iot-supervisor } +: @@snip [IotSupervisor.scala](/akka-docs/src/test/scala/tutorial_2/IotSupervisor.scala) { #iot-supervisor } Java -: @@snip [IotSupervisor.java]($code$/java/jdocs/tutorial_2/IotSupervisor.java) { #iot-supervisor } +: @@snip [IotSupervisor.java](/akka-docs/src/test/java/jdocs/tutorial_2/IotSupervisor.java) { #iot-supervisor } The code is similar to the actor examples we used in the previous experiments, but notice: @@ -37,10 +37,10 @@ The code is similar to the actor examples we used in the previous experiments, b To provide the `main` entry point that creates the actor system, add the following code to the new @scala[`IotApp` object] @java[`IotMain` class]. Scala -: @@snip [IotApp.scala]($code$/scala/tutorial_2/IotApp.scala) { #iot-app } +: @@snip [IotApp.scala](/akka-docs/src/test/scala/tutorial_2/IotApp.scala) { #iot-app } Java -: @@snip [IotMain.java]($code$/java/jdocs/tutorial_2/IotMain.java) { #iot-app } +: @@snip [IotMain.java](/akka-docs/src/test/java/jdocs/tutorial_2/IotMain.java) { #iot-app } The application does little, other than print out that it is started. But, we have the first actor in place and we are ready to add other actors. diff --git a/akka-docs/src/main/paradox/guide/tutorial_3.md b/akka-docs/src/main/paradox/guide/tutorial_3.md index 6a0c2d939f..5884e54dab 100644 --- a/akka-docs/src/main/paradox/guide/tutorial_3.md +++ b/akka-docs/src/main/paradox/guide/tutorial_3.md @@ -37,10 +37,10 @@ The protocol for obtaining the current temperature from the device actor is simp We need two messages, one for the request, and one for the reply. Our first attempt might look like the following: Scala -: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_3/DeviceInProgress.scala) { #read-protocol-1 } +: @@snip [DeviceInProgress.scala](/akka-docs/src/test/scala/tutorial_3/DeviceInProgress.scala) { #read-protocol-1 } Java -: @@snip [DeviceInProgress.java]($code$/java/jdocs/tutorial_3/DeviceInProgress.java) { #read-protocol-1 } +: @@snip [DeviceInProgress.java](/akka-docs/src/test/java/jdocs/tutorial_3/DeviceInProgress.java) { #read-protocol-1 } These two messages seem to cover the required functionality. However, the approach we choose must take into account the distributed nature of the application. While the basic mechanism is the same for communicating with an actor on the local JVM as with a remote actor, we need to keep the following in mind: @@ -123,20 +123,20 @@ For the full details on delivery guarantees please refer to the @ref:[reference Our first query protocol was correct, but did not take into account distributed application execution. If we want to implement resends in the actor that queries a device actor (because of timed out requests), or if we want to query multiple actors, we need to be able to correlate requests and responses. 
Hence, we add one more field to our messages, so that an ID can be provided by the requester (we will add this code to our app in a later step): Scala -: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_3/DeviceInProgress.scala) { #read-protocol-2 } +: @@snip [DeviceInProgress.scala](/akka-docs/src/test/scala/tutorial_3/DeviceInProgress.scala) { #read-protocol-2 } Java -: @@snip [DeviceInProgress2.java]($code$/java/jdocs/tutorial_3/inprogress2/DeviceInProgress2.java) { #read-protocol-2 } +: @@snip [DeviceInProgress2.java](/akka-docs/src/test/java/jdocs/tutorial_3/inprogress2/DeviceInProgress2.java) { #read-protocol-2 } ## Defining the device actor and its read protocol As we learned in the Hello World example, each actor defines the type of messages it will accept. Our device actor has the responsibility to use the same ID parameter for the response of a given query, which would make it look like the following. Scala -: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_3/DeviceInProgress.scala) { #device-with-read } +: @@snip [DeviceInProgress.scala](/akka-docs/src/test/scala/tutorial_3/DeviceInProgress.scala) { #device-with-read } Java -: @@snip [DeviceInProgress2.java]($code$/java/jdocs/tutorial_3/inprogress2/DeviceInProgress2.java) { #device-with-read } +: @@snip [DeviceInProgress2.java](/akka-docs/src/test/java/jdocs/tutorial_3/inprogress2/DeviceInProgress2.java) { #device-with-read } Note in the code that: @@ -152,10 +152,10 @@ Based on the simple actor above, we could write a simple test. In the `com.light You can run this test @java[by running `mvn test` or] by running `test` at the sbt prompt. Scala -: @@snip [DeviceSpec.scala]($code$/scala/tutorial_3/DeviceSpec.scala) { #device-read-test } +: @@snip [DeviceSpec.scala](/akka-docs/src/test/scala/tutorial_3/DeviceSpec.scala) { #device-read-test } Java -: @@snip [DeviceTest.java]($code$/java/jdocs/tutorial_3/DeviceTest.java) { #device-read-test } +: @@snip [DeviceTest.java](/akka-docs/src/test/java/jdocs/tutorial_3/DeviceTest.java) { #device-read-test } Now, the actor needs a way to change the state of the temperature when it receives a message from the sensor. @@ -164,10 +164,10 @@ Now, the actor needs a way to change the state of the temperature when it receiv The purpose of the write protocol is to update the `currentTemperature` field when the actor receives a message that contains the temperature. Again, it is tempting to define the write protocol as a very simple message, something like this: Scala -: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_3/DeviceInProgress.scala) { #write-protocol-1 } +: @@snip [DeviceInProgress.scala](/akka-docs/src/test/scala/tutorial_3/DeviceInProgress.scala) { #write-protocol-1 } Java -: @@snip [DeviceInProgress3.java]($code$/java/jdocs/tutorial_3/DeviceInProgress3.java) { #write-protocol-1 } +: @@snip [DeviceInProgress3.java](/akka-docs/src/test/java/jdocs/tutorial_3/DeviceInProgress3.java) { #write-protocol-1 } However, this approach does not take into account that the sender of the record temperature message can never be sure if the message was processed or not. We have seen that Akka does not guarantee delivery of these messages and leaves it to the application to provide success notifications. In our case, we would like to send an acknowledgment to the sender once we have updated our last temperature recording, e.g. @scala[`final case class TemperatureRecorded(requestId: Long)`]@java[`TemperatureRecorded`]. 
Just like in the case of temperature queries and responses, it is a good idea to include an ID field to provide maximum flexibility. @@ -177,18 +177,18 @@ Just like in the case of temperature queries and responses, it is a good idea to Putting the read and write protocol together, the device actor looks like the following example: Scala -: @@snip [Device.scala]($code$/scala/tutorial_3/Device.scala) { #full-device } +: @@snip [Device.scala](/akka-docs/src/test/scala/tutorial_3/Device.scala) { #full-device } Java -: @@snip [Device.java]($code$/java/jdocs/tutorial_3/Device.java) { #full-device } +: @@snip [Device.java](/akka-docs/src/test/java/jdocs/tutorial_3/Device.java) { #full-device } We should also write a new test case now, exercising both the read/query and write/record functionality together: Scala: -: @@snip [DeviceSpec.scala]($code$/scala/tutorial_3/DeviceSpec.scala) { #device-write-read-test } +: @@snip [DeviceSpec.scala](/akka-docs/src/test/scala/tutorial_3/DeviceSpec.scala) { #device-write-read-test } Java: -: @@snip [DeviceTest.java]($code$/java/jdocs/tutorial_3/DeviceTest.java) { #device-write-read-test } +: @@snip [DeviceTest.java](/akka-docs/src/test/java/jdocs/tutorial_3/DeviceTest.java) { #device-write-read-test } ## What's Next? diff --git a/akka-docs/src/main/paradox/guide/tutorial_4.md b/akka-docs/src/main/paradox/guide/tutorial_4.md index e54f9d3e73..370738f00d 100644 --- a/akka-docs/src/main/paradox/guide/tutorial_4.md +++ b/akka-docs/src/main/paradox/guide/tutorial_4.md @@ -78,10 +78,10 @@ The messages that we will use to communicate registration requests and their acknowledgement have a simple definition: Scala -: @@snip [DeviceManager.scala]($code$/scala/tutorial_4/DeviceManager.scala) { #device-manager-msgs } +: @@snip [DeviceManager.scala](/akka-docs/src/test/scala/tutorial_4/DeviceManager.scala) { #device-manager-msgs } Java -: @@snip [DeviceManager.java]($code$/java/jdocs/tutorial_4/DeviceManager.java) { #device-manager-msgs } +: @@snip [DeviceManager.java](/akka-docs/src/test/java/jdocs/tutorial_4/DeviceManager.java) { #device-manager-msgs } In this case we have not included a request ID field in the messages. Since registration happens once, when the component connects the system to some network protocol, the ID is not important. However, it is usually a best practice to include a request ID. @@ -97,10 +97,10 @@ message is preserved in the upper layers.* We will show you in the next section The device actor registration code looks like the following. Modify your example to match. 
Scala -: @@snip [Device.scala]($code$/scala/tutorial_4/Device.scala) { #device-with-register } +: @@snip [Device.scala](/akka-docs/src/test/scala/tutorial_4/Device.scala) { #device-with-register } Java -: @@snip [Device.java]($code$/java/jdocs/tutorial_4/Device.java) { #device-with-register } +: @@snip [Device.java](/akka-docs/src/test/java/jdocs/tutorial_4/Device.java) { #device-with-register } @@@ note { .group-scala } @@ -111,10 +111,10 @@ We used a feature of scala pattern matching where we can check to see if a certa We can now write two new test cases, one exercising successful registration, the other testing the case when IDs don't match: Scala -: @@snip [DeviceSpec.scala]($code$/scala/tutorial_4/DeviceSpec.scala) { #device-registration-tests } +: @@snip [DeviceSpec.scala](/akka-docs/src/test/scala/tutorial_4/DeviceSpec.scala) { #device-registration-tests } Java -: @@snip [DeviceTest.java]($code$/java/jdocs/tutorial_4/DeviceTest.java) { #device-registration-tests } +: @@snip [DeviceTest.java](/akka-docs/src/test/java/jdocs/tutorial_4/DeviceTest.java) { #device-registration-tests } @@@ note @@ -138,27 +138,27 @@ We also want to keep the ID of the original sender of the request so that our de sender while @scala[`!`] @java[`tell`] sets the sender to be the current actor. Just like with our device actor, we ensure that we don't respond to wrong group IDs. Add the following to your source file: Scala -: @@snip [DeviceGroup.scala]($code$/scala/tutorial_4/DeviceGroup.scala) { #device-group-register } +: @@snip [DeviceGroup.scala](/akka-docs/src/test/scala/tutorial_4/DeviceGroup.scala) { #device-group-register } Java -: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_4/DeviceGroup.java) { #device-group-register } +: @@snip [DeviceGroup.java](/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroup.java) { #device-group-register } Just as we did with the device, we test this new functionality. We also test that the actors returned for the two different IDs are actually different, and we also attempt to record a temperature reading for each of the devices to see if the actors are responding. Scala -: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_4/DeviceGroupSpec.scala) { #device-group-test-registration } +: @@snip [DeviceGroupSpec.scala](/akka-docs/src/test/scala/tutorial_4/DeviceGroupSpec.scala) { #device-group-test-registration } Java -: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupTest.java) { #device-group-test-registration } +: @@snip [DeviceGroupTest.java](/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupTest.java) { #device-group-test-registration } If a device actor already exists for the registration request, we would like to use the existing actor instead of a new one. 
We have not tested this behavior yet, so we need to add a test for it: Scala -: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_4/DeviceGroupSpec.scala) { #device-group-test3 } +: @@snip [DeviceGroupSpec.scala](/akka-docs/src/test/scala/tutorial_4/DeviceGroupSpec.scala) { #device-group-test3 } Java -: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupTest.java) { #device-group-test3 } +: @@snip [DeviceGroupTest.java](/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupTest.java) { #device-group-test3 } ### Keeping track of the device actors in the group @@ -177,19 +177,19 @@ Unfortunately, the `Terminated` message only contains the `ActorRef` of the chil Adding the functionality to identify the actor results in this: Scala -: @@snip [DeviceGroup.scala]($code$/scala/tutorial_4/DeviceGroup.scala) { #device-group-remove } +: @@snip [DeviceGroup.scala](/akka-docs/src/test/scala/tutorial_4/DeviceGroup.scala) { #device-group-remove } Java -: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_4/DeviceGroup.java) { #device-group-remove } +: @@snip [DeviceGroup.java](/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroup.java) { #device-group-remove } So far, we have no way to find out which devices the device group actor keeps track of and, therefore, we cannot test our new functionality yet. To make it testable, we add a new query capability (message @scala[`RequestDeviceList(requestId: Long)`] @java[`RequestDeviceList`]) that lists the currently active device IDs: Scala -: @@snip [DeviceGroup.scala]($code$/scala/tutorial_4/DeviceGroup.scala) { #device-group-full } +: @@snip [DeviceGroup.scala](/akka-docs/src/test/scala/tutorial_4/DeviceGroup.scala) { #device-group-full } Java -: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_4/DeviceGroup.java) { #device-group-full } +: @@snip [DeviceGroup.java](/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroup.java) { #device-group-full } We are almost ready to test the removal of devices. But we still need the following capabilities: @@ -201,20 +201,20 @@ We are almost ready to test the removal of devices. But we still need the follo We add two more test cases now. In the first, we test that we get back the list of proper IDs once we have added a few devices. The second test case makes sure that the device ID is properly removed after the device actor has been stopped: Scala -: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_4/DeviceGroupSpec.scala) { #device-group-list-terminate-test } +: @@snip [DeviceGroupSpec.scala](/akka-docs/src/test/scala/tutorial_4/DeviceGroupSpec.scala) { #device-group-list-terminate-test } Java -: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupTest.java) { #device-group-list-terminate-test } +: @@snip [DeviceGroupTest.java](/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupTest.java) { #device-group-list-terminate-test } ## Creating device manager actors Going up to the next level in our hierarchy, we need to create the entry point for our device manager component in the `DeviceManager` source file.
This actor is very similar to the device group actor, but creates device group actors instead of device actors: Scala -: @@snip [DeviceManager.scala]($code$/scala/tutorial_4/DeviceManager.scala) { #device-manager-full } +: @@snip [DeviceManager.scala](/akka-docs/src/test/scala/tutorial_4/DeviceManager.scala) { #device-manager-full } Java -: @@snip [DeviceManager.java]($code$/java/jdocs/tutorial_4/DeviceManager.java) { #device-manager-full } +: @@snip [DeviceManager.java](/akka-docs/src/test/java/jdocs/tutorial_4/DeviceManager.java) { #device-manager-full } We leave tests of the device manager as an exercise for you since they are very similar to the tests we have already written for the group actor. diff --git a/akka-docs/src/main/paradox/guide/tutorial_5.md b/akka-docs/src/main/paradox/guide/tutorial_5.md index 0468bb6bfd..f85b50de11 100644 --- a/akka-docs/src/main/paradox/guide/tutorial_5.md +++ b/akka-docs/src/main/paradox/guide/tutorial_5.md @@ -48,10 +48,10 @@ for each device actor, with respect to a temperature query: Summarizing these in message types, we can add the following to `DeviceGroup`: Scala -: @@snip [DeviceGroup.scala]($code$/scala/tutorial_5/DeviceGroup.scala) { #query-protocol } +: @@snip [DeviceGroup.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroup.scala) { #query-protocol } Java -: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_5/DeviceGroup.java) { #query-protocol } +: @@snip [DeviceGroup.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroup.java) { #query-protocol } ## Implementing the query @@ -89,10 +89,10 @@ until the timeout to mark these as not available. Putting this together, the outline of our `DeviceGroupQuery` actor looks like this: Scala -: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_5/DeviceGroupQuery.scala) { #query-outline } +: @@snip [DeviceGroupQuery.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala) { #query-outline } Java -: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-outline } +: @@snip [DeviceGroupQuery.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-outline } #### Tracking actor state @@ -123,10 +123,10 @@ To accomplish this, add the following to your `DeviceGroupQuery` source file: Scala -: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_5/DeviceGroupQuery.scala) { #query-state } +: @@snip [DeviceGroupQuery.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala) { #query-state } Java -: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-state } +: @@snip [DeviceGroupQuery.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-state } It is not yet clear how we will "mutate" the `repliesSoFar` and `stillWaiting` data structures. One important thing to note is that the function `waitingForReplies` **does not handle the messages directly. It returns a `Receive` function that will handle the messages**. This means that if we call `waitingForReplies` again, with different parameters, then it returns a brand new `Receive` that will use those new parameters. @@ -153,10 +153,10 @@ only the first call will have any effect, the rest is ignored.
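Since the patch carries only snippet references, here is a minimal sketch of the behavior-returning pattern described above (the names `Collector`, `Reply`, and `waiting` are illustrative, not the tutorial's actual code):

```scala
import akka.actor.{ Actor, ActorRef }

object Collector {
  final case class Reply(from: ActorRef, value: Long)
}

class Collector(expected: Set[ActorRef]) extends Actor {
  import Collector._

  // waiting(...) does not handle messages itself; it *returns* a Receive
  // that closes over the current parameters. Re-invoking it with updated
  // parameters and passing the result to context.become swaps the behavior.
  def waiting(replies: Map[ActorRef, Long], stillWaiting: Set[ActorRef]): Receive = {
    case Reply(from, value) if stillWaiting(from) =>
      context.become(waiting(replies + (from -> value), stillWaiting - from))
  }

  override def receive: Receive = waiting(Map.empty, expected)
}
```

The immutable maps and sets are never mutated in place; each state transition installs a fresh `Receive` built from fresh parameters.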
With all this knowledge, we can create the `receivedResponse` method: Scala -: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_5/DeviceGroupQuery.scala) { #query-collect-reply } +: @@snip [DeviceGroupQuery.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala) { #query-collect-reply } Java -: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-collect-reply } +: @@snip [DeviceGroupQuery.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-collect-reply } It is quite natural to ask at this point: what have we gained by using the `context.become()` trick instead of making the `repliesSoFar` and `stillWaiting` structures mutable fields of the actor (i.e. `var`s)? In this @@ -171,10 +171,10 @@ with the solution we have used here as it helps structuring more complex actor c Our query actor is now done: Scala -: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_5/DeviceGroupQuery.scala) { #query-full } +: @@snip [DeviceGroupQuery.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala) { #query-full } Java -: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-full } +: @@snip [DeviceGroupQuery.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-full } ### Testing the query actor @@ -185,46 +185,46 @@ to the query actor, so we can pass in @scala[`TestProbe`] @java[`TestKit`] refer there are two devices and both report a temperature: Scala -: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-normal } +: @@snip [DeviceGroupQuerySpec.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-normal } Java -: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-normal } +: @@snip [DeviceGroupQueryTest.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-normal } That was the happy case, but we know that sometimes devices cannot provide a temperature measurement. This scenario is just slightly different from the previous: Scala -: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-no-reading } +: @@snip [DeviceGroupQuerySpec.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-no-reading } Java -: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-no-reading } +: @@snip [DeviceGroupQueryTest.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-no-reading } We also know that sometimes device actors stop before answering: Scala -: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-stopped } +: @@snip [DeviceGroupQuerySpec.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-stopped } Java -: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-stopped } +: @@snip [DeviceGroupQueryTest.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-stopped } If you remember, there is another case related to device actors stopping. It is possible that we get a normal reply from a device actor, but then receive a `Terminated` for the same actor later.
In this case, we would like to keep the first reply and not mark the device as `DeviceNotAvailable`. We should test this, too: Scala -: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-stopped-later } +: @@snip [DeviceGroupQuerySpec.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-stopped-later } Java -: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-stopped-later } +: @@snip [DeviceGroupQueryTest.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-stopped-later } The final case is when not all devices respond in time. To keep our test relatively fast, we will construct the `DeviceGroupQuery` actor with a smaller timeout: Scala -: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-timeout } +: @@snip [DeviceGroupQuerySpec.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-timeout } Java -: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-timeout } +: @@snip [DeviceGroupQueryTest.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-timeout } Our query now works as expected, so it is time to include this new functionality in the `DeviceGroup` actor. @@ -234,10 +234,10 @@ Including the query feature in the group actor is fairly simple now. We did all itself; the group actor only needs to create it with the right initial parameters and nothing else. Scala -: @@snip [DeviceGroup.scala]($code$/scala/tutorial_5/DeviceGroup.scala) { #query-added } +: @@snip [DeviceGroup.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroup.scala) { #query-added } Java -: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_5/DeviceGroup.java) { #query-added } +: @@snip [DeviceGroup.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroup.java) { #query-added } It is probably worth restating what we said at the beginning of the chapter. By keeping the temporary state that is only relevant to the query itself in a separate actor we keep the group actor implementation very simple. It delegates everything to child actors and therefore does not have to keep state that is not relevant to its core business. Also, multiple queries can now run in parallel to each other; in fact, as many as needed. In our case querying an individual device actor is a fast operation, but if this were not the case, for example, because the remote sensors need to be contacted over the network, this design would significantly improve throughput. @@ -245,10 +245,10 @@ everything to child actors and therefore does not have to keep state that is not We close this chapter by testing that everything works together.
This test is a variant of the previous ones, now exercising the group query feature: Scala -: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_5/DeviceGroupSpec.scala) { #group-query-integration-test } +: @@snip [DeviceGroupSpec.scala](/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala) { #group-query-integration-test } Java -: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupTest.java) { #group-query-integration-test } +: @@snip [DeviceGroupTest.java](/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupTest.java) { #group-query-integration-test } ## Summary In the context of the IoT system, this guide introduced the following concepts, among others. You can follow the links to review them if necessary: diff --git a/akka-docs/src/main/paradox/howto.md b/akka-docs/src/main/paradox/howto.md index 4790c2b24e..61e7de4db1 100644 --- a/akka-docs/src/main/paradox/howto.md +++ b/akka-docs/src/main/paradox/howto.md @@ -148,7 +148,7 @@ Finally the promise returned by Patterns.ask() is fulfilled as a failure, includ Let's have a look at the example code: -@@snip [SupervisedAsk.java]($code$/java/jdocs/pattern/SupervisedAsk.java) +@@snip [SupervisedAsk.java](/akka-docs/src/test/java/jdocs/pattern/SupervisedAsk.java) In the askOf method the SupervisorCreator is sent the user message. The SupervisorCreator creates a SupervisorActor and forwards the message. @@ -161,7 +161,7 @@ Afterwards the actor hierarchy is stopped. Finally, we are able to execute an actor and receive the results or exceptions. -@@snip [SupervisedAskSpec.java]($code$/java/jdocs/pattern/SupervisedAskSpec.java) +@@snip [SupervisedAskSpec.java](/akka-docs/src/test/java/jdocs/pattern/SupervisedAskSpec.java) @@@ diff --git a/akka-docs/src/main/paradox/io-tcp.md b/akka-docs/src/main/paradox/io-tcp.md index 2faf784a6d..22385d32a1 100644 --- a/akka-docs/src/main/paradox/io-tcp.md +++ b/akka-docs/src/main/paradox/io-tcp.md @@ -15,19 +15,19 @@ To use TCP, you must add the following dependency in your project: The code snippets throughout this section assume the following imports: Scala -: @@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #imports } +: @@snip [IODocSpec.scala](/akka-docs/src/test/scala/docs/io/IODocSpec.scala) { #imports } Java -: @@snip [IODocTest.java]($code$/java/jdocs/io/japi/IODocTest.java) { #imports } +: @@snip [IODocTest.java](/akka-docs/src/test/java/jdocs/io/japi/IODocTest.java) { #imports } All of the Akka I/O APIs are accessed through manager objects. When using an I/O API, the first step is to acquire a reference to the appropriate manager. The code below shows how to acquire a reference to the `Tcp` manager. Scala -: @@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #manager } +: @@snip [IODocSpec.scala](/akka-docs/src/test/scala/docs/io/IODocSpec.scala) { #manager } Java -: @@snip [EchoManager.java]($code$/java/jdocs/io/japi/EchoManager.java) { #manager } +: @@snip [EchoManager.java](/akka-docs/src/test/java/jdocs/io/japi/EchoManager.java) { #manager } The manager is an actor that handles the underlying low level I/O resources (selectors, channels) and instantiates @@ -35,10 +35,10 @@ workers for specific tasks, such as listening to incoming connections.
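For readers of the patch who do not have the referenced `#manager` snippets at hand, a minimal sketch of the lookup (the `MyTcpClient` class name is made up; `IO(Tcp)` is the entry point the snippets reference):

```scala
import akka.actor.{ Actor, ActorRef }
import akka.io.{ IO, Tcp }

class MyTcpClient extends Actor {
  // IO(...) needs the implicit ActorSystem of this actor's context
  import context.system

  // Look up the TCP manager actor; all further commands go through it
  val tcpManager: ActorRef = IO(Tcp)

  override def receive: Receive = Actor.emptyBehavior
}
```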
## Connecting Scala -: @@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #client } +: @@snip [IODocSpec.scala](/akka-docs/src/test/scala/docs/io/IODocSpec.scala) { #client } Java -: @@snip [IODocTest.java]($code$/java/jdocs/io/japi/IODocTest.java) { #client } +: @@snip [IODocTest.java](/akka-docs/src/test/java/jdocs/io/japi/IODocTest.java) { #client } The first step of connecting to a remote address is sending a @scala[`Connect` message]@java[message by the `TcpMessage.connect` method] to the TCP manager; in addition to the simplest form shown above there @@ -81,10 +81,10 @@ fine-grained connection close events, see [Closing Connections](#closing-connect ## Accepting connections Scala -: @@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #server } +: @@snip [IODocSpec.scala](/akka-docs/src/test/scala/docs/io/IODocSpec.scala) { #server } Java -: @@snip [IODocTest.java]($code$/java/jdocs/io/japi/IODocTest.java) { #server } +: @@snip [IODocTest.java](/akka-docs/src/test/java/jdocs/io/japi/IODocTest.java) { #server } To create a TCP server and listen for inbound connections, a @scala[`Bind` command]@java[message by the `TcpMessage.bind` method] has to be sent to the TCP manager. This will instruct the TCP manager @@ -104,10 +104,10 @@ actor in the system to the connection actor (i.e. the actor which sent the `Connected` message). The simplistic handler is defined as: Scala -: @@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #simplistic-handler } +: @@snip [IODocSpec.scala](/akka-docs/src/test/scala/docs/io/IODocSpec.scala) { #simplistic-handler } Java -: @@snip [IODocTest.java]($code$/java/jdocs/io/japi/IODocTest.java) { #simplistic-handler } +: @@snip [IODocTest.java](/akka-docs/src/test/java/jdocs/io/japi/IODocTest.java) { #simplistic-handler } For a more complete sample which also takes into account the possibility of failures when sending please see [Throttling Reads and Writes](#throttling-reads-and-writes) below. @@ -245,18 +245,18 @@ to the client before fully closing the connection. This is enabled using a flag upon connection activation (observe the @scala[`Register` message]@java[`TcpMessage.register` method]): Scala -: @@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #echo-manager } +: @@snip [EchoServer.scala](/akka-docs/src/test/scala/docs/io/EchoServer.scala) { #echo-manager } Java -: @@snip [EchoManager.java]($code$/java/jdocs/io/japi/EchoManager.java) { #echo-manager } +: @@snip [EchoManager.java](/akka-docs/src/test/java/jdocs/io/japi/EchoManager.java) { #echo-manager } With this preparation let us dive into the handler itself: Scala -: @@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #simple-echo-handler } +: @@snip [EchoServer.scala](/akka-docs/src/test/scala/docs/io/EchoServer.scala) { #simple-echo-handler } Java -: @@snip [SimpleEchoHandler.java]($code$/java/jdocs/io/japi/SimpleEchoHandler.java) { #simple-echo-handler } +: @@snip [SimpleEchoHandler.java](/akka-docs/src/test/java/jdocs/io/japi/SimpleEchoHandler.java) { #simple-echo-handler } The principle is simple: when having written a chunk always wait for the `Ack` to come back before sending the next chunk. While waiting we switch @@ -264,10 +264,10 @@ behavior such that new incoming data are buffered. 
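A condensed sketch of that ACK-based principle may be useful here (simplified relative to the referenced `SimpleEchoHandler`; the `Ack` object and `AckedWriter` class are illustrative names, assuming a `connection` actor obtained as shown above):

```scala
import akka.actor.{ Actor, ActorRef }
import akka.io.Tcp._
import akka.util.ByteString

case object Ack extends Event // delivered back by the connection actor

class AckedWriter(connection: ActorRef) extends Actor {
  private var buffer = Vector.empty[ByteString]

  override def receive: Receive = {
    case Received(data) =>
      connection ! Write(data, Ack) // ask for an Ack once this chunk is written
      context.become(waitingForAck)
  }

  private def waitingForAck: Receive = {
    case Received(data) =>
      buffer :+= data               // buffer new data while a write is in flight
    case Ack =>
      if (buffer.isEmpty) context.become(receive)
      else {
        connection ! Write(buffer.head, Ack)
        buffer = buffer.tail
      }
  }
}
```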
The helper functions used are a bit lengthy but not complicated: Scala -: @@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #simple-helpers } +: @@snip [EchoServer.scala](/akka-docs/src/test/scala/docs/io/EchoServer.scala) { #simple-helpers } Java -: @@snip [SimpleEchoHandler.java]($code$/java/jdocs/io/japi/SimpleEchoHandler.java) { #simple-helpers } +: @@snip [SimpleEchoHandler.java](/akka-docs/src/test/java/jdocs/io/japi/SimpleEchoHandler.java) { #simple-helpers } The most interesting part is probably the last: an `Ack` removes the oldest data chunk from the buffer, and if that was the last chunk then we either close @@ -289,10 +289,10 @@ how end-to-end back-pressure is realized across a TCP connection. ## NACK-Based Write Back-Pressure with Suspending Scala -: @@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #echo-handler } +: @@snip [EchoServer.scala](/akka-docs/src/test/scala/docs/io/EchoServer.scala) { #echo-handler } Java -: @@snip [EchoHandler.java]($code$/java/jdocs/io/japi/EchoHandler.java) { #echo-handler } +: @@snip [EchoHandler.java](/akka-docs/src/test/java/jdocs/io/japi/EchoHandler.java) { #echo-handler } The principle here is to keep writing until a `CommandFailed` is received, using acknowledgements only to prune the resend buffer. When such a @@ -300,10 +300,10 @@ failure is received, transition into a different state for handling and handle resending of all queued data: Scala -: @@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #buffering } +: @@snip [EchoServer.scala](/akka-docs/src/test/scala/docs/io/EchoServer.scala) { #buffering } Java -: @@snip [EchoHandler.java]($code$/java/jdocs/io/japi/EchoHandler.java) { #buffering } +: @@snip [EchoHandler.java](/akka-docs/src/test/java/jdocs/io/japi/EchoHandler.java) { #buffering } It should be noted that all writes which are currently buffered have also been sent to the connection actor upon entering this state, which means that the @@ -317,10 +317,10 @@ the first ten writes after a failure before resuming the optimistic write-through behavior. Scala -: @@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #closing } +: @@snip [EchoServer.scala](/akka-docs/src/test/scala/docs/io/EchoServer.scala) { #closing } Java -: @@snip [EchoHandler.java]($code$/java/jdocs/io/japi/EchoHandler.java) { #closing } +: @@snip [EchoHandler.java](/akka-docs/src/test/java/jdocs/io/japi/EchoHandler.java) { #closing } Closing the connection while still sending all data is a bit more involved than in the ACK-based approach: the idea is to always send all outstanding messages @@ -330,10 +330,10 @@ behavior to await the `WritingResumed` event and start over.
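Again, a compressed sketch of the NACK-based approach (the referenced `EchoHandler` additionally tracks sequence numbers and storage limits, omitted here; `Ack` and `NackedWriter` are illustrative names):

```scala
import akka.actor.{ Actor, ActorRef }
import akka.io.Tcp._
import akka.util.ByteString

case object Ack extends Event

class NackedWriter(connection: ActorRef) extends Actor {
  private var pending = Vector.empty[ByteString] // resend buffer, pruned on Ack

  override def receive: Receive = {
    case Received(data) =>
      pending :+= data
      connection ! Write(data, Ack)       // optimistic write-through
    case Ack =>
      pending = pending.drop(1)           // acknowledged: prune the oldest chunk
    case CommandFailed(Write(_, _)) =>
      connection ! ResumeWriting          // ask to be told when writing may resume
      context.become(buffering)
  }

  private def buffering: Receive = {
    case Received(data) =>
      pending :+= data                    // keep queueing while suspended
    case WritingResumed =>
      pending.foreach(chunk => connection ! Write(chunk, Ack)) // resend everything
      context.become(receive)
  }
}
```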
The helper functions are very similar to the ACK-based case: Scala -: @@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #helpers } +: @@snip [EchoServer.scala](/akka-docs/src/test/scala/docs/io/EchoServer.scala) { #helpers } Java -: @@snip [EchoHandler.java]($code$/java/jdocs/io/japi/EchoHandler.java) { #helpers } +: @@snip [EchoHandler.java](/akka-docs/src/test/java/jdocs/io/japi/EchoHandler.java) { #helpers } ## Read Back-Pressure with Pull Mode @@ -346,10 +346,10 @@ With the Pull mode this buffer can be completely eliminated as the following sni demonstrates: Scala -: @@snip [ReadBackPressure.scala]($code$/scala/docs/io/ReadBackPressure.scala) { #pull-reading-echo } +: @@snip [ReadBackPressure.scala](/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala) { #pull-reading-echo } Java -: @@snip [JavaReadBackPressure.java]($code$/java/jdocs/io/JavaReadBackPressure.java) { #pull-reading-echo } +: @@snip [JavaReadBackPressure.java](/akka-docs/src/test/java/jdocs/io/JavaReadBackPressure.java) { #pull-reading-echo } The idea here is that reading is not resumed until the previous write has been completely acknowledged by the connection actor. Every pull mode connection @@ -363,10 +363,10 @@ To enable pull reading on an outbound connection the `pullMode` parameter of the @scala[`Connect`]@java[`TcpMessage.connect` method] should be set to `true`: Scala -: @@snip [ReadBackPressure.scala]($code$/scala/docs/io/ReadBackPressure.scala) { #pull-mode-connect } +: @@snip [ReadBackPressure.scala](/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala) { #pull-mode-connect } Java -: @@snip [JavaReadBackPressure.java]($code$/java/jdocs/io/JavaReadBackPressure.java) { #pull-mode-connect } +: @@snip [JavaReadBackPressure.java](/akka-docs/src/test/java/jdocs/io/JavaReadBackPressure.java) { #pull-mode-connect } ### Pull Mode Reading for Inbound Connections @@ -375,10 +375,10 @@ connections but it is possible to create a listener actor with this mode of read by setting the `pullMode` parameter of the @scala[`Bind` command]@java[`TcpMessage.bind` method] to `true`: Scala -: @@snip [ReadBackPressure.scala]($code$/scala/docs/io/ReadBackPressure.scala) { #pull-mode-bind } +: @@snip [ReadBackPressure.scala](/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala) { #pull-mode-bind } Java -: @@snip [JavaReadBackPressure.java]($code$/java/jdocs/io/JavaReadBackPressure.java) { #pull-mode-bind } +: @@snip [JavaReadBackPressure.java](/akka-docs/src/test/java/jdocs/io/JavaReadBackPressure.java) { #pull-mode-bind } One of the effects of this setting is that all connections accepted by this listener actor will use pull mode reading. @@ -392,10 +392,10 @@ Listener actors with pull mode start suspended so to start accepting connections a @scala[`ResumeAccepting` command]@java[message by the `TcpMessage.resumeAccepting` method] has to be sent to the listener actor after binding was successful: Scala -: @@snip [ReadBackPressure.scala]($code$/scala/docs/io/ReadBackPressure.scala) { #pull-accepting #pull-accepting-cont } +: @@snip [ReadBackPressure.scala](/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala) { #pull-accepting #pull-accepting-cont } Java -: @@snip [JavaReadBackPressure.java]($code$/java/jdocs/io/JavaReadBackPressure.java) { #pull-accepting } +: @@snip [JavaReadBackPressure.java](/akka-docs/src/test/java/jdocs/io/JavaReadBackPressure.java) { #pull-accepting } As shown in the example, after handling an incoming connection we need to resume accepting again. 
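To make that accept flow concrete, a rough sketch under the stated pull-mode setup (the `PullModeListener` name and the handler wiring are made up; `ResumeAccepting(batchSize = 1)` requests a single connection per call):

```scala
import java.net.InetSocketAddress
import akka.actor.{ Actor, ActorRef }
import akka.io.{ IO, Tcp }

class PullModeListener extends Actor {
  import Tcp._
  import context.system

  // Bind in pull mode: the listener starts suspended
  IO(Tcp) ! Bind(self, new InetSocketAddress("localhost", 0), pullMode = true)

  override def receive: Receive = {
    case Bound(_) =>
      sender() ! ResumeAccepting(batchSize = 1) // ask for exactly one connection
      context.become(listening(sender()))
  }

  private def listening(listener: ActorRef): Receive = {
    case Connected(_, _) =>
      // register a handler for sender() (the connection actor) here, then:
      listener ! ResumeAccepting(batchSize = 1) // resume accepting the next one
  }
}
```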
diff --git a/akka-docs/src/main/paradox/io-udp.md b/akka-docs/src/main/paradox/io-udp.md index 5af0c8c374..fb17f8c883 100644 --- a/akka-docs/src/main/paradox/io-udp.md +++ b/akka-docs/src/main/paradox/io-udp.md @@ -30,10 +30,10 @@ offered using distinct IO extensions described below. ### Simple Send Scala -: @@snip [UdpDocSpec.scala]($code$/scala/docs/io/UdpDocSpec.scala) { #sender } +: @@snip [UdpDocSpec.scala](/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala) { #sender } Java -: @@snip [UdpDocTest.java]($code$/java/jdocs/io/UdpDocTest.java) { #sender } +: @@snip [UdpDocTest.java](/akka-docs/src/test/java/jdocs/io/UdpDocTest.java) { #sender } The simplest form of UDP usage is to send datagrams without needing a reply. To this end, a “simple sender” facility is provided as @@ -55,10 +55,10 @@ want to close the ephemeral port the sender is bound to. ### Bind (and Send) Scala -: @@snip [UdpDocSpec.scala]($code$/scala/docs/io/UdpDocSpec.scala) { #listener } +: @@snip [UdpDocSpec.scala](/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala) { #listener } Java -: @@snip [UdpDocTest.java]($code$/java/jdocs/io/UdpDocTest.java) { #listener } +: @@snip [UdpDocTest.java](/akka-docs/src/test/java/jdocs/io/UdpDocTest.java) { #listener } If you want to implement a UDP server which listens on a socket for incoming datagrams, then you need to use the @scala[`Bind`]@java[`UdpMessage.bind`] message as shown above. The @@ -84,10 +84,10 @@ connection is only able to send to the `remoteAddress` it was connected to, and will receive datagrams only from that address. Scala -: @@snip [UdpDocSpec.scala]($code$/scala/docs/io/UdpDocSpec.scala) { #connected } +: @@snip [UdpDocSpec.scala](/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala) { #connected } Java -: @@snip [UdpDocTest.java]($code$/java/jdocs/io/UdpDocTest.java) { #connected } +: @@snip [UdpDocTest.java](/akka-docs/src/test/java/jdocs/io/UdpDocTest.java) { #connected } Consequently, the example shown here looks quite similar to the previous one; the biggest difference is the absence of remote address information in @@ -114,23 +114,23 @@ class which @scala[extends]@java[implements] `akka.io.Inet.SocketOption`. Provid for opening a datagram channel by overriding the `create` method. Scala -: @@snip [ScalaUdpMulticast.scala]($code$/scala/docs/io/ScalaUdpMulticast.scala) { #inet6-protocol-family } +: @@snip [ScalaUdpMulticast.scala](/akka-docs/src/test/scala/docs/io/ScalaUdpMulticast.scala) { #inet6-protocol-family } Java -: @@snip [JavaUdpMulticast.java]($code$/java/jdocs/io/JavaUdpMulticast.java) { #inet6-protocol-family } +: @@snip [JavaUdpMulticast.java](/akka-docs/src/test/java/jdocs/io/JavaUdpMulticast.java) { #inet6-protocol-family } Another socket option will be needed to join a multicast group. Scala -: @@snip [ScalaUdpMulticast.scala]($code$/scala/docs/io/ScalaUdpMulticast.scala) { #multicast-group } +: @@snip [ScalaUdpMulticast.scala](/akka-docs/src/test/scala/docs/io/ScalaUdpMulticast.scala) { #multicast-group } Java -: @@snip [JavaUdpMulticast.java]($code$/java/jdocs/io/JavaUdpMulticast.java) { #multicast-group } +: @@snip [JavaUdpMulticast.java](/akka-docs/src/test/java/jdocs/io/JavaUdpMulticast.java) { #multicast-group } Socket options must be provided to the @scala[`UdpMessage.Bind`]@java[`UdpMessage.bind`] message.
Scala -: @@snip [ScalaUdpMulticast.scala]($code$/scala/docs/io/ScalaUdpMulticast.scala) { #bind } +: @@snip [ScalaUdpMulticast.scala](/akka-docs/src/test/scala/docs/io/ScalaUdpMulticast.scala) { #bind } Java -: @@snip [JavaUdpMulticast.java]($code$/java/jdocs/io/JavaUdpMulticast.java) { #bind } +: @@snip [JavaUdpMulticast.java](/akka-docs/src/test/java/jdocs/io/JavaUdpMulticast.java) { #bind } diff --git a/akka-docs/src/main/paradox/io.md b/akka-docs/src/main/paradox/io.md index a4ed388492..8677e71a23 100644 --- a/akka-docs/src/main/paradox/io.md +++ b/akka-docs/src/main/paradox/io.md @@ -33,10 +33,10 @@ is accessible @scala[through the `IO` entry point]@java[by querying an `ActorSys looks up the TCP manager and returns its `ActorRef`: Scala -: @@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #manager } +: @@snip [IODocSpec.scala](/akka-docs/src/test/scala/docs/io/IODocSpec.scala) { #manager } Java -: @@snip [EchoManager.java]($code$/java/jdocs/io/japi/EchoManager.java) { #manager } +: @@snip [EchoManager.java](/akka-docs/src/test/java/jdocs/io/japi/EchoManager.java) { #manager } The manager receives I/O command messages and instantiates worker actors in response. The worker actors present themselves to the API user in the reply to the command that was sent. For example after a `Connect` command sent to @@ -115,4 +115,4 @@ A `ByteStringBuilder` can be wrapped in a `java.io.OutputStream` via the `asOutp ## Architecture in-depth -For further details on the design and internal architecture see @ref:[I/O Layer Design](common/io-layer.md). \ No newline at end of file +For further details on the design and internal architecture see @ref:[I/O Layer Design](common/io-layer.md). diff --git a/akka-docs/src/main/paradox/logging.md b/akka-docs/src/main/paradox/logging.md index 2b29b38fb6..4b9c52b2dd 100644 --- a/akka-docs/src/main/paradox/logging.md +++ b/akka-docs/src/main/paradox/logging.md @@ -25,11 +25,11 @@ Create a `LoggingAdapter` and use the `error`, `warning`, `info`, or `debug` met as illustrated in this example: Scala -: @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #my-actor } +: @@snip [LoggingDocSpec.scala](/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala) { #my-actor } Java -: @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports } - @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #my-actor } +: @@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #imports } + @@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #my-actor } @@@ div { .group-scala } @@ -65,10 +65,10 @@ the same line with the same severity). You may pass an array as the only substitution argument to have its elements be treated individually: Scala -: @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #array } +: @@snip [LoggingDocSpec.scala](/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala) { #array } Java -: @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #array } +: @@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #array } The Java `Class` of the log source is also included in the generated `LogEvent`. In case of a simple string this is replaced with a “marker” @@ -259,7 +259,7 @@ using implicit parameters and thus fully customizable: create your own instance of `LogSource[T]` and have it in scope when creating the logger. 
-@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #my-source } +@@snip [LoggingDocSpec.scala](/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala) { #my-source } This example creates a log source which mimics traditional usage of Java loggers, which are based upon the originating object’s class name as log @@ -332,11 +332,11 @@ logger available in the 'akka-slf4j' module. Example of creating a listener: Scala -: @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #my-event-listener } +: @@snip [LoggingDocSpec.scala](/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala) { #my-event-listener } Java -: @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports #imports-listener } - @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #my-event-listener } +: @@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #imports #imports-listener } + @@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #my-event-listener } ## Logging to stdout during startup and shutdown @@ -512,11 +512,11 @@ if it is not set to a new map. Use `log.clearMDC()`. @@@ Scala -: @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #mdc } +: @@snip [LoggingDocSpec.scala](/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala) { #mdc } Java -: @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports-mdc } - @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #mdc-actor } +: @@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #imports-mdc } + @@snip [LoggingDocTest.java](/akka-docs/src/test/java/jdocs/event/LoggingDocTest.java) { #mdc-actor } @@@ div { .group-scala } @@ -524,7 +524,7 @@ For convenience, you can mix in the `log` member into actors, instead of definin This trait also lets you override `def mdc(msg: Any): MDC` for specifying MDC values depending on current message and lets you forget about the cleanup as well, since it already does it for you. 
-@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #mdc-actor } +@@snip [LoggingDocSpec.scala](/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala) { #mdc-actor } @@@ diff --git a/akka-docs/src/main/paradox/mailboxes.md b/akka-docs/src/main/paradox/mailboxes.md index d96beda76d..e16dc2bc4a 100644 --- a/akka-docs/src/main/paradox/mailboxes.md +++ b/akka-docs/src/main/paradox/mailboxes.md @@ -25,15 +25,15 @@ by having that actor @scala[extend]@java[implement] the parameterized @scala[tra an example: Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #required-mailbox-class } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #required-mailbox-class } Java -: @@snip [MyBoundedActor.java]($code$/java/jdocs/actor/MyBoundedActor.java) { #my-bounded-untyped-actor } +: @@snip [MyBoundedActor.java](/akka-docs/src/test/java/jdocs/actor/MyBoundedActor.java) { #my-bounded-untyped-actor } The type parameter to the `RequiresMessageQueue` @scala[trait]@java[interface] needs to be mapped to a mailbox in configuration like this: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #bounded-mailbox-config #required-mailbox-config } +@@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #bounded-mailbox-config #required-mailbox-config } Now every time you create an actor of type `MyBoundedActor` it will try to get a bounded mailbox. If the actor has a different mailbox configured in deployment, either directly or via @@ -199,46 +199,46 @@ The following mailboxes should only be used with zero `mailbox-push-timeout-time How to create a PriorityMailbox: Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #prio-mailbox } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #prio-mailbox } And then add it to the configuration: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher-config } +@@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher-config } And then an example on how you would use it: Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #prio-dispatcher } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #prio-dispatcher } It is also possible to configure a mailbox type directly like this (this is a top-level configuration entry): Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config #mailbox-deployment-config } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config #mailbox-deployment-config } Java -: @@snip 
[DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config-java #mailbox-deployment-config } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config-java #mailbox-deployment-config } And then use it either from deployment like this: Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-config } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-config } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-config } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-config } Or code like this: Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-code } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-code } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-code } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-code } ### ControlAwareMailbox @@ -247,40 +247,40 @@ immediately no matter how many other messages are already in its mailbox. It can be configured like this: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-config } +@@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-config } Control messages need to extend the `ControlMessage` trait: Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-messages } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-messages } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-mailbox-messages } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-mailbox-messages } And then an example on how you would use it: Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-dispatcher } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-dispatcher } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-dispatcher } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-dispatcher } ## Creating your own Mailbox type An example is worth a thousand quacks: Scala -: @@snip [MyUnboundedMailbox.scala]($code$/scala/docs/dispatcher/MyUnboundedMailbox.scala) { #mailbox-marker-interface } +: @@snip [MyUnboundedMailbox.scala](/akka-docs/src/test/scala/docs/dispatcher/MyUnboundedMailbox.scala) { #mailbox-marker-interface } Java -: @@snip [MyUnboundedMessageQueueSemantics.java]($code$/java/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java) { #mailbox-marker-interface } +: @@snip 
[MyUnboundedMessageQueueSemantics.java](/akka-docs/src/test/java/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java) { #mailbox-marker-interface } Scala -: @@snip [MyUnboundedMailbox.scala]($code$/scala/docs/dispatcher/MyUnboundedMailbox.scala) { #mailbox-implementation-example } +: @@snip [MyUnboundedMailbox.scala](/akka-docs/src/test/scala/docs/dispatcher/MyUnboundedMailbox.scala) { #mailbox-implementation-example } Java -: @@snip [MyUnboundedMailbox.java]($code$/java/jdocs/dispatcher/MyUnboundedMailbox.java) { #mailbox-implementation-example } +: @@snip [MyUnboundedMailbox.java](/akka-docs/src/test/java/jdocs/dispatcher/MyUnboundedMailbox.java) { #mailbox-implementation-example } And then you specify the FQCN of your MailboxType as the value of the "mailbox-type" in the dispatcher configuration, or the mailbox configuration. @@ -299,15 +299,15 @@ dispatcher or mailbox setting using it. You can also use the mailbox as a requirement on the dispatcher like this: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #custom-mailbox-config-java } +@@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #custom-mailbox-config-java } Or by defining the requirement on your actor class like this: Scala -: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #require-mailbox-on-actor } +: @@snip [DispatcherDocSpec.scala](/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala) { #require-mailbox-on-actor } Java -: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #require-mailbox-on-actor } +: @@snip [DispatcherDocTest.java](/akka-docs/src/test/java/jdocs/dispatcher/DispatcherDocTest.java) { #require-mailbox-on-actor } ## Special Semantics of `system.actorOf` diff --git a/akka-docs/src/main/paradox/multi-jvm-testing.md b/akka-docs/src/main/paradox/multi-jvm-testing.md index a67fdab958..8150fab2ef 100644 --- a/akka-docs/src/main/paradox/multi-jvm-testing.md +++ b/akka-docs/src/main/paradox/multi-jvm-testing.md @@ -10,7 +10,7 @@ To configure it in your project you should do the following steps: 1. Add it as a plugin by adding the following to your project/plugins.sbt: - @@snip [plugins.sbt]($akka$/project/plugins.sbt) { #sbt-multi-jvm } + @@snip [plugins.sbt](/project/plugins.sbt) { #sbt-multi-jvm } 2. Add multi-JVM testing to `build.sbt` or `project/Build.scala` by enabling `MultiJvmPlugin` and setting the `MultiJvm` config. diff --git a/akka-docs/src/main/paradox/multi-node-testing.md b/akka-docs/src/main/paradox/multi-node-testing.md index c417bd2573..397e99963b 100644 --- a/akka-docs/src/main/paradox/multi-node-testing.md +++ b/akka-docs/src/main/paradox/multi-node-testing.md @@ -169,17 +169,17 @@ complete the test names. First we need some scaffolding to hook up the `MultiNodeSpec` with your favorite test framework. Let's define a trait `STMultiNodeSpec` that uses ScalaTest to start and stop `MultiNodeSpec`. -@@snip [STMultiNodeSpec.scala]($akka$/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala) { #example } +@@snip [STMultiNodeSpec.scala](/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala) { #example } Then we need to define a configuration. Let's use two nodes `"node1"` and `"node2"` and call it `MultiNodeSampleConfig`.
-@@snip [MultiNodeSample.scala]($akka$/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala) { #package #config } +@@snip [MultiNodeSample.scala](/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala) { #package #config } And then, finally, the node test code itself, which starts the two nodes and demonstrates a barrier as well as a remote actor message send/receive. -@@snip [MultiNodeSample.scala]($akka$/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala) { #package #spec } +@@snip [MultiNodeSample.scala](/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala) { #package #spec } The easiest way to run this example yourself is to download the ready-to-run @extref[Akka Multi-Node Testing Sample with Scala](ecs:akka-samples-multi-node-scala) diff --git a/akka-docs/src/main/paradox/persistence-fsm.md b/akka-docs/src/main/paradox/persistence-fsm.md index bc5563ac06..de07d17735 100644 --- a/akka-docs/src/main/paradox/persistence-fsm.md +++ b/akka-docs/src/main/paradox/persistence-fsm.md @@ -31,10 +31,10 @@ To demonstrate the features of the @scala[`PersistentFSM` trait]@java[`AbstractP The contract of our "WebStoreCustomerFSMActor" is that it accepts the following commands: Scala -: @@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-commands } +: @@snip [PersistentFSMSpec.scala](/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-commands } Java -: @@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-commands } +: @@snip [AbstractPersistentFSMTest.java](/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-commands } `AddItem` - sent when the customer adds an item to a shopping cart `Buy` - when the customer finishes the purchase @@ -44,10 +44,10 @@ Java The customer can be in one of the following states: Scala -: @@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-states } +: @@snip [PersistentFSMSpec.scala](/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-states } Java -: @@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-states } +: @@snip [AbstractPersistentFSMTest.java](/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-states } `LookingAround` customer is browsing the site, but hasn't added anything to the shopping cart `Shopping` customer has recently added items to the shopping cart @@ -66,26 +66,26 @@ Customer's actions are "recorded" as a sequence of "domain events" which are per start in order to restore the latest customer's state: Scala -: @@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-domain-events } +: @@snip [PersistentFSMSpec.scala](/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-domain-events } Java -: @@snip
[AbstractPersistentFSMTest.java](/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-domain-events } Customer state data represents the items in a customer's shopping cart: Scala -: @@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-states-data } +: @@snip [PersistentFSMSpec.scala](/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-states-data } Java -: @@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-states-data } +: @@snip [AbstractPersistentFSMTest.java](/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-states-data } Here is how everything is wired together: Scala -: @@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-fsm-body } +: @@snip [PersistentFSMSpec.scala](/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-fsm-body } Java -: @@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-fsm-body } +: @@snip [AbstractPersistentFSMTest.java](/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-fsm-body } @@@ note @@ -95,27 +95,27 @@ Override the `applyEvent` method to define how state data is affected by domain @@@ Scala -: @@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-apply-event } +: @@snip [PersistentFSMSpec.scala](/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-apply-event } Java -: @@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-apply-event } +: @@snip [AbstractPersistentFSMTest.java](/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-apply-event } `andThen` can be used to define actions which will be executed following the event's persistence - convenient for "side effects" like sending a message or logging.
Notice that actions defined in the `andThen` block are not executed on recovery: Scala -: @@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-andthen-example } +: @@snip [PersistentFSMSpec.scala](/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-andthen-example } Java -: @@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-andthen-example } +: @@snip [AbstractPersistentFSMTest.java](/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-andthen-example } A snapshot of state data can be persisted by calling the `saveStateSnapshot()` method: Scala -: @@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-snapshot-example } +: @@snip [PersistentFSMSpec.scala](/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-snapshot-example } Java -: @@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-snapshot-example } +: @@snip [AbstractPersistentFSMTest.java](/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-snapshot-example } On recovery, state data is initialized according to the latest available snapshot, then the remaining domain events are replayed, triggering the `applyEvent` method. diff --git a/akka-docs/src/main/paradox/persistence-journals.md b/akka-docs/src/main/paradox/persistence-journals.md index 6a51dcfcbe..b5a2a70791 100644 --- a/akka-docs/src/main/paradox/persistence-journals.md +++ b/akka-docs/src/main/paradox/persistence-journals.md @@ -12,30 +12,30 @@ A journal plugin extends `AsyncWriteJournal`.
`AsyncWriteJournal` is an actor and the methods to be implemented are: Scala -: @@snip [AsyncWriteJournal.scala]($akka$/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala) { #journal-plugin-api } +: @@snip [AsyncWriteJournal.scala](/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala) { #journal-plugin-api } Java -: @@snip [AsyncWritePlugin.java]($akka$/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncWritePlugin.java) { #async-write-plugin-api } +: @@snip [AsyncWritePlugin.java](/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncWritePlugin.java) { #async-write-plugin-api } If the storage backend API only supports synchronous, blocking writes, the methods should be implemented as: Scala -: @@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #sync-journal-plugin-api } +: @@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #sync-journal-plugin-api } Java -: @@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #sync-journal-plugin-api } +: @@snip [LambdaPersistencePluginDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #sync-journal-plugin-api } A journal plugin must also implement the methods defined in `AsyncRecovery` for replays and sequence number recovery: Scala -: @@snip [AsyncRecovery.scala]($akka$/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala) { #journal-plugin-api } +: @@snip [AsyncRecovery.scala](/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala) { #journal-plugin-api } Java -: @@snip [AsyncRecoveryPlugin.java]($akka$/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncRecoveryPlugin.java) { #async-replay-plugin-api } +: @@snip [AsyncRecoveryPlugin.java](/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncRecoveryPlugin.java) { #async-replay-plugin-api } A journal plugin can be activated with the following minimal configuration: -@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-plugin-config } +@@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-plugin-config } The journal plugin instance is an actor so the methods corresponding to requests from persistent actors are executed sequentially. 
It may delegate to asynchronous libraries, spawn futures, or delegate to other @@ -60,14 +60,14 @@ Don't run journal tasks/futures on the system default dispatcher, since that mig A snapshot store plugin must extend the `SnapshotStore` actor and implement the following methods: Scala -: @@snip [SnapshotStore.scala]($akka$/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala) { #snapshot-store-plugin-api } +: @@snip [SnapshotStore.scala](/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala) { #snapshot-store-plugin-api } Java -: @@snip [SnapshotStorePlugin.java]($akka$/akka-persistence/src/main/java/akka/persistence/snapshot/japi/SnapshotStorePlugin.java) { #snapshot-store-plugin-api } +: @@snip [SnapshotStorePlugin.java](/akka-persistence/src/main/java/akka/persistence/snapshot/japi/SnapshotStorePlugin.java) { #snapshot-store-plugin-api } A snapshot store plugin can be activated with the following minimal configuration: -@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-store-plugin-config } +@@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-store-plugin-config } The snapshot store instance is an actor so the methods corresponding to requests from persistent actors are executed sequentially. It may delegate to asynchronous libraries, spawn futures, or delegate to other @@ -102,10 +102,10 @@ The TCK is usable from Java as well as Scala projects. To test your implementati To include the Journal TCK tests in your test suite simply extend the provided @scala[`JournalSpec`]@java[`JavaJournalSpec`]: Scala -: @@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-tck-scala } +: @@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-tck-scala } Java -: @@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #journal-tck-java } +: @@snip [LambdaPersistencePluginDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #journal-tck-java } Please note that some of the tests are optional, and by overriding the `supports...` methods you give the TCK the needed information about which tests to run. You can implement these methods using @scala[boolean values or] the @@ -119,19 +119,19 @@ typical scenarios. In order to include the `SnapshotStore` TCK tests in your test suite extend the `SnapshotStoreSpec`: Scala -: @@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-store-tck-scala } +: @@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-store-tck-scala } Java -: @@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #snapshot-store-tck-java } +: @@snip [LambdaPersistencePluginDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #snapshot-store-tck-java } In case your plugin requires some setting up (starting a mock database, removing temporary files etc.) 
you can override the `beforeAll` and `afterAll` methods to hook into the tests lifecycle: Scala -: @@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-tck-before-after-scala } +: @@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-tck-before-after-scala } Java -: @@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #journal-tck-before-after-java } +: @@snip [LambdaPersistencePluginDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #journal-tck-before-after-java } We *highly recommend* including these specifications in your test suite, as they cover a broad range of cases you might have otherwise forgotten to test for when writing a plugin from scratch. diff --git a/akka-docs/src/main/paradox/persistence-query-leveldb.md b/akka-docs/src/main/paradox/persistence-query-leveldb.md index 8f9e57e8c8..6066b5c87e 100644 --- a/akka-docs/src/main/paradox/persistence-query-leveldb.md +++ b/akka-docs/src/main/paradox/persistence-query-leveldb.md @@ -23,10 +23,10 @@ The `ReadJournal` is retrieved via the `akka.persistence.query.PersistenceQuery` extension: Scala -: @@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #get-read-journal } +: @@snip [LeveldbPersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #get-read-journal } Java -: @@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #get-read-journal } +: @@snip [LeveldbPersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #get-read-journal } ## Supported Queries @@ -36,10 +36,10 @@ Java identified by `persistenceId`. Scala -: @@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #EventsByPersistenceId } +: @@snip [LeveldbPersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #EventsByPersistenceId } Java -: @@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #EventsByPersistenceId } +: @@snip [LeveldbPersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #EventsByPersistenceId } You can retrieve a subset of all events by specifying `fromSequenceNr` and `toSequenceNr` or use `0L` and @scala[`Long.MaxValue`]@java[`Long.MAX_VALUE`] respectively to retrieve all events. Note that @@ -68,10 +68,10 @@ backend journal. `persistenceIds` is used for retrieving all `persistenceIds` of all persistent actors. 
Scala -: @@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #AllPersistenceIds } +: @@snip [LeveldbPersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #AllPersistenceIds } Java -: @@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #AllPersistenceIds } +: @@snip [LeveldbPersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #AllPersistenceIds } The returned event stream is unordered and you can expect a different order for multiple executions of the query. @@ -93,19 +93,19 @@ backend journal. all domain events of an Aggregate Root type. Scala -: @@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #EventsByTag } +: @@snip [LeveldbPersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #EventsByTag } Java -: @@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #EventsByTag } +: @@snip [LeveldbPersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #EventsByTag } To tag events you create an @ref:[Event Adapter](persistence.md#event-adapters) that wraps the events in an `akka.persistence.journal.Tagged` with the given `tags`. Scala -: @@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #tagger } +: @@snip [LeveldbPersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #tagger } Java -: @@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #tagger } +: @@snip [LeveldbPersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #tagger } You can use `NoOffset` to retrieve all events with a given tag or retrieve a subset of all events by specifying a `Sequence` `offset`. The `offset` corresponds to an ordered sequence number for @@ -153,4 +153,4 @@ for the default `LeveldbReadJournal.Identifier`. It can be configured with the following properties: -@@snip [reference.conf]($akka$/akka-persistence-query/src/main/resources/reference.conf) { #query-leveldb } +@@snip [reference.conf](/akka-persistence-query/src/main/resources/reference.conf) { #query-leveldb } diff --git a/akka-docs/src/main/paradox/persistence-query.md b/akka-docs/src/main/paradox/persistence-query.md index 29716f6d8c..0a46158cdb 100644 --- a/akka-docs/src/main/paradox/persistence-query.md +++ b/akka-docs/src/main/paradox/persistence-query.md @@ -45,10 +45,10 @@ databases.
For example, given a library that provides a `akka.persistence.query.my-read-journal` obtaining the related journal is as simple as: Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #basic-usage } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #basic-usage } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #basic-usage } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #basic-usage } Journal implementers are encouraged to put this identifier in a variable known to the user, such that one can access it via @scala[`readJournalFor[NoopJournal](NoopJournal.identifier)`]@java[`getJournalFor(NoopJournal.class, NoopJournal.identifier)`], however this is not enforced. @@ -78,18 +78,18 @@ By default this stream should be assumed to be a "live" stream, which means that the journal should keep emitting new persistence ids as they come into the system: Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #all-persistence-ids-live } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #all-persistence-ids-live } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #all-persistence-ids-live } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #all-persistence-ids-live } If your usage does not require a live stream, you can use the `currentPersistenceIds` query: Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #all-persistence-ids-snap } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #all-persistence-ids-snap } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #all-persistence-ids-snap } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #all-persistence-ids-snap } #### EventsByPersistenceIdQuery and CurrentEventsByPersistenceIdQuery @@ -98,10 +98,10 @@ however, since it is a stream it is possible to keep it alive and watch for additional incoming events persisted by the persistent actor identified by the given `persistenceId`. Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #events-by-persistent-id } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #events-by-persistent-id } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #events-by-persistent-id } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #events-by-persistent-id } Most journals will have to revert to polling in order to achieve this, which can typically be configured with a `refresh-interval` configuration property. @@ -121,10 +121,10 @@ Some journals may support tagging of events via an @ref:[Event Adapters](persist how exactly this is implemented depends on the used journal.
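As a quick orientation, here is a hand-written sketch of the write side of tagging, under the assumption of a made-up `OrderPlaced` event and tag name (the bundled `#tagger` snippets below remain the authoritative example):

```scala
import akka.persistence.journal.{ Tagged, WriteEventAdapter }

// Hypothetical domain event used only for illustration.
final case class OrderPlaced(orderId: String)

class OrderTaggingEventAdapter extends WriteEventAdapter {
  // No manifest is needed for this sketch.
  override def manifest(event: Any): String = ""

  // Wrapping the event in Tagged lets the journal index it for eventsByTag.
  override def toJournal(event: Any): Any = event match {
    case e: OrderPlaced => Tagged(e, Set("order"))
    case other          => other
  }
}
```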
Here is an example of such a tagging event adapter: Scala -: @@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #tagger } +: @@snip [LeveldbPersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #tagger } Java -: @@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #tagger } +: @@snip [LeveldbPersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #tagger } @@@ note @@ -142,10 +142,10 @@ In the example below we query all events which have been tagged (we assume this tag - for example if the journal stored the events as json it may try to find those with the field `tag` set to this value etc.). Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #events-by-tag } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #events-by-tag } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #events-by-tag } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #events-by-tag } As you can see, we can use all the usual stream operators available from @ref:[Streams](stream/index.md) on the resulting query stream, including for example taking the first 10 and cancelling the stream. It is worth pointing out that the built-in `EventsByTag` @@ -166,24 +166,24 @@ is defined as the second type parameter of the returned `Source`, which allows j specialised query object, as demonstrated in the sample below: Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-types } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-types } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-types } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-types } Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-definition } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-definition } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-definition } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-definition } Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-usage } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-usage } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-usage } +: @@snip 
[PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-usage } ## Performance and denormalization @@ -215,10 +215,10 @@ If the read datastore exposes a [Reactive Streams](http://reactive-streams.org) is as simple as using the read-journal and feeding it into the database's driver interface, for example like so: Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-rs } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-rs } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-rs } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-rs } ### Materialize view using mapAsync @@ -229,17 +229,17 @@ In case your write logic is stateless and you need to convert the events from one data type to another before writing into the alternative datastore, then the projection will look like this: Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-simple-classes } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-simple-classes } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-simple-classes } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-simple-classes } Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-simple } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-simple } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-simple } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-simple } ### Resumable projections @@ -252,17 +252,17 @@ you need to do some complex logic that would be best handled inside an Actor before persisting them into the other datastore: Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-actor-run } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-actor-run } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-actor-run } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-actor-run } Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-actor } +: @@snip
[PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-actor } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-actor } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-actor } ## Query plugins @@ -295,18 +295,18 @@ As illustrated below one of the implementations can delegate to the other. Below is a simple journal implementation: Scala -: @@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #my-read-journal } +: @@snip [PersistenceQueryDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #my-read-journal } Java -: @@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #my-read-journal } +: @@snip [PersistenceQueryDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java) { #my-read-journal } And the `eventsByTag` could be backed by such an Actor for example: Scala -: @@snip [MyEventsByTagPublisher.scala]($code$/scala/docs/persistence/query/MyEventsByTagPublisher.scala) { #events-by-tag-publisher } +: @@snip [MyEventsByTagPublisher.scala](/akka-docs/src/test/scala/docs/persistence/query/MyEventsByTagPublisher.scala) { #events-by-tag-publisher } Java -: @@snip [MyEventsByTagJavaPublisher.java]($code$/java/jdocs/persistence/query/MyEventsByTagJavaPublisher.java) { #events-by-tag-publisher } +: @@snip [MyEventsByTagJavaPublisher.java](/akka-docs/src/test/java/jdocs/persistence/query/MyEventsByTagJavaPublisher.java) { #events-by-tag-publisher } The `ReadJournalProvider` class must have a constructor with one of these signatures: diff --git a/akka-docs/src/main/paradox/persistence-schema-evolution.md b/akka-docs/src/main/paradox/persistence-schema-evolution.md index 69adc835b9..c9f7278aec 100644 --- a/akka-docs/src/main/paradox/persistence-schema-evolution.md +++ b/akka-docs/src/main/paradox/persistence-schema-evolution.md @@ -168,22 +168,22 @@ For more in-depth explanations on how serialization picks the serializer to use First we start by defining our domain model class, here representing a person: Scala -: @@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer-model } +: @@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer-model } Java -: @@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #simplest-custom-serializer-model } +: @@snip [PersistenceSchemaEvolutionDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #simplest-custom-serializer-model } Next we implement a serializer (or extend an existing one to be able to handle the new `Person` class): Scala -: @@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer } +: @@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer } Java -: @@snip 
[PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #simplest-custom-serializer } +: @@snip [PersistenceSchemaEvolutionDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #simplest-custom-serializer } And finally we register the serializer and bind it to handle the `docs.persistence.Person` class: -@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer-config } +@@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer-config } Deserialization will be performed by the same serializer which serialized the message initially because of the `identifier` being stored together with the message. @@ -219,16 +219,16 @@ values somehow. This is usually modeled as some kind of default value, or by rep See below for an example of how reading an optional field from a serialized protocol buffers message might look. Scala -: @@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-read-optional-model } +: @@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-read-optional-model } Java -: @@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #protobuf-read-optional-model } +: @@snip [PersistenceSchemaEvolutionDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #protobuf-read-optional-model } Next we prepare a protocol definition using the protobuf Interface Description Language, which we'll use to generate the serializer code to be used on the Akka Serialization layer (notice that the schema approach allows us to rename fields, as long as the numeric identifiers of the fields do not change): -@@snip [FlightAppModels.proto]($code$/../main/protobuf/FlightAppModels.proto) { #protobuf-read-optional-proto } +@@snip [FlightAppModels.proto](/akka-docs/src/test/../main/protobuf/FlightAppModels.proto) { #protobuf-read-optional-proto } The serializer implementation uses the protobuf generated classes to marshall the payloads. Optional fields can be handled explicitly, or missing values detected by calling the `has...` methods on the protobuf object, @@ -236,10 +236,10 @@ which we do for `seatType` in order to use an `Unknown` type in case the event was stored before we had introduced the field to this event type: Scala -: @@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-read-optional } +: @@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-read-optional } Java -: @@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #protobuf-read-optional } +: @@snip [PersistenceSchemaEvolutionDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #protobuf-read-optional } ### Rename fields @@ -265,7 +265,7 @@ add the overhead of having to maintain the schema.
When using serializers like t This is how such a rename would look in protobuf: -@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-rename-proto } +@@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-rename-proto } It is important to learn about the strengths and limitations of your serializers, in order to be able to move swiftly and refactor your models fearlessly as you go on with the project. @@ -294,10 +294,10 @@ or using a library like @scala[[Stamina](https://github.com/scalapenos/stamina)] The following snippet showcases how one could apply renames if working with plain JSON (using @scala[`spray.json.JsObject`]@java[a `JsObject` as an example JSON representation]): Scala -: @@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #rename-plain-json } +: @@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #rename-plain-json } Java -: @@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #rename-plain-json } +: @@snip [PersistenceSchemaEvolutionDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #rename-plain-json } As you can see, manually handling renames induces some boilerplate onto the EventAdapter; however, much of it you will find is common infrastructure code that can be either provided by an external library (for promotion management) @@ -363,19 +363,19 @@ Other events (**E**) can just be passed through. The serializer detects that the string manifest points to a removed event type and skips attempting to deserialize it: Scala -: @@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #string-serializer-skip-deleved-event-by-manifest } +: @@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #string-serializer-skip-deleved-event-by-manifest } Java -: @@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #string-serializer-skip-deleved-event-by-manifest } +: @@snip [PersistenceSchemaEvolutionDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #string-serializer-skip-deleved-event-by-manifest } The EventAdapter we implemented is aware of `EventDeserializationSkipped` events (our "Tombstones"), and emits an empty `EventSeq` whenever such an object is encountered: Scala -: @@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #string-serializer-skip-deleved-event-by-manifest-adapter } +: @@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #string-serializer-skip-deleved-event-by-manifest-adapter } Java -: @@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #string-serializer-skip-deleved-event-by-manifest-adapter } +: @@snip [PersistenceSchemaEvolutionDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) {
#string-serializer-skip-deleved-event-by-manifest-adapter } ### Detach domain model from data model @@ -405,20 +405,20 @@ include additional data for the event (e.g. tags), for ease of later querying. We will use the following domain and data models to showcase how the separation can be implemented by the adapter: Scala -: @@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models } +: @@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models } Java -: @@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models } +: @@snip [PersistenceSchemaEvolutionDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models } The `EventAdapter` takes care of converting from one model to the other one (in both directions), allowing the models to be completely detached from each other, such that they can be optimised independently as long as the mapping logic is able to convert between them: Scala -: @@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models-adapter } +: @@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models-adapter } Java -: @@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models-adapter } +: @@snip [PersistenceSchemaEvolutionDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models-adapter } The same technique could also be used directly in the Serializer if the end result of marshalling is bytes. Then the serializer can simply convert the bytes to the domain object by using the generated protobuf builders. @@ -441,10 +441,10 @@ The journal plugin notices that the incoming event type is JSON (for example by performing a `match` on the incoming event) and stores the incoming object directly.
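A minimal sketch of such a JSON-emitting adapter, assuming spray-json on the classpath and a made-up `CustomerAdded` event (a JSON-capable journal would store the returned `JsObject` as-is):

```scala
import akka.persistence.journal.{ EventAdapter, EventSeq }
import spray.json._

// Hypothetical domain event used only for illustration.
final case class CustomerAdded(name: String)

class JsonEventAdapter extends EventAdapter {
  override def manifest(event: Any): String = "customer-added-v1"

  // Write side: emit a JSON AST instead of the domain class.
  override def toJournal(event: Any): Any = event match {
    case CustomerAdded(name) => JsObject("name" -> JsString(name))
  }

  // Read side: rebuild the domain event from the stored JSON.
  override def fromJournal(event: Any, manifest: String): EventSeq = event match {
    case json: JsObject =>
      val JsString(name) = json.fields("name")
      EventSeq.single(CustomerAdded(name))
  }
}
```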
Scala -: @@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models-adapter-json } +: @@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models-adapter-json } Java -: @@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models-adapter-json } +: @@snip [PersistenceSchemaEvolutionDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models-adapter-json } @@@ note @@ -500,10 +500,10 @@ and the address change is handled similarly: Scala -: @@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #split-events-during-recovery } +: @@snip [PersistenceSchemaEvolutionDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #split-events-during-recovery } Java -: @@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #split-events-during-recovery } +: @@snip [PersistenceSchemaEvolutionDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #split-events-during-recovery } By returning an `EventSeq` from the event adapter, the recovered event can be converted to multiple events before being delivered to the persistent actor. diff --git a/akka-docs/src/main/paradox/persistence.md b/akka-docs/src/main/paradox/persistence.md index 577dae7f42..fbeef9c6fb 100644 --- a/akka-docs/src/main/paradox/persistence.md +++ b/akka-docs/src/main/paradox/persistence.md @@ -95,10 +95,10 @@ Akka persistence supports event sourcing with the @scala[`PersistentActor` trait is defined by implementing @scala[`receiveRecover`]@java[`createReceiveRecover`] and @scala[`receiveCommand`]@java[`createReceive`]. This is demonstrated in the following example. Scala -: @@snip [PersistentActorExample.scala]($code$/scala/docs/persistence/PersistentActorExample.scala) { #persistent-actor-example } +: @@snip [PersistentActorExample.scala](/akka-docs/src/test/scala/docs/persistence/PersistentActorExample.scala) { #persistent-actor-example } Java -: @@snip [PersistentActorExample.java]($code$/java/jdocs/persistence/PersistentActorExample.java) { #persistent-actor-example } +: @@snip [PersistentActorExample.java](/akka-docs/src/test/java/jdocs/persistence/PersistentActorExample.java) { #persistent-actor-example } The example defines two data types, `Cmd` and `Evt` to represent commands and events, respectively. The `state` of the `ExamplePersistentActor` is a list of persisted event data contained in `ExampleState`. @@ -151,10 +151,10 @@ A persistent actor must have an identifier that doesn't change across different The identifier must be defined with the `persistenceId` method. 
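For illustration, a hand-written sketch with a hypothetical `Counter` actor (the bundled snippets below show the official version); the important point is that the id is derived from a stable business key, not from the actor's path:

```scala
import akka.persistence.PersistentActor

class Counter(counterId: String) extends PersistentActor {
  // Stable across restarts and re-deployments of this entity.
  override def persistenceId: String = "counter-" + counterId

  override def receiveRecover: Receive = { case _ => }
  override def receiveCommand: Receive = { case _ => }
}
```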
Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #persistence-id-override } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #persistence-id-override } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #persistence-id-override } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #persistence-id-override } @@@ note @@ -199,10 +199,10 @@ This can be useful if snapshot serialization format has changed in an incompatib It should typically not be used when events have been deleted. Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-no-snap } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-no-snap } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-no-snap } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-no-snap } Another possible recovery customization, which can be useful for debugging, is setting an upper bound on the replay, causing the actor to be replayed only up to a certain point "in the past" (instead of being replayed to its most up to date state). Note that after that it is a bad idea to persist new @@ -210,28 +210,28 @@ events because a later recovery will probably be confused by the new events that follow events that were previously skipped. Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-custom } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-custom } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-custom } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-custom } Recovery can be disabled by returning `Recovery.none()` in the `recovery` method of a `PersistentActor`: Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-disabled } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-disabled } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-disabled } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-disabled } #### Recovery status A persistent actor can query its own recovery status via the methods Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-status } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-status } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-status } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-status } Sometimes there is a need for performing additional initialization when the recovery has completed before processing any other
message sent to the persistent actor. @@ -239,10 +239,10 @@ The persistent actor will receive a special `RecoveryCompleted` message right after recovery and before any other received messages. Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-completed } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-completed } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-completed } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-completed } The actor will always receive a `RecoveryCompleted` message, even if there are no events in the journal and the snapshot store is empty, or if it's a new persistent actor with a previously @@ -325,10 +325,10 @@ In the below example, the event callbacks may be called "at any time", even afte The ordering between events is still guaranteed ("evt-b-1" will be sent after "evt-a-2", which will be sent after "evt-a-1" etc.). Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #persist-async } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #persist-async } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #persist-async } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #persist-async } @@@ note @@ -357,10 +357,10 @@ Using those methods is very similar to the persist family of methods, yet they do not persist the passed event. It will be kept in memory and used when invoking the handler. Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #defer } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #defer } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer } Notice that the `sender()` is **safe** to access in the handler callback, and will be pointing to the original sender of the command for which this `defer` or `deferAsync` handler was called. @@ -368,18 +368,18 @@ of the command for which this `defer` or `deferAsync` handler was called. The calling side will get the responses in this (guaranteed) order: Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #defer-caller } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #defer-caller } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer-caller } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer-caller } You can also call `defer` or `deferAsync` with `persist`.
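A compressed sketch of that combination with toy messages (the bundled snippets below are the authoritative example); the acknowledgement is kept in memory and never written to the journal:

```scala
import akka.persistence.PersistentActor

class AckingActor extends PersistentActor {
  override def persistenceId: String = "acking-actor"
  override def receiveRecover: Receive = { case _ => }

  override def receiveCommand: Receive = {
    case c: String =>
      persist(s"evt-$c") { e => sender() ! e }        // journaled
      deferAsync(s"ack-$c") { ack => sender() ! ack } // runs after the persist handler
  }
}
```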
Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #defer-with-persist } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #defer-with-persist } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer-with-persist } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer-with-persist } @@@ warning @@ -400,18 +400,18 @@ those situations, as well as their implication on the stashing behavior (that `p example two persist calls are issued, and each of them issues another persist inside its callback: Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persist-persist } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persist-persist } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persist-persist } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persist-persist } When sending two commands to this `PersistentActor`, the persist handlers will be executed in the following order: Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persist-persist-caller } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persist-persist-caller } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persist-persist-caller } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persist-persist-caller } First the "outer layer" of persist calls is issued and their callbacks are applied. After these have successfully completed, the inner callbacks will be invoked (once the events they are persisting have been confirmed to be persisted by the journal). @@ -422,18 +422,18 @@ is extended until all nested `persist` callbacks have been handled. 
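A compressed sketch of the nesting pattern just described, with toy command and event strings:

```scala
import akka.persistence.PersistentActor

class NestedPersistActor extends PersistentActor {
  override def persistenceId: String = "nested-persist"
  override def receiveRecover: Receive = { case _ => }

  override def receiveCommand: Receive = {
    case c: String =>
      persist(s"$c-outer") { outer =>
        sender() ! outer
        // Invoked only once the outer event is confirmed persisted.
        persist(s"$c-inner") { inner => sender() ! inner }
      }
  }
}
```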
It is also possible to nest `persistAsync` calls, using the same pattern: Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persistAsync-persistAsync } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persistAsync-persistAsync } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persistAsync-persistAsync } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persistAsync-persistAsync } In this case no stashing is happening, yet events are still persisted and callbacks are executed in the expected order: Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persistAsync-persistAsync-caller } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persistAsync-persistAsync-caller } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persistAsync-persistAsync-caller } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persistAsync-persistAsync-caller } While it is possible to nest mixed `persist` and `persistAsync` calls while keeping their respective semantics, it is not a recommended practice, as it may lead to overly complex nesting. @@ -461,10 +461,10 @@ actor and after a back-off timeout start it again. The `akka.pattern.BackoffSupervisor` is provided to support such restarts.
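A sketch of wiring this up, with placeholder timings and a hypothetical child actor (the bundled `#backoff` snippets show the official version):

```scala
import scala.concurrent.duration._
import akka.actor.Props
import akka.pattern.{ Backoff, BackoffSupervisor }
import akka.persistence.PersistentActor

// Hypothetical child used only for illustration.
class MyPersistentActor extends PersistentActor {
  override def persistenceId: String = "my-actor"
  override def receiveRecover: Receive = { case _ => }
  override def receiveCommand: Receive = { case _ => }
}

val supervisorProps = BackoffSupervisor.props(
  Backoff.onStop(
    Props[MyPersistentActor],
    childName = "myActor",
    minBackoff = 3.seconds,  // wait at least this long before restarting
    maxBackoff = 30.seconds, // cap for the exponential back-off
    randomFactor = 0.2))     // jitter, so many actors don't restart at once
// system.actorOf(supervisorProps, "mySupervisor")
```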
Notice the early stop behavior that occurs when `PoisonPill` is used: Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown } Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown-example-bad } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown-example-bad } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown-example-bad } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown-example-bad } Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown-example-good } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown-example-good } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown-example-good } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown-example-good } ### Replay Filter @@ -640,23 +640,23 @@ Persistent actors can save snapshots of internal state by calling the `saveSnapshot` method. If saving of a snapshot succeeds, the persistent actor receives a `SaveSnapshotSuccess` message, otherwise a `SaveSnapshotFailure` message Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #save-snapshot } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #save-snapshot } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #save-snapshot } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #save-snapshot } where `metadata` is of type `SnapshotMetadata`: -@@snip [SnapshotProtocol.scala]($akka$/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala) { #snapshot-metadata } +@@snip [SnapshotProtocol.scala](/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala) { #snapshot-metadata } During recovery, the persistent actor is offered a previously saved snapshot via a `SnapshotOffer` message from which it can initialize internal state. Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #snapshot-offer } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #snapshot-offer } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #snapshot-offer } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #snapshot-offer } The replayed messages that follow the `SnapshotOffer` message, if any, are younger than the offered snapshot.
They finally recover the persistent actor to its current (i.e. latest) state. @@ -665,10 +665,10 @@ In general, a persistent actor is only offered a snapshot if that persistent actor has previously saved one or more snapshots and at least one of these snapshots matches the `SnapshotSelectionCriteria` that can be specified for recovery. Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #snapshot-criteria } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #snapshot-criteria } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #snapshot-criteria } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #snapshot-criteria } If not specified, they default to @scala[`SnapshotSelectionCriteria.Latest`]@java[`SnapshotSelectionCriteria.latest()`] which selects the latest (= youngest) snapshot. To disable snapshot-based recovery, applications should use @scala[`SnapshotSelectionCriteria.None`]@java[`SnapshotSelectionCriteria.none()`]. A recovery where no @@ -786,10 +786,10 @@ of the message, the destination actor will send the same `deliveryId` wrapped i The sender will then use it to call the `confirmDelivery` method to complete the delivery routine. Scala -: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #at-least-once-example } +: @@snip [PersistenceDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala) { #at-least-once-example } Java -: @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #at-least-once-example } +: @@snip [LambdaPersistenceDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #at-least-once-example } The `deliveryId` generated by the persistence module is a strictly monotonically increasing sequence number without gaps. The same sequence is used for all destinations of the actor, i.e. when sending to multiple @@ -864,14 +864,14 @@ json instead of serializing the object to its binary representation. Implementing an EventAdapter is rather straightforward: Scala -: @@snip [PersistenceEventAdapterDocSpec.scala]($code$/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala) { #identity-event-adapter } +: @@snip [PersistenceEventAdapterDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala) { #identity-event-adapter } Java -: @@snip [PersistenceEventAdapterDocTest.java]($code$/java/jdocs/persistence/PersistenceEventAdapterDocTest.java) { #identity-event-adapter } +: @@snip [PersistenceEventAdapterDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceEventAdapterDocTest.java) { #identity-event-adapter } Then in order for it to be used on events coming to and from the journal you must bind it using the below configuration syntax: -@@snip [PersistenceEventAdapterDocSpec.scala]($code$/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala) { #event-adapters-config } +@@snip [PersistenceEventAdapterDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala) { #event-adapters-config } It is possible to bind multiple adapters to one class *for recovery*, in which case the `fromJournal` methods of all bound adapters will be applied to a given matching event (in order of definition in the configuration).
Since each adapter may @@ -913,10 +913,10 @@ Applications can provide their own plugins by implementing a plugin API and acti Plugin development requires the following imports: Scala -: @@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #plugin-imports } +: @@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #plugin-imports } Java -: @@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #plugin-imports } +: @@snip [LambdaPersistencePluginDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #plugin-imports } ### Eager initialization of persistence plugin @@ -958,7 +958,7 @@ akka { The LevelDB journal plugin config entry is `akka.persistence.journal.leveldb`. It writes messages to a local LevelDB instance. Enable this plugin by defining config property: -@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-plugin-config } +@@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-plugin-config } LevelDB based plugins will also require the following additional dependency declaration: @@ -971,7 +971,7 @@ LevelDB based plugins will also require the following additional dependency decl The default location of LevelDB files is a directory named `journal` in the current working directory. This location can be changed by configuration where the specified path can be relative or absolute: -@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-config } +@@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-config } With this plugin, each actor system runs its own private LevelDB instance. @@ -980,7 +980,7 @@ a "tombstone" for each deleted message instead. In the case of heavy journal usa deletes, this may be an issue as users may find themselves dealing with continuously increasing journal sizes. To this end, LevelDB offers a special journal compaction function that is exposed via the following configuration: -@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #compaction-intervals-config } +@@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #compaction-intervals-config } ### Shared LevelDB journal @@ -1005,29 +1005,29 @@ This plugin has been supplanted by [Persistence Plugin Proxy](#persistence-plugi A shared LevelDB instance is started by instantiating the `SharedLeveldbStore` actor. Scala -: @@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-creation } +: @@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-creation } Java -: @@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #shared-store-creation } +: @@snip [LambdaPersistencePluginDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #shared-store-creation } By default, the shared instance writes journaled messages to a local directory named `journal` in the current working directory. 
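For reference, starting such a store is a one-liner (system and actor names here are assumptions):

```scala
import akka.actor.{ ActorSystem, Props }
import akka.persistence.journal.leveldb.SharedLeveldbStore

val system = ActorSystem("shared-store-node")
// One designated node hosts the shared store actor.
val store = system.actorOf(Props[SharedLeveldbStore], "store")
```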
The storage location can be changed by configuration: -@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-config } +@@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-config } Actor systems that use a shared LevelDB store must activate the `akka.persistence.journal.leveldb-shared` plugin. -@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-journal-config } +@@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-journal-config } This plugin must be initialized by injecting the (remote) `SharedLeveldbStore` actor reference. Injection is done by calling the `SharedLeveldbJournal.setStore` method with the actor reference as argument. Scala -: @@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-usage } +: @@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-usage } Java -: @@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #shared-store-usage } +: @@snip [LambdaPersistencePluginDocTest.java](/akka-docs/src/test/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #shared-store-usage } Internal journal commands (sent by persistent actors) are buffered until injection completes. Injection is idempotent i.e. only the first injection is used. @@ -1038,12 +1038,12 @@ i.e. only the first injection is used. The local snapshot store plugin config entry is `akka.persistence.snapshot-store.local`. It writes snapshot files to the local filesystem. Enable this plugin by defining config property: -@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-snapshot-plugin-config } +@@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-snapshot-plugin-config } The default storage location is a directory named `snapshots` in the current working directory. This can be changed by configuration where the specified path can be relative or absolute: -@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-config } +@@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-config } Note that it is not mandatory to specify a snapshot store plugin. If you don't use snapshots you don't have to configure it. @@ -1097,7 +1097,7 @@ Serialization of snapshots and payloads of `Persistent` messages is configurable it must add -@@snip [PersistenceSerializerDocSpec.scala]($code$/scala/docs/persistence/PersistenceSerializerDocSpec.scala) { #custom-serializer-config } +@@snip [PersistenceSerializerDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceSerializerDocSpec.scala) { #custom-serializer-config } to the application configuration. If not specified, a default serializer is used. @@ -1107,11 +1107,11 @@ For more advanced schema evolution techniques refer to the @ref:[Persistence - S When running tests with LevelDB default settings in `sbt`, make sure to set `fork := true` in your sbt project. Otherwise, you'll see an `UnsatisfiedLinkError`. 
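In `build.sbt` that is the single setting:

```scala
// Run tests in a forked JVM so the native LevelDB library can be loaded.
fork := true
```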
Alternatively, you can switch to a LevelDB Java port by setting -@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #native-config } +@@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #native-config } or -@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-native-config } +@@snip [PersistencePluginDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-native-config } in your Akka configuration. The LevelDB Java port is for testing purposes only. @@ -1144,29 +1144,29 @@ to the @ref:[reference configuration](general/configuration.md#config-akka-persi By default, a persistent actor will use the "default" journal and snapshot store plugins configured in the following sections of the `reference.conf` configuration resource: -@@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #default-config } +@@snip [PersistenceMultiDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #default-config } Note that in this case the actor overrides only the `persistenceId` method: Scala -: @@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #default-plugins } +: @@snip [PersistenceMultiDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #default-plugins } Java -: @@snip [PersistenceMultiDocTest.java]($code$/java/jdocs/persistence/PersistenceMultiDocTest.java) { #default-plugins } +: @@snip [PersistenceMultiDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceMultiDocTest.java) { #default-plugins } When the persistent actor overrides the `journalPluginId` and `snapshotPluginId` methods, the actor will be serviced by these specific persistence plugins instead of the defaults: Scala -: @@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #override-plugins } +: @@snip [PersistenceMultiDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #override-plugins } Java -: @@snip [PersistenceMultiDocTest.java]($code$/java/jdocs/persistence/PersistenceMultiDocTest.java) { #override-plugins } +: @@snip [PersistenceMultiDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceMultiDocTest.java) { #override-plugins } Note that `journalPluginId` and `snapshotPluginId` must refer to properly configured `reference.conf` plugin entries with a standard `class` property as well as settings which are specific for those plugins, i.e.: -@@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #override-config } +@@snip [PersistenceMultiDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #override-config } ## Give persistence plugin configurations at runtime @@ -1177,10 +1177,10 @@ the actor will use the declared `Config` objects with a fallback on the default It allows a dynamic configuration of the journal and the snapshot store at runtime: Scala -: @@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #runtime-config } +: @@snip [PersistenceMultiDocSpec.scala](/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #runtime-config } Java -: @@snip 
[PersistenceMultiDocTest.java]($code$/java/jdocs/persistence/PersistenceMultiDocTest.java) { #runtime-config } +: @@snip [PersistenceMultiDocTest.java](/akka-docs/src/test/java/jdocs/persistence/PersistenceMultiDocTest.java) { #runtime-config } ## See also diff --git a/akka-docs/src/main/paradox/remoting-artery.md b/akka-docs/src/main/paradox/remoting-artery.md index 73ee0ade82..ae8b186add 100644 --- a/akka-docs/src/main/paradox/remoting-artery.md +++ b/akka-docs/src/main/paradox/remoting-artery.md @@ -260,10 +260,10 @@ which in this sample corresponds to `sampleActorSystem@127.0.0.1:2553`. Once you have configured the properties above you would do the following in code: Scala -: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #sample-actor } +: @@snip [RemoteDeploymentDocSpec.scala](/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #sample-actor } Java -: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #sample-actor } +: @@snip [RemoteDeploymentDocTest.java](/akka-docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #sample-actor } The actor class `SampleActor` has to be available to the runtimes using it, i.e. the classloader of the actor systems has to have a JAR containing the class. @@ -300,26 +300,26 @@ precedence. With these imports: Scala -: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #import } +: @@snip [RemoteDeploymentDocSpec.scala](/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #import } Java -: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #import } +: @@snip [RemoteDeploymentDocTest.java](/akka-docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #import } and a remote address like this: Scala -: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #make-address-artery } +: @@snip [RemoteDeploymentDocSpec.scala](/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #make-address-artery } Java -: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #make-address-artery } +: @@snip [RemoteDeploymentDocTest.java](/akka-docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #make-address-artery } you can advise the system to create a child on that remote node like so: Scala -: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #deploy } +: @@snip [RemoteDeploymentDocSpec.scala](/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #deploy } Java -: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #deploy } +: @@snip [RemoteDeploymentDocTest.java](/akka-docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #deploy } ### Remote deployment whitelist @@ -334,7 +334,7 @@ The list of allowed classes has to be configured on the "remote" system, in othe others will be attempting to remote deploy Actors. That system, locally, knows best which Actors it should or should not allow others to remote deploy onto it. 
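For context, the kind of remote deployment such a whitelist guards looks roughly like this in code (a sketch: the address, system name and `SampleActor` class mirror the example above, but the values are illustrative):

```scala
import akka.actor.{ ActorSystem, Address, Deploy, Props }
import akka.remote.RemoteScope

val system = ActorSystem("sampleActorSystem")
// With Artery, remote addresses use the plain "akka" protocol.
val address = Address("akka", "sampleActorSystem", "127.0.0.1", 2553)
val sampleActor = system.actorOf(
  Props[SampleActor].withDeploy(Deploy(scope = RemoteScope(address))),
  "sampleActor")
```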
The full settings section may for example look like this: -@@snip [RemoteDeploymentWhitelistSpec.scala]($akka$/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } +@@snip [RemoteDeploymentWhitelistSpec.scala](/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } Actor classes not included in the whitelist will not be allowed to be remote deployed onto this system. @@ -662,10 +662,10 @@ remained the same, we recommend reading the @ref:[Serialization](serialization.m Implementing an `akka.serialization.ByteBufferSerializer` works the same way as any other serializer, Scala -: @@snip [Serializer.scala]($akka$/akka-actor/src/main/scala/akka/serialization/Serializer.scala) { #ByteBufferSerializer } +: @@snip [Serializer.scala](/akka-actor/src/main/scala/akka/serialization/Serializer.scala) { #ByteBufferSerializer } Java -: @@snip [ByteBufferSerializerDocTest.java]($code$/java/jdocs/actor/ByteBufferSerializerDocTest.java) { #ByteBufferSerializer-interface } +: @@snip [ByteBufferSerializerDocTest.java](/akka-docs/src/test/java/jdocs/actor/ByteBufferSerializerDocTest.java) { #ByteBufferSerializer-interface } Implementing a serializer for Artery is therefore as simple as implementing this interface, and binding the serializer as usual (which is explained in @ref:[Serialization](serialization.md)). @@ -677,10 +677,10 @@ The array based methods will be used when `ByteBuffer` is not used, e.g. in Akka Note that the array based methods can be implemented by delegation like this: Scala -: @@snip [ByteBufferSerializerDocSpec.scala]($code$/scala/docs/actor/ByteBufferSerializerDocSpec.scala) { #bytebufserializer-with-manifest } +: @@snip [ByteBufferSerializerDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ByteBufferSerializerDocSpec.scala) { #bytebufserializer-with-manifest } Java -: @@snip [ByteBufferSerializerDocTest.java]($code$/java/jdocs/actor/ByteBufferSerializerDocTest.java) { #bytebufserializer-with-manifest } +: @@snip [ByteBufferSerializerDocTest.java](/akka-docs/src/test/java/jdocs/actor/ByteBufferSerializerDocTest.java) { #bytebufserializer-with-manifest } ### Disabling the Java Serializer @@ -693,14 +693,14 @@ It is absolutely feasible to combine remoting with @ref:[Routing](routing.md). A pool of remote deployed routees can be configured as: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool-artery } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool-artery } This configuration setting will clone the actor defined in the `Props` of the `remotePool` 10 times and deploy it evenly distributed across the two given target nodes. A group of remote actors can be configured as: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group-artery } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group-artery } This configuration setting will send messages to the defined remote actor paths. It requires that you create the destination actors on the remote nodes with matching paths. @@ -909,7 +909,7 @@ There are lots of configuration properties that are related to remoting in Akka. 
Setting properties like the listening IP and port number programmatically is best done by using something like the following: -@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic-artery } +@@snip [RemoteDeploymentDocTest.java](/akka-docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic-artery } @@@ diff --git a/akka-docs/src/main/paradox/remoting.md b/akka-docs/src/main/paradox/remoting.md index eb217ddcc8..acfae89980 100644 --- a/akka-docs/src/main/paradox/remoting.md +++ b/akka-docs/src/main/paradox/remoting.md @@ -164,10 +164,10 @@ which in this sample corresponds to `sampleActorSystem@127.0.0.1:2553`. Once you have configured the properties above you would do the following in code: Scala -: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #sample-actor } +: @@snip [RemoteDeploymentDocSpec.scala](/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #sample-actor } Java -: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #sample-actor } +: @@snip [RemoteDeploymentDocTest.java](/akka-docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #sample-actor } The actor class `SampleActor` has to be available to the runtimes using it, i.e. the classloader of the actor systems has to have a JAR containing the class. @@ -209,26 +209,26 @@ precedence. With these imports: Scala -: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #import } +: @@snip [RemoteDeploymentDocSpec.scala](/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #import } Java -: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #import } +: @@snip [RemoteDeploymentDocTest.java](/akka-docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #import } and a remote address like this: Scala -: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #make-address } +: @@snip [RemoteDeploymentDocSpec.scala](/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #make-address } Java -: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #make-address } +: @@snip [RemoteDeploymentDocTest.java](/akka-docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #make-address } you can advise the system to create a child on that remote node like so: Scala -: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #deploy } +: @@snip [RemoteDeploymentDocSpec.scala](/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #deploy } Java -: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #deploy } +: @@snip [RemoteDeploymentDocTest.java](/akka-docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #deploy } ### Remote deployment whitelist @@ -244,7 +244,7 @@ The list of allowed classes has to be configured on the "remote" system, in othe others will be attempting to remote deploy Actors. That system, locally, knows best which Actors it should or should not allow others to remote deploy onto it. 
The full settings section may for example look like this: -@@snip [RemoteDeploymentWhitelistSpec.scala]($akka$/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } +@@snip [RemoteDeploymentWhitelistSpec.scala](/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } Actor classes not included in the whitelist will not be allowed to be remote deployed onto this system. @@ -345,14 +345,14 @@ It is absolutely feasible to combine remoting with @ref:[Routing](routing.md). A pool of remote deployed routees can be configured as: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool } This configuration setting will clone the actor defined in the `Props` of the `remotePool` 10 times and deploy it evenly distributed across the two given target nodes. A group of remote actors can be configured as: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } This configuration setting will send messages to the defined remote actor paths. It requires that you create the destination actors on the remote nodes with matching paths. @@ -587,7 +587,7 @@ There are lots of configuration properties that are related to remoting in Akka. Setting properties like the listening IP and port number programmatically is best done by using something like the following: -@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic } +@@snip [RemoteDeploymentDocTest.java](/akka-docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic } @@@ diff --git a/akka-docs/src/main/paradox/routing.md b/akka-docs/src/main/paradox/routing.md index 1ed7fab9cd..bce4ce16de 100644 --- a/akka-docs/src/main/paradox/routing.md +++ b/akka-docs/src/main/paradox/routing.md @@ -26,10 +26,10 @@ also possible to [create your own](#custom-router). The following example illustrates how to use a `Router` and manage the routees from within an actor. Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #router-in-actor } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #router-in-actor } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #router-in-actor } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #router-in-actor } We create a `Router` and specify that it should use `RoundRobinRoutingLogic` when routing the messages to the routees. @@ -97,22 +97,22 @@ few exceptions. These are documented in the [Specially Handled Messages](#router The following code and configuration snippets show how to create a [round-robin](#round-robin-router) router that forwards messages to five `Worker` routees. The routees will be created as the router's children. 
-@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-1 } Here is the same example, but with the router configuration provided programmatically instead of from configuration. Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-2 } #### Remote Deployed Routees @@ -123,10 +123,10 @@ fashion. In order to deploy routees remotely, wrap the router configuration in a deployment requires the `akka-remote` module to be included in the classpath. Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #remoteRoutees } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #remoteRoutees } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #remoteRoutees } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #remoteRoutees } #### Senders @@ -134,20 +134,20 @@ By default, when a routee sends a message, it will @ref:[implicitly set itself a ](actors.md#actors-tell-sender). Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #reply-without-sender } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #reply-without-sender } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #reply-with-self } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #reply-with-self } However, it is often useful for routees to set the *router* as a sender. For example, you might want to set the router as the sender if you want to hide the details of the routees behind the router. The following code snippet shows how to set the parent router as sender. Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #reply-with-sender } +: @@snip [ActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala) { #reply-with-sender } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #reply-with-parent } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #reply-with-parent } #### Supervision @@ -176,10 +176,10 @@ by specifying the strategy when defining the router. 
Setting the strategy is done like this: Scala -: @@snip [RoutingSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala) { #supervision } +: @@snip [RoutingSpec.scala](/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala) { #supervision } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #supervision } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #supervision } @@@ note @@ -200,42 +200,42 @@ to these paths, wildcards can be and will result in the same @ref:[semantics as The example below shows how to create a router by providing it with the path strings of three routee actors. -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-group-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #round-robin-group-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-group-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #round-robin-group-1 } Here is the same example, but with the router configuration provided programmatically instead of from configuration. Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #round-robin-group-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #paths #round-robin-group-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #round-robin-group-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #paths #round-robin-group-2 } The routee actors are created externally from the router: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #create-workers } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #create-workers } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #create-workers } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #create-workers } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #create-worker-actors } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #create-worker-actors } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #create-worker-actors } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #create-worker-actors } The paths may contain protocol and address information for actors running on remote hosts. Remoting requires the `akka-remote` module to be included in the classpath. -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } ## Router usage @@ -246,10 +246,10 @@ Note that deployment paths in the configuration starts with `/parent/` followed of the router actor. 
Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #create-parent } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #create-parent } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #create-parent } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #create-parent } ### RoundRobinPool and RoundRobinGroup @@ -258,39 +258,39 @@ Routes in a [round-robin](http://en.wikipedia.org/wiki/Round-robin) fashion to i RoundRobinPool defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-1 } RoundRobinPool defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-2 } RoundRobinGroup defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-group-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #round-robin-group-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-group-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #round-robin-group-1 } RoundRobinGroup defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #round-robin-group-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #paths #round-robin-group-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #round-robin-group-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #paths #round-robin-group-2 } ### RandomPool and RandomGroup @@ -298,39 +298,39 @@ This router type selects one of its routees randomly for each message. 
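All of the following "…defined in configuration" variants share the same shape in code: the router actor is created with `FromConfig`, so its router type and settings are looked up under `akka.actor.deployment` at the actor's path. A rough sketch, assuming an enclosing actor and a `Worker` routee class as in the referenced snippets (the name `router20` is illustrative):

```scala
import akka.actor.Props
import akka.routing.FromConfig

// The router type (random-pool, round-robin-pool, ...) comes from configuration.
val router20 = context.actorOf(FromConfig.props(Props[Worker]), "router20")
```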
RandomPool defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-random-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-random-pool } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #random-pool-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #random-pool-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #random-pool-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #random-pool-1 } RandomPool defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #random-pool-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #random-pool-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #random-pool-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #random-pool-2 } RandomGroup defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-random-group } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-random-group } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #random-group-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #random-group-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #random-group-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #random-group-1 } RandomGroup defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #random-group-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #paths #random-group-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #random-group-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #paths #random-group-2 } ### BalancingPool @@ -362,27 +362,27 @@ as described in [Specially Handled Messages](#router-special-messages). 
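As the snippet contents are not visible in this patch, here is a rough impression of a `BalancingPool` in code (pool size and names are illustrative, assuming an enclosing actor and a `Worker` routee class):

```scala
import akka.actor.Props
import akka.routing.BalancingPool

// One shared mailbox; idle routees pick up work that busy ones have not taken yet.
val balancer = context.actorOf(BalancingPool(5).props(Props[Worker]), "balancer")
```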
BalancingPool defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #balancing-pool-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #balancing-pool-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #balancing-pool-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #balancing-pool-1 } BalancingPool defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #balancing-pool-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #balancing-pool-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #balancing-pool-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #balancing-pool-2 } Additional configuration for the balancing dispatcher, which is used by the pool, can be specified in the `pool-dispatcher` section of the router deployment configuration. -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool2 } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool2 } The `BalancingPool` automatically uses a special `BalancingDispatcher` for its routees - disregarding any dispatcher that is set on the routee Props object. @@ -395,14 +395,14 @@ can be configured as explained in @ref:[Dispatchers](dispatchers.md). In situati routees are expected to perform blocking operations it may be useful to replace it with a `thread-pool-executor` hinting the number of allocated threads explicitly: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool3 } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool3 } It is also possible to change the `mailbox` used by the balancing dispatcher for scenarios where the default unbounded mailbox is not well suited. Such a scenario could arise where there is a need to manage the priority of each message.
You can then implement a priority mailbox and configure your dispatcher: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool4 } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool4 } @@@ note @@ -428,21 +428,21 @@ since their mailbox size is unknown SmallestMailboxPool defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-smallest-mailbox-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-smallest-mailbox-pool } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #smallest-mailbox-pool-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #smallest-mailbox-pool-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #smallest-mailbox-pool-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #smallest-mailbox-pool-1 } SmallestMailboxPool defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #smallest-mailbox-pool-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #smallest-mailbox-pool-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #smallest-mailbox-pool-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #smallest-mailbox-pool-2 } There is no Group variant of the SmallestMailboxPool because the size of the mailbox and the internal dispatching state of the actor is not practically available from the paths @@ -454,41 +454,41 @@ A broadcast router forwards the message it receives to *all* its routees. 
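A sketch of what that looks like in code (size and names illustrative, assuming an enclosing actor and a `Worker` routee class); note that an ordinary message sent to the router reaches every routee:

```scala
import akka.actor.Props
import akka.routing.BroadcastPool

val broadcaster = context.actorOf(BroadcastPool(5).props(Props[Worker]), "broadcaster")
// Each of the five routees receives its own copy of the message.
broadcaster ! "important announcement"
```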
BroadcastPool defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-broadcast-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-broadcast-pool } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcast-pool-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #broadcast-pool-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcast-pool-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #broadcast-pool-1 } BroadcastPool defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcast-pool-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #broadcast-pool-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcast-pool-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #broadcast-pool-2 } BroadcastGroup defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-broadcast-group } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-broadcast-group } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcast-group-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #broadcast-group-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcast-group-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #broadcast-group-1 } BroadcastGroup defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #broadcast-group-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #paths #broadcast-group-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #broadcast-group-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #paths #broadcast-group-2 } @@@ note @@ -509,41 +509,41 @@ It is expecting at least one reply within a configured duration, otherwise it wi ScatterGatherFirstCompletedPool defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-pool } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #scatter-gather-pool-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #scatter-gather-pool-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #scatter-gather-pool-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #scatter-gather-pool-1 } ScatterGatherFirstCompletedPool defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #scatter-gather-pool-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #scatter-gather-pool-2 } Java -: @@snip 
[RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #scatter-gather-pool-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #scatter-gather-pool-2 } ScatterGatherFirstCompletedGroup defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-group } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-group } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #scatter-gather-group-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #scatter-gather-group-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #scatter-gather-group-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #scatter-gather-group-1 } ScatterGatherFirstCompletedGroup defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #scatter-gather-group-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #paths #scatter-gather-group-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #scatter-gather-group-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #paths #scatter-gather-group-2 } ### TailChoppingPool and TailChoppingGroup @@ -559,39 +559,39 @@ This optimisation was described nicely in a blog post by Peter Bailis: TailChoppingPool defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-pool } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #tail-chopping-pool-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #tail-chopping-pool-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #tail-chopping-pool-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #tail-chopping-pool-1 } TailChoppingPool defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #tail-chopping-pool-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #tail-chopping-pool-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #tail-chopping-pool-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #tail-chopping-pool-2 } TailChoppingGroup defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-group } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-group } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #tail-chopping-group-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #tail-chopping-group-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #tail-chopping-group-1 } +: @@snip 
[RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #tail-chopping-group-1 } TailChoppingGroup defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #tail-chopping-group-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #paths #tail-chopping-group-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #tail-chopping-group-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #paths #tail-chopping-group-2 } ### ConsistentHashingPool and ConsistentHashingGroup @@ -618,17 +618,17 @@ the same time for one router. The @scala[`hashMapping`]@java[`withHashMapper`] i Code example: Scala -: @@snip [ConsistentHashingRouterDocSpec.scala]($code$/scala/docs/routing/ConsistentHashingRouterDocSpec.scala) { #cache-actor } +: @@snip [ConsistentHashingRouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala) { #cache-actor } Java -: @@snip [ConsistentHashingRouterDocTest.java]($code$/java/jdocs/routing/ConsistentHashingRouterDocTest.java) { #cache-actor } +: @@snip [ConsistentHashingRouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/ConsistentHashingRouterDocTest.java) { #cache-actor } Scala -: @@snip [ConsistentHashingRouterDocSpec.scala]($code$/scala/docs/routing/ConsistentHashingRouterDocSpec.scala) { #consistent-hashing-router } +: @@snip [ConsistentHashingRouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala) { #consistent-hashing-router } Java -: @@snip [ConsistentHashingRouterDocTest.java]($code$/java/jdocs/routing/ConsistentHashingRouterDocTest.java) { #consistent-hashing-router } +: @@snip [ConsistentHashingRouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/ConsistentHashingRouterDocTest.java) { #consistent-hashing-router } In the above example you see that the `Get` message implements `ConsistentHashable` itself, while the `Entry` message is wrapped in a `ConsistentHashableEnvelope`. The `Evict` @@ -636,39 +636,39 @@ message is handled by the `hashMapping` partial function. 
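To make the three hashing variants concrete, a sketch of sending to such a router (`cache`, `Get` and `Entry` are the router reference and message types from the `#cache-actor` snippet referenced above):

```scala
import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope

// Entry carries no hashing information of its own, so wrap it in an envelope;
// Get implements ConsistentHashable itself and can be sent as-is.
cache ! ConsistentHashableEnvelope(message = Entry("hello", "HELLO"), hashKey = "hello")
cache ! Get("hello")
```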
ConsistentHashingPool defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-pool } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #consistent-hashing-pool-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #consistent-hashing-pool-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #consistent-hashing-pool-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #consistent-hashing-pool-1 } ConsistentHashingPool defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #consistent-hashing-pool-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #consistent-hashing-pool-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #consistent-hashing-pool-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #consistent-hashing-pool-2 } ConsistentHashingGroup defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-group } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-group } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #consistent-hashing-group-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #consistent-hashing-group-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #consistent-hashing-group-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #consistent-hashing-group-1 } ConsistentHashingGroup defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #consistent-hashing-group-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #paths #consistent-hashing-group-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #consistent-hashing-group-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #paths #consistent-hashing-group-2 } `virtual-nodes-factor` is the number of virtual nodes per routee that is used in the consistent hash node ring to make the distribution more uniform. @@ -694,10 +694,10 @@ The example below shows how you would use a `Broadcast` message to send a very i to every routee of a router. 
Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcastDavyJonesWarning } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #broadcastDavyJonesWarning } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcastDavyJonesWarning } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #broadcastDavyJonesWarning } In this example the router receives the `Broadcast` message, extracts its payload (`"Watch out for Davy Jones' locker"`), and then sends the payload on to all of the router's @@ -718,10 +718,10 @@ receives a `PoisonPill` message, that actor will be stopped. See the @ref:[Poiso documentation for details. Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #poisonPill } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #poisonPill } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #poisonPill } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #poisonPill } For a router, which normally passes on messages to routees, it is important to realise that `PoisonPill` messages are processed by the router only. `PoisonPill` messages sent to a router @@ -740,10 +740,10 @@ routee will receive the `PoisonPill` message. Note that this will stop all route routees aren't children of the router, i.e. even routees programmatically provided to the router. Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcastPoisonPill } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #broadcastPoisonPill } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcastPoisonPill } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #broadcastPoisonPill } With the code shown above, each routee will receive a `PoisonPill` message. Each routee will continue to process its messages as normal, eventually processing the `PoisonPill`. This will @@ -771,10 +771,10 @@ supervision directive that is applied to the router. Routees that are not the ro those that were created externally to the router, will not be affected. Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #kill } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #kill } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #kill } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #kill } As with the `PoisonPill` message, there is a distinction between killing a router, which indirectly kills its children (who happen to be routees), and killing routees directly (some of whom @@ -782,10 +782,10 @@ may not be children.) To kill routees directly the router should be sent a `Kill in a `Broadcast` message. 
Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcastKill } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #broadcastKill } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcastKill } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #broadcastKill } ### Management Messages @@ -817,13 +817,13 @@ pressure is lower than certain threshold. Both thresholds are configurable. Pool with default resizer defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-resize-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-resize-pool } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #resize-pool-1 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #resize-pool-1 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #resize-pool-1 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #resize-pool-1 } Several more configuration options are available and described in `akka.actor.deployment.default.resizer` section of the reference @ref:[configuration](general/configuration.md). @@ -831,10 +831,10 @@ section of the reference @ref:[configuration](general/configuration.md). Pool with resizer defined in code: Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #resize-pool-2 } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #resize-pool-2 } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #resize-pool-2 } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #resize-pool-2 } *It is also worth pointing out that if you define the ``router`` in the configuration file then this value will be used instead of any programmatically sent parameters.* @@ -867,13 +867,13 @@ The memory usage is O(n) where n is the number of sizes you allow, i.e. upperBou Pool with `OptimalSizeExploringResizer` defined in configuration: -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-optimal-size-exploring-resize-pool } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-optimal-size-exploring-resize-pool } Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #optimal-size-exploring-resize-pool } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #optimal-size-exploring-resize-pool } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #optimal-size-exploring-resize-pool } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #optimal-size-exploring-resize-pool } Several more configuration options are available and described in `akka.actor.deployment.default.optimal-size-exploring-resizer` section of the reference @ref:[configuration](general/configuration.md). 
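Returning to the default resizer for a moment: it can also be attached programmatically, roughly like this (a sketch; bounds and names are illustrative, assuming an enclosing actor and a `Worker` routee class):

```scala
import akka.actor.Props
import akka.routing.{ DefaultResizer, RoundRobinPool }

// The pool may shrink to 2 and grow to 15 routees depending on pressure.
val resizer = DefaultResizer(lowerBound = 2, upperBound = 15)
val router = context.actorOf(
  RoundRobinPool(5, Some(resizer)).props(Props[Worker]),
  "resizableRouter")
```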
@@ -928,10 +928,10 @@ The router created in this example is replicating each message to a few destinat Start with the routing logic: Scala -: @@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #routing-logic } +: @@snip [CustomRouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala) { #routing-logic } Java -: @@snip [CustomRouterDocTest.java]($code$/java/jdocs/routing/CustomRouterDocTest.java) { #routing-logic } +: @@snip [CustomRouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/CustomRouterDocTest.java) { #routing-logic } `select` will be called for each message and in this example picks a few destinations round-robin, reusing the existing `RoundRobinRoutingLogic` and wrapping the result in a `SeveralRoutees` @@ -942,10 +942,10 @@ The implementation of the routing logic must be thread safe, since it might be u A unit test of the routing logic: Scala -: @@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #unit-test-logic } +: @@snip [CustomRouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala) { #unit-test-logic } Java -: @@snip [CustomRouterDocTest.java]($code$/java/jdocs/routing/CustomRouterDocTest.java) { #unit-test-logic } +: @@snip [CustomRouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/CustomRouterDocTest.java) { #unit-test-logic } You could stop here and use the `RedundancyRoutingLogic` with an `akka.routing.Router` as described in [A Simple Router](#simple-router). @@ -956,27 +956,27 @@ Create a class that extends `Pool`, `Group` or `CustomRouterConfig`. That class for the routing logic and holds the configuration for the router. Here we make it a `Group`. Scala -: @@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #group } +: @@snip [CustomRouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala) { #group } Java -: @@snip [RedundancyGroup.java]($code$/java/jdocs/routing/RedundancyGroup.java) { #group } +: @@snip [RedundancyGroup.java](/akka-docs/src/test/java/jdocs/routing/RedundancyGroup.java) { #group } This can be used exactly as the router actors provided by Akka. Scala -: @@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #usage-1 } +: @@snip [CustomRouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala) { #usage-1 } Java -: @@snip [CustomRouterDocTest.java]($code$/java/jdocs/routing/CustomRouterDocTest.java) { #usage-1 } +: @@snip [CustomRouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/CustomRouterDocTest.java) { #usage-1 } Note that we added a constructor in `RedundancyGroup` that takes a `Config` parameter. That makes it possible to define it in configuration. Scala -: @@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #config } +: @@snip [CustomRouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala) { #config } Java -: @@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #jconfig } +: @@snip [CustomRouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala) { #jconfig } Note the fully qualified class name in the `router` property. The router class must extend `akka.routing.RouterConfig` (`Pool`, `Group` or `CustomRouterConfig`) and have a @@ -984,10 +984,10 @@ constructor with one `com.typesafe.config.Config` parameter.
The deployment section of the configuration is passed to the constructor. Scala -: @@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #usage-2 } +: @@snip [CustomRouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala) { #usage-2 } Java -: @@snip [CustomRouterDocTest.java]($code$/java/jdocs/routing/CustomRouterDocTest.java) { #usage-2 } +: @@snip [CustomRouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/CustomRouterDocTest.java) { #usage-2 } ## Configuring Dispatchers @@ -997,7 +997,7 @@ The dispatcher for created children of the pool will be taken from To make it easy to define the dispatcher of the routees of the pool you can define the dispatcher inline in the deployment section of the config. -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-pool-dispatcher } +@@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #config-pool-dispatcher } That is the only thing you need to do to enable a dedicated dispatcher for a pool. @@ -1019,10 +1019,10 @@ property in their constructor or factory method, custom routers have to implement the method in a suitable way. Scala -: @@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #dispatchers } +: @@snip [RouterDocSpec.scala](/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala) { #dispatchers } Java -: @@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #dispatchers } +: @@snip [RouterDocTest.java](/akka-docs/src/test/java/jdocs/routing/RouterDocTest.java) { #dispatchers } @@@ note diff --git a/akka-docs/src/main/paradox/scheduler.md b/akka-docs/src/main/paradox/scheduler.md index cee2fcb5af..651efb5450 100644 --- a/akka-docs/src/main/paradox/scheduler.md +++ b/akka-docs/src/main/paradox/scheduler.md @@ -53,34 +53,34 @@ by the `akka.scheduler.tick-duration` configuration property.
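As a small illustration of that property (the value is illustrative; coarser ticks trade timer precision for fewer wakeups):

```scala
import com.typesafe.config.ConfigFactory

// Sketch: lower the scheduler's resolution from the default to 33ms.
val config = ConfigFactory.parseString("""
  akka.scheduler.tick-duration = 33ms
""")
```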
## Some examples

Scala
-: @@snip [SchedulerDocSpec.scala]($code$/scala/docs/actor/SchedulerDocSpec.scala) { #imports1 }
+: @@snip [SchedulerDocSpec.scala](/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala) { #imports1 }

Java
-: @@snip [SchedulerDocTest.java]($code$/java/jdocs/actor/SchedulerDocTest.java) { #imports1 }
+: @@snip [SchedulerDocTest.java](/akka-docs/src/test/java/jdocs/actor/SchedulerDocTest.java) { #imports1 }

Schedule to send the "foo"-message to the testActor after 50ms:

Scala
-: @@snip [SchedulerDocSpec.scala]($code$/scala/docs/actor/SchedulerDocSpec.scala) { #schedule-one-off-message }
+: @@snip [SchedulerDocSpec.scala](/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala) { #schedule-one-off-message }

Java
-: @@snip [SchedulerDocTest.java]($code$/java/jdocs/actor/SchedulerDocTest.java) { #schedule-one-off-message }
+: @@snip [SchedulerDocTest.java](/akka-docs/src/test/java/jdocs/actor/SchedulerDocTest.java) { #schedule-one-off-message }

Schedule a @scala[function]@java[`Runnable`] that sends the current time to the testActor, to be executed after 50ms:

Scala
-: @@snip [SchedulerDocSpec.scala]($code$/scala/docs/actor/SchedulerDocSpec.scala) { #schedule-one-off-thunk }
+: @@snip [SchedulerDocSpec.scala](/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala) { #schedule-one-off-thunk }

Java
-: @@snip [SchedulerDocTest.java]($code$/java/jdocs/actor/SchedulerDocTest.java) { #schedule-one-off-thunk }
+: @@snip [SchedulerDocTest.java](/akka-docs/src/test/java/jdocs/actor/SchedulerDocTest.java) { #schedule-one-off-thunk }

Schedule to send the "Tick"-message to the `tickActor` after 0ms repeating every 50ms:

Scala
-: @@snip [SchedulerDocSpec.scala]($code$/scala/docs/actor/SchedulerDocSpec.scala) { #schedule-recurring }
+: @@snip [SchedulerDocSpec.scala](/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala) { #schedule-recurring }

Java
-: @@snip [SchedulerDocTest.java]($code$/java/jdocs/actor/SchedulerDocTest.java) { #schedule-recurring }
+: @@snip [SchedulerDocTest.java](/akka-docs/src/test/java/jdocs/actor/SchedulerDocTest.java) { #schedule-recurring }

@@@ warning

@@ -95,7 +95,7 @@ necessary parameters) and then call the method when the message is received.

## From `akka.actor.ActorSystem`

-@@snip [ActorSystem.scala]($akka$/akka-actor/src/main/scala/akka/actor/ActorSystem.scala) { #scheduler }
+@@snip [ActorSystem.scala](/akka-actor/src/main/scala/akka/actor/ActorSystem.scala) { #scheduler }

@@@ warning

@@ -112,10 +112,10 @@ different one using the `akka.scheduler.implementation` configuration property.

The referenced class must implement the following interface:

Scala
-: @@snip [Scheduler.scala]($akka$/akka-actor/src/main/scala/akka/actor/Scheduler.scala) { #scheduler }
+: @@snip [Scheduler.scala](/akka-actor/src/main/scala/akka/actor/Scheduler.scala) { #scheduler }

Java
-: @@snip [AbstractScheduler.java]($akka$/akka-actor/src/main/java/akka/actor/AbstractScheduler.java) { #scheduler }
+: @@snip [AbstractScheduler.java](/akka-actor/src/main/java/akka/actor/AbstractScheduler.java) { #scheduler }

## The Cancellable interface

@@ -131,4 +131,4 @@ scheduled task was canceled or will (eventually) have run.
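Taken together, the scheduling calls above follow this pattern (a minimal sketch, assuming a running `ActorSystem` and using `system.dispatcher` as the `ExecutionContext`):

```scala
import akka.actor.{ ActorSystem, Cancellable }
import scala.concurrent.duration._

object SchedulerSketch extends App {
  val system = ActorSystem("scheduler-sketch")
  // The scheduler needs an ExecutionContext to run the task on.
  import system.dispatcher

  // Run once, 50 milliseconds from now.
  val once: Cancellable =
    system.scheduler.scheduleOnce(50.millis)(println("fired once"))

  // Run immediately and then every 50 milliseconds.
  val ticks: Cancellable =
    system.scheduler.schedule(Duration.Zero, 50.millis)(println("tick"))

  Thread.sleep(200)
  // cancel() returns true if this call actually cancelled the task.
  ticks.cancel()
  system.terminate()
}
```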
@@@

-@@snip [Scheduler.scala]($akka$/akka-actor/src/main/scala/akka/actor/Scheduler.scala) { #cancellable }
+@@snip [Scheduler.scala](/akka-actor/src/main/scala/akka/actor/Scheduler.scala) { #cancellable }

diff --git a/akka-docs/src/main/paradox/serialization.md b/akka-docs/src/main/paradox/serialization.md
index 3859174bc2..4208584499 100644
--- a/akka-docs/src/main/paradox/serialization.md
+++ b/akka-docs/src/main/paradox/serialization.md
@@ -24,12 +24,12 @@ For Akka to know which `Serializer` to use for what, you need edit your [Configu

in the "akka.actor.serializers"-section you bind names to implementations of the `akka.serialization.Serializer` you wish to use, like this:

-@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-serializers-config }
+@@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-serializers-config }

After you've bound names to different implementations of `Serializer` you need to wire which classes should be serialized using which `Serializer`; this is done in the "akka.actor.serialization-bindings"-section:

-@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialization-bindings-config }
+@@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #serialization-bindings-config }

You only need to specify the name of an interface or abstract base class of the messages. In case of ambiguity, i.e. the message implements several of the
@@ -79,11 +79,11 @@ Alternatively, you can disable all Java serialization which then automatically w

Normally, messages sent between local actors (i.e. same JVM) do not undergo serialization. For testing it may sometimes be desirable to force serialization on all messages (both remote and local).
If you want to do this in order to verify that your messages are serializable, you can enable the following config option:

-@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-messages-config }
+@@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-messages-config }

If you want to verify that your `Props` are serializable, you can enable the following config option:

-@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-creators-config }
+@@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-creators-config }

@@@ warning

@@ -97,17 +97,17 @@ If you want to programmatically serialize/deserialize using Akka Serialization,

here are some examples:

Scala
-: @@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #imports }
+: @@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #imports }

Java
-: @@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #imports }
+: @@snip [SerializationDocTest.java](/akka-docs/src/test/java/jdocs/serialization/SerializationDocTest.java) { #imports }

Scala
-: @@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #programmatic }
+: @@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #programmatic }

Java
-: @@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #programmatic }
+: @@snip [SerializationDocTest.java](/akka-docs/src/test/java/jdocs/serialization/SerializationDocTest.java) { #programmatic }

For more information, have a look at the `ScalaDoc` for `akka.serialization._`

@@ -120,17 +120,17 @@ The first code snippet on this page contains a configuration file that reference

A custom `Serializer` has to inherit from @scala[`akka.serialization.Serializer`]@java[`akka.serialization.JSerializer`] and can be defined like the following:

Scala
-: @@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #imports }
+: @@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #imports }

Java
-: @@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #imports }
+: @@snip [SerializationDocTest.java](/akka-docs/src/test/java/jdocs/serialization/SerializationDocTest.java) { #imports }

Scala
-: @@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #my-own-serializer }
+: @@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #my-own-serializer }

Java
-: @@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #my-own-serializer }
+: @@snip [SerializationDocTest.java](/akka-docs/src/test/java/jdocs/serialization/SerializationDocTest.java) { #my-own-serializer }

The manifest is a type hint so that the same serializer can be used for different classes.
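For orientation, such a serializer has roughly this shape (a sketch under the classic `Serializer` API; the identifier value and the (de)serialization logic are placeholders):

```scala
import akka.serialization.Serializer

class MyOwnSerializer extends Serializer {

  // Pick a unique identifier; 0-40 is reserved by Akka itself.
  def identifier: Int = 1234567

  // If true, fromBinary receives the class of the serialized object as a hint.
  def includeManifest: Boolean = true

  def toBinary(obj: AnyRef): Array[Byte] = {
    // Turn the object into bytes here (placeholder).
    Array.emptyByteArray
  }

  def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = {
    // Use the manifest to decide what to instantiate from the bytes (placeholder).
    throw new UnsupportedOperationException("sketch only")
  }
}
```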
The manifest parameter in @scala[`fromBinary`]@java[`fromBinaryJava`] is the class of the object that
@@ -160,10 +160,10 @@ class name if you used `includeManifest=true`, otherwise it will be the empty st

This is what a `SerializerWithStringManifest` looks like:

Scala
-: @@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #my-own-serializer2 }
+: @@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #my-own-serializer2 }

Java
-: @@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #my-own-serializer2 }
+: @@snip [SerializationDocTest.java](/akka-docs/src/test/java/jdocs/serialization/SerializationDocTest.java) { #my-own-serializer2 }

You must also bind it to a name in your [Configuration]() and then list which classes should be serialized using it.

@@ -186,17 +186,17 @@ address which shall be the recipient of the serialized information.

Use `Serialization.serializedActorPath(actorRef)` like this:

Scala
-: @@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #imports }
+: @@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #imports }

Java
-: @@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #imports }
+: @@snip [SerializationDocTest.java](/akka-docs/src/test/java/jdocs/serialization/SerializationDocTest.java) { #imports }

Scala
-: @@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #actorref-serializer }
+: @@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #actorref-serializer }

Java
-: @@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #actorref-serializer }
+: @@snip [SerializationDocTest.java](/akka-docs/src/test/java/jdocs/serialization/SerializationDocTest.java) { #actorref-serializer }

This assumes that serialization happens in the context of sending a message through the remote transport.
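Both directions can be sketched like this (the resolving half assumes access to the `ExtendedActorSystem`):

```scala
import akka.actor.{ ActorRef, ExtendedActorSystem }
import akka.serialization.Serialization

object ActorRefSerializationSketch {

  // Includes the system's remote address in the path, so a remote node
  // can send messages back to this exact actor incarnation.
  def toWire(ref: ActorRef): String =
    Serialization.serializedActorPath(ref)

  // Turn the string back into an ActorRef on the receiving side.
  def fromWire(system: ExtendedActorSystem, path: String): ActorRef =
    system.provider.resolveActorRef(path)
}
```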
There are other uses of serialization, though,
@@ -212,10 +212,10 @@ the appropriate address to use when sending to `remoteAddr` you can use

`ActorRefProvider.getExternalAddressFor(remoteAddr)` like this:

Scala
-: @@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #external-address }
+: @@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #external-address }

Java
-: @@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #external-address }
+: @@snip [SerializationDocTest.java](/akka-docs/src/test/java/jdocs/serialization/SerializationDocTest.java) { #external-address }

@@@ note

@@ -242,10 +242,10 @@ There is also a default remote address which is the one used by cluster support

(and typical systems have just this one); you can get it like this:

Scala
-: @@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #external-address-default }
+: @@snip [SerializationDocSpec.scala](/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #external-address-default }

Java
-: @@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #external-address-default }
+: @@snip [SerializationDocTest.java](/akka-docs/src/test/java/jdocs/serialization/SerializationDocTest.java) { #external-address-default }

Another solution is to encapsulate your serialization code in `Serialization.withTransportInformation`. It ensures the actorRefs are serialized using the system's default address when

diff --git a/akka-docs/src/main/paradox/stream/operators/ActorFlow/ask.md b/akka-docs/src/main/paradox/stream/operators/ActorFlow/ask.md
index 468708b15b..b70f1a4b43 100644
--- a/akka-docs/src/main/paradox/stream/operators/ActorFlow/ask.md
+++ b/akka-docs/src/main/paradox/stream/operators/ActorFlow/ask.md
@@ -18,7 +18,7 @@ This operator is included in:

## Signature

-@@signature [ActorFlow.scala]($akka$/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorFlow.scala) { #ask }
+@@signature [ActorFlow.scala](/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorFlow.scala) { #ask }

@@@

@@ -31,8 +31,8 @@ a `IOResult` upon reaching the end of the file or if there is a failure.
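The usage pattern in those snippets is roughly the following (a sketch; the `Asking`/`Reply` protocol is made up for illustration):

```scala
import akka.NotUsed
import akka.actor.typed.ActorRef
import akka.stream.scaladsl.Flow
import akka.stream.typed.scaladsl.ActorFlow
import akka.util.Timeout
import scala.concurrent.duration._

object AskSketch {
  // Hypothetical request/response protocol for the target actor.
  final case class Asking(payload: String, replyTo: ActorRef[Reply])
  final case class Reply(msg: String)

  implicit val timeout: Timeout = 1.second

  // Each stream element is turned into an Asking message; the emitted
  // elements are the Reply values the actor sends back.
  def replies(ref: ActorRef[Asking]): Flow[String, Reply, NotUsed] =
    ActorFlow.ask(ref)((el: String, replyTo: ActorRef[Reply]) => Asking(el, replyTo))
}
```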
Scala -: @@snip [ask.scala]($akka$/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorFlowSpec.scala) { #imports #ask-actor #ask } +: @@snip [ask.scala](/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorFlowSpec.scala) { #imports #ask-actor #ask } Java -: @@snip [ask.java]($akka$/akka-stream-typed/src/test/java/akka/stream/typed/javadsl/ActorFlowCompileTest.java) { #ask-actor #ask } +: @@snip [ask.java](/akka-stream-typed/src/test/java/akka/stream/typed/javadsl/ActorFlowCompileTest.java) { #ask-actor #ask } diff --git a/akka-docs/src/main/paradox/stream/operators/ActorSink/actorRef.md b/akka-docs/src/main/paradox/stream/operators/ActorSink/actorRef.md index 38aff27d28..93c58b661a 100644 --- a/akka-docs/src/main/paradox/stream/operators/ActorSink/actorRef.md +++ b/akka-docs/src/main/paradox/stream/operators/ActorSink/actorRef.md @@ -18,7 +18,7 @@ This operator is included in: ## Signature -@@signature [ActorSink.scala]($akka$/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala) { #actorRef } +@@signature [ActorSink.scala](/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala) { #actorRef } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/FileIO/fromPath.md b/akka-docs/src/main/paradox/stream/operators/FileIO/fromPath.md index cbc44c5b3f..174b62e9f1 100644 --- a/akka-docs/src/main/paradox/stream/operators/FileIO/fromPath.md +++ b/akka-docs/src/main/paradox/stream/operators/FileIO/fromPath.md @@ -8,7 +8,7 @@ Emit the contents of a file. ## Signature -@@signature [FileIO.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala) { #fromPath } +@@signature [FileIO.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala) { #fromPath } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/FileIO/toPath.md b/akka-docs/src/main/paradox/stream/operators/FileIO/toPath.md index 7347ec179d..8c1e1bbb57 100644 --- a/akka-docs/src/main/paradox/stream/operators/FileIO/toPath.md +++ b/akka-docs/src/main/paradox/stream/operators/FileIO/toPath.md @@ -8,7 +8,7 @@ Create a sink which will write incoming `ByteString` s to a given file path. 
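A minimal usage sketch (the file name and the `ActorMaterializer` setup are illustrative):

```scala
import java.nio.file.Paths
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ FileIO, Source }
import akka.util.ByteString

object ToPathSketch extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  // Materializes to a Future[IOResult] describing how many bytes were written.
  Source(List("hello", "file", "sink"))
    .map(line => ByteString(line + "\n"))
    .runWith(FileIO.toPath(Paths.get("example.txt")))
    .onComplete(_ => system.terminate())(system.dispatcher)
}
```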
## Signature -@@signature [FileIO.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala) { #toPath } +@@signature [FileIO.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala) { #toPath } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Flow/fromSinkAndSource.md b/akka-docs/src/main/paradox/stream/operators/Flow/fromSinkAndSource.md index 2ee4d47237..a09e006eb5 100644 --- a/akka-docs/src/main/paradox/stream/operators/Flow/fromSinkAndSource.md +++ b/akka-docs/src/main/paradox/stream/operators/Flow/fromSinkAndSource.md @@ -8,7 +8,7 @@ Creates a `Flow` from a `Sink` and a `Source` where the Flow's input will be sen ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #fromSinkAndSource } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #fromSinkAndSource } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Flow/fromSinkAndSourceCoupled.md b/akka-docs/src/main/paradox/stream/operators/Flow/fromSinkAndSourceCoupled.md index 9c85ff5455..4e4b7c5d47 100644 --- a/akka-docs/src/main/paradox/stream/operators/Flow/fromSinkAndSourceCoupled.md +++ b/akka-docs/src/main/paradox/stream/operators/Flow/fromSinkAndSourceCoupled.md @@ -8,7 +8,7 @@ Allows coupling termination (cancellation, completion, erroring) of Sinks and So ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #fromSinkAndSourceCoupled } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #fromSinkAndSourceCoupled } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Flow/lazyInitAsync.md b/akka-docs/src/main/paradox/stream/operators/Flow/lazyInitAsync.md index ea6657c480..b064ddd48f 100644 --- a/akka-docs/src/main/paradox/stream/operators/Flow/lazyInitAsync.md +++ b/akka-docs/src/main/paradox/stream/operators/Flow/lazyInitAsync.md @@ -8,7 +8,7 @@ Creates a real `Flow` upon receiving the first element by calling relevant `flow ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #lazyInitAsync } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #lazyInitAsync } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/actorRef.md b/akka-docs/src/main/paradox/stream/operators/Sink/actorRef.md index 88b37550d0..c337f14c8d 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/actorRef.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/actorRef.md @@ -7,7 +7,7 @@ Send the elements from the stream to an `ActorRef`. @@@ div { .group-scala } ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #actorRef } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #actorRef } @@@ ## Description diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/actorRefWithAck.md b/akka-docs/src/main/paradox/stream/operators/Sink/actorRefWithAck.md index c3d550b667..6271283e29 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/actorRefWithAck.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/actorRefWithAck.md @@ -14,18 +14,18 @@ to provide back pressure onto the sink. 
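As a rough sketch of that handshake (the message objects are user-chosen; these names are made up):

```scala
import akka.NotUsed
import akka.actor.ActorRef
import akka.stream.scaladsl.Sink

object AckProtocolSketch {
  // The handshake messages are configurable; any objects will do.
  case object StreamInitialized
  case object Ack
  case object StreamCompleted
  final case class StreamFailure(ex: Throwable)

  // The receiving actor must reply with Ack after the init message and after
  // each element; withholding the Ack is what backpressures the stream.
  def ackingSink[T](receiver: ActorRef): Sink[T, NotUsed] =
    Sink.actorRefWithAck(
      receiver,
      onInitMessage = StreamInitialized,
      ackMessage = Ack,
      onCompleteMessage = StreamCompleted,
      onFailureMessage = StreamFailure.apply)
}
```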
Actor to be interacted with: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #actorRefWithAck-actor } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #actorRefWithAck-actor } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #actorRefWithAck-actor } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #actorRefWithAck-actor } Using the `actorRefWithAck` operator with the above actor: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #actorRefWithAck } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #actorRefWithAck } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #actorRefWithAck } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #actorRefWithAck } ## Reactive Streams semantics diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/asPublisher.md b/akka-docs/src/main/paradox/stream/operators/Sink/asPublisher.md index f022d35353..e9b4ada7a5 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/asPublisher.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/asPublisher.md @@ -7,7 +7,7 @@ Integration with Reactive Streams, materializes into a `org.reactivestreams.Publ @@@ div { .group-scala } ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #asPublisher } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #asPublisher } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/cancelled.md b/akka-docs/src/main/paradox/stream/operators/Sink/cancelled.md index 0ee8a287d3..143834a3a4 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/cancelled.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/cancelled.md @@ -8,7 +8,7 @@ Immediately cancel the stream ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #cancelled } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #cancelled } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/combine.md b/akka-docs/src/main/paradox/stream/operators/Sink/combine.md index da12dc8c7d..214f9314a7 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/combine.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/combine.md @@ -8,7 +8,7 @@ Combine several sinks into one using a user specified strategy ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #combine } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #combine } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/fold.md b/akka-docs/src/main/paradox/stream/operators/Sink/fold.md index a799678e82..b40bc12363 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/fold.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/fold.md @@ -8,7 +8,7 @@ Fold over emitted element with a function, where each invocation will get the ne ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #fold } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #fold } @@@ diff 
--git a/akka-docs/src/main/paradox/stream/operators/Sink/foreach.md b/akka-docs/src/main/paradox/stream/operators/Sink/foreach.md index 5b8dbb5169..ecf4a6bc29 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/foreach.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/foreach.md @@ -8,7 +8,7 @@ Invoke a given procedure for each element received. ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #forEach } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #forEach } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/foreachParallel.md b/akka-docs/src/main/paradox/stream/operators/Sink/foreachParallel.md index 63f9e00d08..0781db63a1 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/foreachParallel.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/foreachParallel.md @@ -8,7 +8,7 @@ Like `foreach` but allows up to `parallellism` procedure calls to happen in para ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #foreachParallel } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #foreachParallel } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/fromSubscriber.md b/akka-docs/src/main/paradox/stream/operators/Sink/fromSubscriber.md index 3bfc750296..5fd29995e5 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/fromSubscriber.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/fromSubscriber.md @@ -8,7 +8,7 @@ Integration with Reactive Streams, wraps a `org.reactivestreams.Subscriber` as a ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #fromSubscriber } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #fromSubscriber } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/head.md b/akka-docs/src/main/paradox/stream/operators/Sink/head.md index ef4775a338..2f8eab32d0 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/head.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/head.md @@ -8,7 +8,7 @@ Materializes into a @scala[`Future`] @java[`CompletionStage`] which completes wi ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #head } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #head } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/headOption.md b/akka-docs/src/main/paradox/stream/operators/Sink/headOption.md index 2625716f73..f85d5d9cd6 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/headOption.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/headOption.md @@ -8,7 +8,7 @@ Materializes into a @scala[`Future[Option[T]]`] @java[`CompletionStage>`] ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #lastOption } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #lastOption } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/lazyInitAsync.md b/akka-docs/src/main/paradox/stream/operators/Sink/lazyInitAsync.md index 3b00a845c8..39592e5877 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/lazyInitAsync.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/lazyInitAsync.md @@ -8,7 +8,7 @@ Creates a real `Sink` upon receiving the first 
element. ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #lazyInitAsync } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #lazyInitAsync } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/onComplete.md b/akka-docs/src/main/paradox/stream/operators/Sink/onComplete.md index da63a5a566..6d6d41fba3 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/onComplete.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/onComplete.md @@ -8,7 +8,7 @@ Invoke a callback when the stream has completed or failed. ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #onComplete } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #onComplete } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/preMaterialize.md b/akka-docs/src/main/paradox/stream/operators/Sink/preMaterialize.md index d71e6cf83a..a82781ca3d 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/preMaterialize.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/preMaterialize.md @@ -8,7 +8,7 @@ Materializes this Sink, immediately returning (1) its materialized value, and (2 ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #preMaterialize } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #preMaterialize } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/queue.md b/akka-docs/src/main/paradox/stream/operators/Sink/queue.md index 835abed44c..120c5912a7 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/queue.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/queue.md @@ -8,7 +8,7 @@ Materialize a `SinkQueue` that can be pulled to trigger demand through the sink. ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #queue } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #queue } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/reduce.md b/akka-docs/src/main/paradox/stream/operators/Sink/reduce.md index 26ea5a5295..2f9f47fe75 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/reduce.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/reduce.md @@ -8,7 +8,7 @@ Apply a reduction function on the incoming elements and pass the result to the n ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #reduce } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #reduce } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/seq.md b/akka-docs/src/main/paradox/stream/operators/Sink/seq.md index 27b5c36474..778f912131 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/seq.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/seq.md @@ -8,7 +8,7 @@ Collect values emitted from the stream into a collection. 
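A minimal usage sketch (only safe for streams that are known to be bounded, since all elements are kept in memory):

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import scala.collection.immutable
import scala.concurrent.Future

object SeqSketch extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  // Materializes to a Future holding all emitted elements.
  val result: Future[immutable.Seq[Int]] = Source(1 to 100).runWith(Sink.seq)
  result.foreach(seq => println(seq.size))(system.dispatcher)
}
```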
## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #seq } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #seq } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Sink/takeLast.md b/akka-docs/src/main/paradox/stream/operators/Sink/takeLast.md index f0f5e33fc0..0e992f1765 100644 --- a/akka-docs/src/main/paradox/stream/operators/Sink/takeLast.md +++ b/akka-docs/src/main/paradox/stream/operators/Sink/takeLast.md @@ -8,7 +8,7 @@ Collect the last `n` values emitted from the stream into a collection. ## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #takeLast } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala) { #takeLast } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/alsoTo.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/alsoTo.md index 39de73d130..561d21109b 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/alsoTo.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/alsoTo.md @@ -7,7 +7,7 @@ Attaches the given `Sink` to this `Flow`, meaning that elements that pass throug @@@ div { .group-scala } ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #alsoTo } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #alsoTo } @@@ ## Description diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/apply.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/apply.md index 4147f8a09a..c737afe35e 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/apply.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/apply.md @@ -7,7 +7,7 @@ Stream the values of an `immutable.Seq`. @@@ div { .group-scala } ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #apply } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #apply } @@@ ## Description diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/ask.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/ask.md index 1e12bb4842..f2072651ae 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/ask.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/ask.md @@ -7,7 +7,7 @@ Use the `ask` pattern to send a request-reply message to the target `ref` actor. 
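A minimal usage sketch (the `Upcase` actor is a made-up stand-in for the target `ref`):

```scala
import akka.actor.{ Actor, ActorSystem, Props }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import akka.util.Timeout
import scala.concurrent.duration._

object StreamAskSketch extends App {
  // Hypothetical target actor: replies to each String with its upper-case form.
  class Upcase extends Actor {
    def receive: Receive = { case s: String => sender() ! s.toUpperCase }
  }

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val timeout: Timeout = 1.second

  val ref = system.actorOf(Props(new Upcase))

  // Each element becomes an ask (parallelism 4); replies keep the element order.
  Source(List("a", "b", "c"))
    .ask[String](4)(ref)
    .runWith(Sink.foreach(println))
}
```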
@@@ div { .group-scala } ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #ask } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #ask } @@@ ## Description diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/backpressureTimeout.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/backpressureTimeout.md index dfa5759f50..9d55d2328e 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/backpressureTimeout.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/backpressureTimeout.md @@ -7,7 +7,7 @@ If the time between the emission of an element and the following downstream dema @@@ div { .group-scala } ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #backpressureTimeout } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #backpressureTimeout } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/batch.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/batch.md index f28615c860..fe9a97f354 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/batch.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/batch.md @@ -7,7 +7,7 @@ Allow for a slower downstream by passing incoming elements and a summary into an @@@ div { .group-scala } ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #batch } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #batch } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/batchWeighted.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/batchWeighted.md index 7659019e7a..bd1e2873bc 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/batchWeighted.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/batchWeighted.md @@ -7,7 +7,7 @@ Allow for a slower downstream by passing incoming elements and a summary into an @@@ div { .group-scala } ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #batchWeighted } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #batchWeighted } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/buffer.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/buffer.md index d5bd802245..a8ca8cbb77 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/buffer.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/buffer.md @@ -7,7 +7,7 @@ Allow for a temporarily faster upstream events by buffering `size` elements. 
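A minimal usage sketch (the sizes and the deliberately slow consumer are illustrative):

```scala
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, OverflowStrategy }
import akka.stream.scaladsl.{ Sink, Source }

object BufferSketch extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  // Keep up to 1000 elements in flight; if the buffer fills up because the
  // downstream is slower, drop the oldest buffered element (dropHead).
  Source(1 to 1000000)
    .buffer(1000, OverflowStrategy.dropHead)
    .runWith(Sink.foreach(_ => Thread.sleep(1))) // deliberately slow consumer
}
```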
@@@ div { .group-scala } ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #buffer } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #buffer } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/collect.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/collect.md index 4a3c3fcd75..7c13b08487 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/collect.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/collect.md @@ -8,7 +8,7 @@ Apply a partial function to each incoming element, if the partial function is de ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #collect } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #collect } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/collectType.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/collectType.md index 1cbcfa18c6..897052535b 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/collectType.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/collectType.md @@ -8,7 +8,7 @@ Transform this stream by testing the type of each of the elements on which the e ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #collectType } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #collectType } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/completionTimeout.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/completionTimeout.md index fb35eef19c..3a9f4f86a2 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/completionTimeout.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/completionTimeout.md @@ -8,7 +8,7 @@ If the completion of the stream does not happen until the provided timeout, the ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #completionTimeout } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #completionTimeout } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/concat.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/concat.md index 51048b5327..905060e57d 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/concat.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/concat.md @@ -8,7 +8,7 @@ After completion of the original upstream the elements of the given source will ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #concat } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #concat } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/conflate.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/conflate.md index eb2ee541a3..453b10bd6a 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/conflate.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/conflate.md @@ -8,7 +8,7 @@ Allow for a slower downstream by passing incoming elements and a summary into an ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #conflate } +@@signature 
[Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #conflate } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/conflateWithSeed.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/conflateWithSeed.md index 7f769f601a..85003a6487 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/conflateWithSeed.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/conflateWithSeed.md @@ -8,7 +8,7 @@ Allow for a slower downstream by passing incoming elements and a summary into an ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #conflateWithSeed } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #conflateWithSeed } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/delay.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/delay.md index 6db9c46ae5..184d26523c 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/delay.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/delay.md @@ -8,7 +8,7 @@ Delay every element passed through with a specific duration. ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #delay } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #delay } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/detach.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/detach.md index a306e48e77..1a368d32df 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/detach.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/detach.md @@ -8,7 +8,7 @@ Detach upstream demand from downstream demand without detaching the stream rates ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #detach } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #detach } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/divertTo.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/divertTo.md index 62575038ed..efde709b35 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/divertTo.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/divertTo.md @@ -8,7 +8,7 @@ Each upstream element will either be diverted to the given sink, or the downstre ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #divertTo } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #divertTo } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/drop.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/drop.md index 820b3e5541..49b37c13f8 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/drop.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/drop.md @@ -8,7 +8,7 @@ Drop `n` elements and then pass any subsequent element downstream. 
## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #drop } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #drop } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/dropWhile.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/dropWhile.md index af5123d30d..0e9d043a79 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/dropWhile.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/dropWhile.md @@ -8,7 +8,7 @@ Drop elements as long as a predicate function return true for the element ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #dropWhile } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #dropWhile } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/dropWithin.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/dropWithin.md index d2f6d8f7d9..cf24995274 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/dropWithin.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/dropWithin.md @@ -8,7 +8,7 @@ Drop elements until a timeout has fired ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #dropWithin } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #dropWithin } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/expand.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/expand.md index 91fc9ed003..5f0f46f953 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/expand.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/expand.md @@ -8,7 +8,7 @@ Like `extrapolate`, but does not have the `initial` argument, and the `Iterator` ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #expand } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #expand } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/extrapolate.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/extrapolate.md index fc74a0e2c5..f94d2a6828 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/extrapolate.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/extrapolate.md @@ -8,7 +8,7 @@ Allow for a faster downstream by expanding the last emitted element to an `Itera ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #extrapolate } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #extrapolate } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/filter.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/filter.md index 78ed3658ba..2b041d61c8 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/filter.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/filter.md @@ -8,7 +8,7 @@ Filter the incoming elements using a predicate. 
## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #filter } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #filter } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/filterNot.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/filterNot.md index 56bd561693..71e9ff9f1b 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/filterNot.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/filterNot.md @@ -8,7 +8,7 @@ Filter the incoming elements using a predicate. ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #filterNot } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #filterNot } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/flatMapConcat.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/flatMapConcat.md index 75956fce9e..fa9605c406 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/flatMapConcat.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/flatMapConcat.md @@ -8,7 +8,7 @@ Transform each input element into a `Source` whose elements are then flattened i ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #flatMapConcat } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #flatMapConcat } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/flatMapMerge.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/flatMapMerge.md index e6dbd1a7aa..dcf2f81092 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/flatMapMerge.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/flatMapMerge.md @@ -8,7 +8,7 @@ Transform each input element into a `Source` whose elements are then flattened i ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #flatMapMerge } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #flatMapMerge } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/fold.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/fold.md index cbdd8f34b7..ff38d53cfa 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/fold.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/fold.md @@ -8,7 +8,7 @@ Start with current value `zero` and then apply the current and next value to the ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #fold } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #fold } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/foldAsync.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/foldAsync.md index 7d3f2a41d1..1ea123f5cb 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/foldAsync.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/foldAsync.md @@ -8,7 +8,7 @@ Just like `fold` but receives a function that results in a @scala[`Future`] @jav ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #foldAsync } +@@signature 
[Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #foldAsync } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupBy.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupBy.md index 32f524accb..95f89d9f13 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupBy.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupBy.md @@ -8,7 +8,7 @@ Demultiplex the incoming stream into separate output streams. ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #groupBy } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #groupBy } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/grouped.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/grouped.md index 33027f0c1c..2afbcd8208 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/grouped.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/grouped.md @@ -8,7 +8,7 @@ Accumulate incoming events until the specified number of elements have been accu ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #grouped } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #grouped } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupedWeightedWithin.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupedWeightedWithin.md index 1b511ef29d..8d236bbd29 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupedWeightedWithin.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupedWeightedWithin.md @@ -8,7 +8,7 @@ Chunk up this stream into groups of elements received within a time window, or l ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #groupedWeightedWithin } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #groupedWeightedWithin } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupedWithin.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupedWithin.md index dacb25c37b..9f79aee8ad 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupedWithin.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/groupedWithin.md @@ -8,7 +8,7 @@ Chunk up this stream into groups of elements received within a time window, or l ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #groupedWithin } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #groupedWithin } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/idleTimeout.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/idleTimeout.md index 9d1e299bcf..e1db7821dc 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/idleTimeout.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/idleTimeout.md @@ -8,7 +8,7 @@ If the time between two processed elements exceeds the provided timeout, the str ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #idleTimeout } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #idleTimeout } @@@ diff --git 
a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/initialDelay.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/initialDelay.md index 6565c62325..0e564b6c2c 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/initialDelay.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/initialDelay.md @@ -8,7 +8,7 @@ Delays the initial element by the specified duration. ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #initialDelay } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #initialDelay } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/initialTimeout.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/initialTimeout.md index 9caca6b94b..d94b0c3bc6 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/initialTimeout.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/initialTimeout.md @@ -8,7 +8,7 @@ If the first element has not passed through this operators before the provided t ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #initialTimeout } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #initialTimeout } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/interleave.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/interleave.md index 0ec3535d6a..bd5499bf76 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/interleave.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/interleave.md @@ -8,7 +8,7 @@ Emits a specifiable number of elements from the original source, then from the p ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #interleave } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #interleave } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/intersperse.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/intersperse.md index 9a61ab74b8..99db75e286 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/intersperse.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/intersperse.md @@ -8,7 +8,7 @@ Intersperse stream with provided element similar to `List.mkString`. 
## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #intersperse } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #intersperse } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/keepAlive.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/keepAlive.md index 1573518613..43a27b2972 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/keepAlive.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/keepAlive.md @@ -8,7 +8,7 @@ Injects additional (configured) elements if upstream does not emit for a configu ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #keepAlive } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #keepAlive } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/limit.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/limit.md index 162e4d6b41..c485a80ec4 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/limit.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/limit.md @@ -8,7 +8,7 @@ Limit number of element from upstream to given `max` number. ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #limit } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #limit } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/limitWeighted.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/limitWeighted.md index 0445907e9b..76dea63d98 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/limitWeighted.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/limitWeighted.md @@ -8,7 +8,7 @@ Ensure stream boundedness by evaluating the cost of incoming elements using a co ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #limitWeighted } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #limitWeighted } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/log.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/log.md index 3d9e58c8a9..1dc9e16684 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/log.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/log.md @@ -8,7 +8,7 @@ Log elements flowing through the stream as well as completion and erroring. 
## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #log } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #log } @@@ @@ -32,7 +32,7 @@ This can be changed by calling @scala[`Attributes.logLevels(...)`] @java[`Attrib ## Example Scala -: @@snip [SourceOrFlow.scala]($code$/scala/docs/stream/operators/SourceOrFlow.scala) { #log } +: @@snip [SourceOrFlow.scala](/akka-docs/src/test/scala/docs/stream/operators/SourceOrFlow.scala) { #log } Java -: @@snip [SourceOrFlow.java]($code$/java/jdocs/stream/operators/SourceOrFlow.java) { #log } +: @@snip [SourceOrFlow.java](/akka-docs/src/test/java/jdocs/stream/operators/SourceOrFlow.java) { #log } diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/map.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/map.md index 38d7915f8c..9352383be7 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/map.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/map.md @@ -8,7 +8,7 @@ Transform each element in the stream by calling a mapping function with it and p ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #map } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #map } @@@ @@ -31,7 +31,7 @@ Transform each element in the stream by calling a mapping function with it and p Scala -: @@snip [Flow.scala]($akka$/akka-docs/src/test/scala/docs/stream/operators/Map.scala) { #imports #map } +: @@snip [Flow.scala](/akka-docs/src/test/scala/docs/stream/operators/Map.scala) { #imports #map } diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapAsync.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapAsync.md index 1432d0a352..175e784f2e 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapAsync.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapAsync.md @@ -8,7 +8,7 @@ Pass incoming elements to a function that return a @scala[`Future`] @java[`Compl ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #mapAsync } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #mapAsync } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapAsyncUnordered.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapAsyncUnordered.md index 16c2a22ad2..eb0fc39532 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapAsyncUnordered.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapAsyncUnordered.md @@ -8,7 +8,7 @@ Like `mapAsync` but @scala[`Future`] @java[`CompletionStage`] results are passed ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #mapAsyncUnordered } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #mapAsyncUnordered } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapConcat.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapConcat.md index dfa309e905..0e28c7cb0f 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapConcat.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapConcat.md @@ -8,7 +8,7 @@ Transform each element into zero or more elements that are individually passed d ## Signature -@@signature 
[Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #mapConcat } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #mapConcat } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapError.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapError.md index c5fb99c8de..18208430fa 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapError.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mapError.md @@ -8,7 +8,7 @@ While similar to `recover` this operators can be used to transform an error sign ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #mapError } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #mapError } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/merge.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/merge.md index 39181a746e..b109698246 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/merge.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/merge.md @@ -8,7 +8,7 @@ Merge multiple sources. ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #merge } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #merge } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mergeSorted.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mergeSorted.md index 2e64d438d0..e49a547497 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mergeSorted.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/mergeSorted.md @@ -8,7 +8,7 @@ Merge multiple sources. 
## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #mergeSorted } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #mergeSorted } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/monitor.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/monitor.md index e60264356f..be448100cd 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/monitor.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/monitor.md @@ -8,7 +8,7 @@ Materializes to a `FlowMonitor` that monitors messages flowing through or comple ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #monitor } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #monitor } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/orElse.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/orElse.md index 63b0addc49..c0da22b0fd 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/orElse.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/orElse.md @@ -8,7 +8,7 @@ If the primary source completes without emitting any elements, the elements from ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #orElse } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #orElse } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/prefixAndTail.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/prefixAndTail.md index 7bd1a9f011..a09467a145 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/prefixAndTail.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/prefixAndTail.md @@ -8,7 +8,7 @@ Take up to *n* elements from the stream (less than *n* only if the upstream comp ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #prefixAndTail } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #prefixAndTail } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/prepend.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/prepend.md index da8439c03e..0d742a4977 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/prepend.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/prepend.md @@ -8,7 +8,7 @@ Prepends the given source to the flow, consuming it until completion before the ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #prepend } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #prepend } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recover.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recover.md index 6c68beee39..c9e4bac0b3 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recover.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recover.md @@ -8,7 +8,7 @@ Allow sending of one last element downstream when a failure has happened upstrea ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #recover } +@@signature 
[Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #recover } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recoverWith.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recoverWith.md index 3dcc4cd1cd..45668cd61d 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recoverWith.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recoverWith.md @@ -8,7 +8,7 @@ Allow switching to alternative Source when a failure has happened upstream. ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #recoverWith } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #recoverWith } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recoverWithRetries.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recoverWithRetries.md index a51b7c6ef2..8323ba24ec 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recoverWithRetries.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/recoverWithRetries.md @@ -8,7 +8,7 @@ RecoverWithRetries allows to switch to alternative Source on flow failure. ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #recoverWithRetries } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #recoverWithRetries } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/reduce.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/reduce.md index 7dc0d6c805..c4dcad6445 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/reduce.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/reduce.md @@ -8,7 +8,7 @@ Start with first element and then apply the current and next value to the given ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #reduce } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #reduce } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/scan.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/scan.md index 27809cc26b..c299057b1d 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/scan.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/scan.md @@ -8,7 +8,7 @@ Emit its current value, which starts at `zero`, and then apply the current and n ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #scan } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #scan } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/scanAsync.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/scanAsync.md index 36e7495392..6e1851f046 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/scanAsync.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/scanAsync.md @@ -8,7 +8,7 @@ Just like `scan` but receives a function that results in a @scala[`Future`] @jav ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #scanAsync } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #scanAsync } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/sliding.md 
b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/sliding.md index b1a0b85501..c42634a0a8 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/sliding.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/sliding.md @@ -8,7 +8,7 @@ Provide a sliding window over the incoming stream and pass the windows as groups ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #sliding } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #sliding } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/splitAfter.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/splitAfter.md index df056f07d5..0069743a26 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/splitAfter.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/splitAfter.md @@ -8,7 +8,7 @@ End the current substream whenever a predicate returns `true`, starting a new su ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #splitAfter } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #splitAfter } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/splitWhen.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/splitWhen.md index 8c5dd2379b..d84f172cd7 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/splitWhen.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/splitWhen.md @@ -8,7 +8,7 @@ Split off elements into a new substream whenever a predicate function return `tr ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #splitWhen } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #splitWhen } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/statefulMapConcat.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/statefulMapConcat.md index 7dad475858..f46bced7c2 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/statefulMapConcat.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/statefulMapConcat.md @@ -8,7 +8,7 @@ Transform each element into zero or more elements that are individually passed d ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #statefulMapConcat } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #statefulMapConcat } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/take.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/take.md index 2307b19b28..a0ad012f30 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/take.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/take.md @@ -8,7 +8,7 @@ Pass `n` incoming elements downstream and then complete ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #take } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #take } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/takeWhile.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/takeWhile.md index d2ddb9bdf9..32e9777a46 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/takeWhile.md 
+++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/takeWhile.md @@ -8,7 +8,7 @@ Pass elements downstream as long as a predicate function return true for the ele ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #takeWhile } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #takeWhile } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/takeWithin.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/takeWithin.md index 25a0191abc..0d73a769a8 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/takeWithin.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/takeWithin.md @@ -8,7 +8,7 @@ Pass elements downstream within a timeout and then complete. ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #takeWithin } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #takeWithin } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/throttle.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/throttle.md index ef397697dc..df6a4a62fe 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/throttle.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/throttle.md @@ -8,7 +8,7 @@ Limit the throughput to a specific number of elements per time unit, or a specif ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #throttle } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #throttle } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/watch.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/watch.md index 9d5cec2d87..19011a2491 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/watch.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/watch.md @@ -8,7 +8,7 @@ Watch a specific `ActorRef` and signal a failure downstream once the actor termi ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #watch } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #watch } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/watchTermination.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/watchTermination.md index 727b46b82a..7f80a8dd81 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/watchTermination.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/watchTermination.md @@ -8,7 +8,7 @@ Materializes to a @scala[`Future`] @java[`CompletionStage`] that will be complet ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #watchTermination } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #watchTermination } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/wireTap.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/wireTap.md index a72770a6f9..8f74917132 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/wireTap.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/wireTap.md @@ -8,7 +8,7 @@ Attaches the given `Sink` to this `Flow` as a wire tap, meaning that elements th ## Signature 
-@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #wireTap } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #wireTap } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zip.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zip.md index 97d5050516..a19cc5ee1d 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zip.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zip.md @@ -8,7 +8,7 @@ Combines elements from each of multiple sources into @scala[tuples] @java[*Pair* ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #zip } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #zip } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zipWith.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zipWith.md index be04c3d230..88b557204c 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zipWith.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zipWith.md @@ -8,7 +8,7 @@ Combines elements from multiple sources through a `combine` function and passes ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #zipWith } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #zipWith } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zipWithIndex.md b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zipWithIndex.md index 0301eecbb9..1ea070b545 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zipWithIndex.md +++ b/akka-docs/src/main/paradox/stream/operators/Source-or-Flow/zipWithIndex.md @@ -8,7 +8,7 @@ Zips elements of current flow with its indices. ## Signature -@@signature [Flow.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #zipWithIndex } +@@signature [Flow.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala) { #zipWithIndex } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/actorRef.md b/akka-docs/src/main/paradox/stream/operators/Source/actorRef.md index b297327c76..93ad75e22e 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/actorRef.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/actorRef.md @@ -7,7 +7,7 @@ Materialize an `ActorRef`; sending messages to it will emit them on the stream. 
@@@ div { .group-scala } ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #actorRef } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #actorRef } @@@ ## Description diff --git a/akka-docs/src/main/paradox/stream/operators/Source/asSubscriber.md b/akka-docs/src/main/paradox/stream/operators/Source/asSubscriber.md index 3d9ae97be5..e5040483ee 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/asSubscriber.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/asSubscriber.md @@ -7,7 +7,7 @@ Integration with Reactive Streams, materializes into a `org.reactivestreams.Subs @@@ div { .group-scala } ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #asSubscriber } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #asSubscriber } @@@ ## Description diff --git a/akka-docs/src/main/paradox/stream/operators/Source/combine.md b/akka-docs/src/main/paradox/stream/operators/Source/combine.md index 55bf156864..ecd5799a1f 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/combine.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/combine.md @@ -8,7 +8,7 @@ Combine several sources, using a given strategy such as merge or concat, into on ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #combine } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #combine } @@@ @@ -27,7 +27,7 @@ Combine several sources, using a given strategy such as merge or concat, into on Scala -: @@snip [combine.scala]($akka$/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala) { #imports #combine } +: @@snip [combine.scala](/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala) { #imports #combine } diff --git a/akka-docs/src/main/paradox/stream/operators/Source/cycle.md b/akka-docs/src/main/paradox/stream/operators/Source/cycle.md index 99189fc2b8..0c75d751fe 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/cycle.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/cycle.md @@ -8,7 +8,7 @@ Stream iterator in cycled manner. ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #cycle } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #cycle } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/empty.md b/akka-docs/src/main/paradox/stream/operators/Source/empty.md index 00da4208cc..a3c35421dc 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/empty.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/empty.md @@ -8,7 +8,7 @@ Complete right away without ever emitting any elements. ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #empty } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #empty } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/failed.md b/akka-docs/src/main/paradox/stream/operators/Source/failed.md index f057950178..0b182f639e 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/failed.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/failed.md @@ -8,7 +8,7 @@ Fail directly with a user specified exception. 
## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #failed } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #failed } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/from.md b/akka-docs/src/main/paradox/stream/operators/Source/from.md index 6f56423ca8..17f7ed9d09 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/from.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/from.md @@ -9,7 +9,7 @@ Stream the values of an `Iterable`. ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala) { #from } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala) { #from } @@@ @@ -30,4 +30,4 @@ as a source. Otherwise the stream may fail with `ConcurrentModificationException ## Examples Java -: @@snip [from.java]($akka$/akka-docs/src/test/java/jdocs/stream/operators/SourceDocExamples.java) { #imports #source-from-example } +: @@snip [from.java](/akka-docs/src/test/java/jdocs/stream/operators/SourceDocExamples.java) { #imports #source-from-example } diff --git a/akka-docs/src/main/paradox/stream/operators/Source/fromCompletionStage.md b/akka-docs/src/main/paradox/stream/operators/Source/fromCompletionStage.md index 939cd24dfc..7310b7d7fa 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/fromCompletionStage.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/fromCompletionStage.md @@ -8,7 +8,7 @@ Send the single value of the `CompletionStage` when it completes and there is de ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #fromCompletionStage } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #fromCompletionStage } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/fromFuture.md b/akka-docs/src/main/paradox/stream/operators/Source/fromFuture.md index fee6c52cb1..c695fbd35e 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/fromFuture.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/fromFuture.md @@ -8,7 +8,7 @@ Send the single value of the `Future` when it completes and there is demand. ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #fromFuture } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #fromFuture } @@@ @@ -28,5 +28,5 @@ If the future fails the stream is failed with that exception. ## Example Scala -: @@snip [SourceFromFuture.scala]($akka$/akka-docs/src/test/scala/docs/stream/operators/SourceOperators.scala) { #sourceFromFuture } +: @@snip [SourceFromFuture.scala](/akka-docs/src/test/scala/docs/stream/operators/SourceOperators.scala) { #sourceFromFuture } diff --git a/akka-docs/src/main/paradox/stream/operators/Source/fromFutureSource.md b/akka-docs/src/main/paradox/stream/operators/Source/fromFutureSource.md index 1fa3254170..5fd51d26dc 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/fromFutureSource.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/fromFutureSource.md @@ -8,7 +8,7 @@ Streams the elements of the given future source once it successfully completes. 
## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #fromFutureSource } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #fromFutureSource } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/fromIterator.md b/akka-docs/src/main/paradox/stream/operators/Source/fromIterator.md index c31325b80c..127676514f 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/fromIterator.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/fromIterator.md @@ -8,7 +8,7 @@ Stream the values from an `Iterator`, requesting the next value when there is de ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #fromIterator } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #fromIterator } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/fromPublisher.md b/akka-docs/src/main/paradox/stream/operators/Source/fromPublisher.md index 67aa909022..b6929add0e 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/fromPublisher.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/fromPublisher.md @@ -8,7 +8,7 @@ Integration with Reactive Streams, subscribes to a `org.reactivestreams.Publishe ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #fromPublisher } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #fromPublisher } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/lazily.md b/akka-docs/src/main/paradox/stream/operators/Source/lazily.md index 8eaf664414..7761d43e4e 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/lazily.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/lazily.md @@ -8,7 +8,7 @@ Defers creation and materialization of a `Source` until there is demand. 
## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #lazily } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #lazily } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/maybe.md b/akka-docs/src/main/paradox/stream/operators/Source/maybe.md index 85ae8bdb99..c077a2f1be 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/maybe.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/maybe.md @@ -8,7 +8,7 @@ Materialize a @scala[`Promise[Option[T]]`] @java[`CompletionStage`] that if comp ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #maybe } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #maybe } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/queue.md b/akka-docs/src/main/paradox/stream/operators/Source/queue.md index 3fa4a03e5c..28e9d3dfdd 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/queue.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/queue.md @@ -8,7 +8,7 @@ Materialize a `SourceQueue` onto which elements can be pushed for emitting from ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #queue } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #queue } @@@ @@ -28,10 +28,10 @@ In combination with the queue, the @ref[`throttle`](./../Source-or-Flow/throttle ## Example Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #source-queue } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #source-queue } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #source-queue } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #source-queue } ## Reactive Streams Semantics diff --git a/akka-docs/src/main/paradox/stream/operators/Source/range.md b/akka-docs/src/main/paradox/stream/operators/Source/range.md index c6ff4ced5c..f6e9b7c609 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/range.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/range.md @@ -31,9 +31,9 @@ Emit each integer in a range, with an option to take bigger steps than 1. @scala Define the range of integers. Java -: @@snip [SourceDocExamples.java]($akka$/akka-docs/src/test/java/jdocs/stream/operators/SourceDocExamples.java) { #range-imports #range } +: @@snip [SourceDocExamples.java](/akka-docs/src/test/java/jdocs/stream/operators/SourceDocExamples.java) { #range-imports #range } Print out the stream of integers. 
Java -: @@snip [SourceDocExamples.java]($akka$/akka-docs/src/test/java/jdocs/stream/operators/SourceDocExamples.java) { #run-range} +: @@snip [SourceDocExamples.java](/akka-docs/src/test/java/jdocs/stream/operators/SourceDocExamples.java) { #run-range} diff --git a/akka-docs/src/main/paradox/stream/operators/Source/repeat.md b/akka-docs/src/main/paradox/stream/operators/Source/repeat.md index 82df94d41e..fd565264fd 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/repeat.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/repeat.md @@ -8,7 +8,7 @@ Stream a single object repeatedly ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #repeat } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #repeat } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/single.md b/akka-docs/src/main/paradox/stream/operators/Source/single.md index 8541e26b6d..1177adf79f 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/single.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/single.md @@ -8,7 +8,7 @@ Stream a single object ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #single } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #single } @@@ @@ -28,9 +28,9 @@ Stream a single object ## Examples Scala -: @@snip [source.scala]($akka$/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala) { #imports #source-single } +: @@snip [source.scala](/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala) { #imports #source-single } Java -: @@snip [source.java]($akka$/akka-stream-tests/src/test/java/akka/stream/javadsl/SourceTest.java) { #imports #source-single } +: @@snip [source.java](/akka-stream-tests/src/test/java/akka/stream/javadsl/SourceTest.java) { #imports #source-single } diff --git a/akka-docs/src/main/paradox/stream/operators/Source/tick.md b/akka-docs/src/main/paradox/stream/operators/Source/tick.md index b1a2f1d8e5..d97e4d8347 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/tick.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/tick.md @@ -8,7 +8,7 @@ A periodical repetition of an arbitrary object. 
## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #tick } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #tick } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/unfold.md b/akka-docs/src/main/paradox/stream/operators/Source/unfold.md index b634ea3ba8..2abc087910 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/unfold.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/unfold.md @@ -8,7 +8,7 @@ Stream the result of a function as long as it returns a @scala[`Some`] @java[`Op ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #unfold } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #unfold } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/unfoldAsync.md b/akka-docs/src/main/paradox/stream/operators/Source/unfoldAsync.md index 79a465fe38..b5a3203114 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/unfoldAsync.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/unfoldAsync.md @@ -8,7 +8,7 @@ Just like `unfold` but the fold function returns a @scala[`Future`] @java[`Compl ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #unfoldAsync } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #unfoldAsync } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/unfoldResource.md b/akka-docs/src/main/paradox/stream/operators/Source/unfoldResource.md index e5a4c21635..0c6984945c 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/unfoldResource.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/unfoldResource.md @@ -8,7 +8,7 @@ Wrap any resource that can be opened, queried for next element (in a blocking wa ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #unfoldResource } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #unfoldResource } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/unfoldResourceAsync.md b/akka-docs/src/main/paradox/stream/operators/Source/unfoldResourceAsync.md index c79e7e020b..bb1d5b6361 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/unfoldResourceAsync.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/unfoldResourceAsync.md @@ -8,7 +8,7 @@ Wrap any resource that can be opened, queried for next element (in a blocking wa ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #unfoldResourceAsync } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #unfoldResourceAsync } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/zipN.md b/akka-docs/src/main/paradox/stream/operators/Source/zipN.md index c283b0bc49..e734bbb9d5 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/zipN.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/zipN.md @@ -8,7 +8,7 @@ Combine the elements of multiple streams into a stream of sequences. 
## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #zipN } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #zipN } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/Source/zipWithN.md b/akka-docs/src/main/paradox/stream/operators/Source/zipWithN.md index cfed23b9f6..8e1151b46b 100644 --- a/akka-docs/src/main/paradox/stream/operators/Source/zipWithN.md +++ b/akka-docs/src/main/paradox/stream/operators/Source/zipWithN.md @@ -8,7 +8,7 @@ Combine the elements of multiple streams into a stream of sequences using a comb ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #zipWithN } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala) { #zipWithN } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/StreamConverters/asInputStream.md b/akka-docs/src/main/paradox/stream/operators/StreamConverters/asInputStream.md index 761900f160..dc6dbed965 100644 --- a/akka-docs/src/main/paradox/stream/operators/StreamConverters/asInputStream.md +++ b/akka-docs/src/main/paradox/stream/operators/StreamConverters/asInputStream.md @@ -7,7 +7,7 @@ Create a sink which materializes into an `InputStream` that can be read to trigg @@@ div { .group-scala } ## Signature -@@signature [StreamConverters.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #asInputStream } +@@signature [StreamConverters.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #asInputStream } @@@ ## Description diff --git a/akka-docs/src/main/paradox/stream/operators/StreamConverters/asJavaStream.md b/akka-docs/src/main/paradox/stream/operators/StreamConverters/asJavaStream.md index 759ba09266..72604bddeb 100644 --- a/akka-docs/src/main/paradox/stream/operators/StreamConverters/asJavaStream.md +++ b/akka-docs/src/main/paradox/stream/operators/StreamConverters/asJavaStream.md @@ -7,7 +7,7 @@ Create a sink which materializes into Java 8 `Stream` that can be run to trigger @@@ div { .group-scala } ## Signature -@@signature [StreamConverters.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #asJavaStream } +@@signature [StreamConverters.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #asJavaStream } @@@ ## Description diff --git a/akka-docs/src/main/paradox/stream/operators/StreamConverters/asOutputStream.md b/akka-docs/src/main/paradox/stream/operators/StreamConverters/asOutputStream.md index 7a0afcf6ea..c63368ea0d 100644 --- a/akka-docs/src/main/paradox/stream/operators/StreamConverters/asOutputStream.md +++ b/akka-docs/src/main/paradox/stream/operators/StreamConverters/asOutputStream.md @@ -7,7 +7,7 @@ Create a source that materializes into an `OutputStream`. 
@@@ div { .group-scala } ## Signature -@@signature [StreamConverters.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #asOutputStream } +@@signature [StreamConverters.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #asOutputStream } @@@ ## Description diff --git a/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromInputStream.md b/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromInputStream.md index 00746b4b81..e0ab09dd45 100644 --- a/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromInputStream.md +++ b/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromInputStream.md @@ -8,7 +8,7 @@ Create a source that wraps an `InputStream`. ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #fromInputStream } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #fromInputStream } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromJavaStream.md b/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromJavaStream.md index 23d61c4e2f..dfa7eba5c8 100644 --- a/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromJavaStream.md +++ b/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromJavaStream.md @@ -8,7 +8,7 @@ Create a source that wraps a Java 8 `Stream`. ## Signature -@@signature [Source.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #fromJavaStream } +@@signature [Source.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #fromJavaStream } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromOutputStream.md b/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromOutputStream.md index d8f7d2858c..4cf465d4aa 100644 --- a/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromOutputStream.md +++ b/akka-docs/src/main/paradox/stream/operators/StreamConverters/fromOutputStream.md @@ -8,7 +8,7 @@ Create a sink that wraps an `OutputStream`. 
## Signature -@@signature [Sink.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #fromOutputStream } +@@signature [Sink.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #fromOutputStream } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/StreamConverters/javaCollector.md b/akka-docs/src/main/paradox/stream/operators/StreamConverters/javaCollector.md index aa03ccea0f..2831e9e2f5 100644 --- a/akka-docs/src/main/paradox/stream/operators/StreamConverters/javaCollector.md +++ b/akka-docs/src/main/paradox/stream/operators/StreamConverters/javaCollector.md @@ -8,7 +8,7 @@ Create a sink which materializes into a @scala[`Future`] @java[`CompletionStage` ## Signature -@@signature [StreamConverters.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #javaCollector } +@@signature [StreamConverters.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #javaCollector } @@@ diff --git a/akka-docs/src/main/paradox/stream/operators/StreamConverters/javaCollectorParallelUnordered.md b/akka-docs/src/main/paradox/stream/operators/StreamConverters/javaCollectorParallelUnordered.md index 6715d59b2d..40fc635734 100644 --- a/akka-docs/src/main/paradox/stream/operators/StreamConverters/javaCollectorParallelUnordered.md +++ b/akka-docs/src/main/paradox/stream/operators/StreamConverters/javaCollectorParallelUnordered.md @@ -8,7 +8,7 @@ Create a sink which materializes into a @scala[`Future`] @java[`CompletionStage` ## Signature -@@signature [StreamConverters.scala]($akka$/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #javaCollectorParallelUnordered } +@@signature [StreamConverters.scala](/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala) { #javaCollectorParallelUnordered } @@@ diff --git a/akka-docs/src/main/paradox/stream/stream-composition.md b/akka-docs/src/main/paradox/stream/stream-composition.md index 2c45a3270d..daba8c933d 100644 --- a/akka-docs/src/main/paradox/stream/stream-composition.md +++ b/akka-docs/src/main/paradox/stream/stream-composition.md @@ -79,10 +79,10 @@ with the rest of the graph), but this demonstrates the uniform underlying model. 
If we try to build a code snippet that corresponds to the above diagram, our first try might look like this: Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #non-nested-flow } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #non-nested-flow } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #non-nested-flow } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #non-nested-flow } It is clear however that there is no nesting present in our first attempt, since the library cannot figure out @@ -93,10 +93,10 @@ methods `withAttributes()` or `named()` (where the latter is a shorthand for add The following code demonstrates how to achieve the desired nesting: Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #nested-flow } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #nested-flow } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #nested-flow } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #nested-flow } Once we have hidden the internals of our components, they act like any other built-in component of similar shape. If we hide some of the internals of our composites, the result looks just like if any other predefine component has been @@ -108,10 +108,10 @@ If we look at usage of built-in components, and our custom components, there is snippet below demonstrates. Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #reuse } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #reuse } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #reuse } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #reuse } ## Composing complex systems @@ -132,20 +132,20 @@ directed and non-directed cycles. The `runnable()` method of the `GraphDSL` obje general, closed, and runnable graph. For example the network on the diagram can be realized like this: Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #complex-graph } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #complex-graph } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #complex-graph } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #complex-graph } In the code above we used the implicit port numbering feature (to make the graph more readable and similar to the diagram) and we imported `Source` s, `Sink` s and `Flow` s explicitly. 
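Since the `@@snip` references above point at files outside this patch, here is a minimal self-contained sketch of the nesting idea (editor's illustration with made-up names, not the referenced `CompositionDocSpec` code): wrapping a chain of operators and naming it makes the whole chain one module from the outside, reusable like any built-in operator.

```scala
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Sink, Source }

object NestedFlowSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // Two operators fused into one named module; from the outside it is a single Flow.
  val nestedFlow: Flow[Int, Int, NotUsed] =
    Flow[Int].map(_ * 2).filter(_ > 0).named("nestedFlow")

  // Reused twice, like any predefined component of the same shape.
  Source(-2 to 3).via(nestedFlow).via(nestedFlow).runWith(Sink.foreach(println))
}
```

Materialization then sees `nestedFlow` as one module, so attributes such as its name apply to everything inside it.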
It is possible to refer to the ports explicitly, and it is not necessary to import our linear operators via `add()`, so another version might look like this: Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #complex-graph-alt } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #complex-graph-alt } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #complex-graph-alt } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #complex-graph-alt } Similar to the case in the first section, so far we have not considered modularity. We created a complex graph, but the layout is flat, not modularized. We will modify our example, and create a reusable component with the graph DSL. @@ -157,10 +157,10 @@ from the previous example, what remains is a partial graph: We can recreate a similar graph in code, using the DSL in a similar way than before: Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #partial-graph } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #partial-graph } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #partial-graph } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #partial-graph } The only new addition is the return value of the builder block, which is a `Shape`. All operators (including `Source`, `BidiFlow`, etc) have a shape, which encodes the *typed* ports of the module. In our example @@ -176,10 +176,10 @@ it is a good practice to give names to modules to help debugging. Since our partial graph has the right shape, it can be already used in the simpler, linear DSL: Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #partial-use } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #partial-use } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #partial-use } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #partial-use } It is not possible to use it as a `Flow` yet, though (i.e. we cannot call `.filter()` on it), but `Flow` has a `fromGraph()` method that adds the DSL to a `FlowShape`. There are similar methods on `Source`, @@ -192,10 +192,10 @@ To demonstrate this, we will create the following graph: The code version of the above closed graph might look like this: Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #partial-flow-dsl } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #partial-flow-dsl } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #partial-flow-dsl } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #partial-flow-dsl } @@@ note @@ -208,10 +208,10 @@ We are still in debt of demonstrating that `RunnableGraph` is a component like a be embedded in graphs. 
In the following snippet we embed one closed graph in another: Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #embed-closed } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #embed-closed } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #embed-closed } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #embed-closed } The type of the imported module indicates that the imported module has a `ClosedShape`, and so we are not able to wire it to anything else inside the enclosing closed graph. Nevertheless, this "island" is embedded properly, @@ -258,20 +258,20 @@ materialized type of @scala[`Promise[[Option[Int]]`] @java[`CompletableFuture`], and we propagate this to the parent by using `Keep.right` as the combiner function (indicated by the color *yellow* on the diagram): Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #mat-combine-2 } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #mat-combine-2 } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-2 } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-2 } As a third step, we create a composite `Sink`, using our `nestedFlow` as a building block. In this snippet, both the enclosed `Flow` and the folding `Sink` has a materialized value that is interesting for us, so @@ -279,10 +279,10 @@ we use `Keep.both` to get a `Pair` of them as the materialized type of `nestedSi *blue* on the diagram) Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #mat-combine-3 } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #mat-combine-3 } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-3 } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-3 } As the last example, we wire together `nestedSource` and `nestedSink` and we use a custom combiner function to create a yet another materialized type of the resulting `RunnableGraph`. 
This combiner function ignores @@ -290,12 +290,12 @@ the @scala[`Future[String]`] @java[`CompletionStage`] part, and wraps th (indicated by color *purple* on the diagram): Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #mat-combine-4 } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #mat-combine-4 } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-4a } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-4a } - @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-4b } + @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-4b } @@@ note @@ -319,10 +319,10 @@ The code below, a modification of an earlier example sets the `inputBuffer` attr on others: Scala -: @@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #attributes-inheritance } +: @@snip [CompositionDocSpec.scala](/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala) { #attributes-inheritance } Java -: @@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #attributes-inheritance } +: @@snip [CompositionDocTest.java](/akka-docs/src/test/java/jdocs/stream/CompositionDocTest.java) { #attributes-inheritance } The effect is, that each module inherits the `inputBuffer` attribute from its enclosing parent, unless it has the same attribute explicitly set. `nestedSource` gets the default attributes from the materializer itself. `nestedSink` diff --git a/akka-docs/src/main/paradox/stream/stream-cookbook.md b/akka-docs/src/main/paradox/stream/stream-cookbook.md index 8f94ed5aad..39c2a53973 100644 --- a/akka-docs/src/main/paradox/stream/stream-cookbook.md +++ b/akka-docs/src/main/paradox/stream/stream-cookbook.md @@ -36,19 +36,19 @@ The simplest solution is to use a `map` operation and use `println` to print the While this recipe is rather simplistic, it is often suitable for a quick debug session. Scala -: @@snip [RecipeLoggingElements.scala]($code$/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #println-debug } +: @@snip [RecipeLoggingElements.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #println-debug } Java -: @@snip [RecipeLoggingElements.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #println-debug } +: @@snip [RecipeLoggingElements.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #println-debug } Another approach to logging is to use `log()` operation. This approach gives you more fine-grained control of logging levels for elements flowing through the stream, finish and failure of the stream. 
Scala -: @@snip [RecipeLoggingElements.scala]($code$/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #log-custom } +: @@snip [RecipeLoggingElements.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #log-custom } Java -: @@snip [RecipeLoggingElements.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #log-custom } +: @@snip [RecipeLoggingElements.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #log-custom } ### Creating a source that continuously evaluates a function @@ -58,10 +58,10 @@ The simplest implementation is to use a `Source.repeat` that produces some arbit and then map those elements to the function evaluation. E.g. if we have some `builderFunction()`, we can use: Scala -: @@snip [RecipeSourceFromFunction.scala]($code$/scala/docs/stream/cookbook/RecipeSourceFromFunction.scala) { #source-from-function } +: @@snip [RecipeSourceFromFunction.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSourceFromFunction.scala) { #source-from-function } Java -: @@snip [RecipeSourceFromFunction.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSourceFromFunction.java) { #source-from-function } +: @@snip [RecipeSourceFromFunction.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeSourceFromFunction.java) { #source-from-function } Note: if the element-builder function touches mutable state, then a guaranteed single-threaded source should be used instead; e.g. `Source.unfold` or `Source.unfoldResource`. @@ -76,10 +76,10 @@ in the form of @scala[`In => immutable.Seq[Out]`] @java[`In -> List`]. In t collection itself, so we can call @scala[`mapConcat(identity)`] @java[`mapConcat(l -> l)`]. Scala -: @@snip [RecipeFlattenSeq.scala]($code$/scala/docs/stream/cookbook/RecipeFlattenSeq.scala) { #flattening-seqs } +: @@snip [RecipeFlattenSeq.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeFlattenSeq.scala) { #flattening-seqs } Java -: @@snip [RecipeFlattenList.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeFlattenList.java) { #flattening-lists } +: @@snip [RecipeFlattenList.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeFlattenList.java) { #flattening-lists } ### Draining a stream to a strict collection @@ -93,18 +93,18 @@ The function `limit` or `take` should always be used in conjunction in order to For example, this is best avoided: Scala -: @@snip [RecipeSeq.scala]($code$/scala/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-unsafe } +: @@snip [RecipeSeq.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-unsafe } Java -: @@snip [RecipeSeq.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-unsafe } +: @@snip [RecipeSeq.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-unsafe } Rather, use `limit` or `take` to ensure that the resulting @scala[`Seq`] @java[`List`] will contain only up to @scala[`max`] @java[`MAX_ALLOWED_SIZE`] elements: Scala -: @@snip [RecipeSeq.scala]($code$/scala/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-safe } +: @@snip [RecipeSeq.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-safe } Java -: @@snip [RecipeSeq.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-safe } +: @@snip [RecipeSeq.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-safe } ### 
Calculating the digest of a ByteString stream @@ -122,11 +122,11 @@ be no downstream demand. Instead we call `emit` which will temporarily replace t demand comes in and then reset the operator state. It will then complete the operator. Scala -: @@snip [RecipeDigest.scala]($code$/scala/docs/stream/cookbook/RecipeDigest.scala) { #calculating-digest } +: @@snip [RecipeDigest.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala) { #calculating-digest } Java -: @@snip [RecipeDigest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest } -: @@snip [RecipeDigest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest2 } +: @@snip [RecipeDigest.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest } +: @@snip [RecipeDigest.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest2 } ### Parsing lines from a stream of ByteStrings @@ -138,10 +138,10 @@ needs to be parsed. The `Framing` helper @scala[object] @java[class] contains a convenience method to parse messages from a stream of `ByteString` s: Scala -: @@snip [RecipeParseLines.scala]($code$/scala/docs/stream/cookbook/RecipeParseLines.scala) { #parse-lines } +: @@snip [RecipeParseLines.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala) { #parse-lines } Java -: @@snip [RecipeParseLines.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeParseLines.java) { #parse-lines } +: @@snip [RecipeParseLines.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeParseLines.java) { #parse-lines } ### Dealing with compressed data streams @@ -151,10 +151,10 @@ The `Compression` helper @scala[object] @java[class] contains convenience method Gzip or Deflate. Scala -: @@snip [RecipeDecompress.scala]($code$/scala/docs/stream/cookbook/RecipeDecompress.scala) { #decompress-gzip } +: @@snip [RecipeDecompress.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala) { #decompress-gzip } Java -: @@snip [RecipeDecompress.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDecompress.java) { #decompress-gzip } +: @@snip [RecipeDecompress.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeDecompress.java) { #decompress-gzip } ### Implementing reduce-by-key @@ -184,10 +184,10 @@ number then the stream cannot continue without violating its resource bound, in this case `groupBy` will terminate with a failure. 
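For orientation, a minimal sketch of the word-count shape of this recipe (assuming a hypothetical `words: Source[String, NotUsed]`; the `1024` substream bound is an arbitrary choice) might look like this; the referenced snippets below show the full recipe:

```scala
import akka.NotUsed
import akka.stream.scaladsl.Source

def countWords(words: Source[String, NotUsed]): Source[(String, Int), NotUsed] =
  words
    .groupBy(1024, identity)               // one bounded substream per distinct word
    .map(_ -> 1)
    .reduce((l, r) => (l._1, l._2 + r._2)) // count within each substream
    .mergeSubstreams                       // merge the per-word counts back into one stream
```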
Scala -: @@snip [RecipeReduceByKey.scala]($code$/scala/docs/stream/cookbook/RecipeReduceByKey.scala) { #word-count } +: @@snip [RecipeReduceByKey.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala) { #word-count } Java -: @@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #word-count } +: @@snip [RecipeReduceByKeyTest.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #word-count } By extracting the parts specific to *wordcount* into @@ -198,11 +198,11 @@ we get a generalized version below: Scala -: @@snip [RecipeReduceByKey.scala]($code$/scala/docs/stream/cookbook/RecipeReduceByKey.scala) { #reduce-by-key-general } +: @@snip [RecipeReduceByKey.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala) { #reduce-by-key-general } Java -: @@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general } -: @@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general2 } +: @@snip [RecipeReduceByKeyTest.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general } +: @@snip [RecipeReduceByKeyTest.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general2 } @@@ note @@ -227,10 +227,10 @@ will be emitted. This is achieved by using `mapConcat` belongs to) and feed it into groupBy, using the topic as the group key. Scala -: @@snip [RecipeMultiGroupBy.scala]($code$/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala) { #multi-groupby } +: @@snip [RecipeMultiGroupBy.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala) { #multi-groupby } Java -: @@snip [RecipeMultiGroupByTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeMultiGroupByTest.java) { #multi-groupby } +: @@snip [RecipeMultiGroupByTest.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeMultiGroupByTest.java) { #multi-groupby } ### Adhoc source @@ -240,10 +240,10 @@ Also, you want to shut it down when there is no more demand, and start it up You can achieve this behavior by combining `lazily`, `backpressureTimeout` and `recoverWithRetries` as follows: Scala -: @@snip [RecipeAdhocSource.scala]($code$/scala/docs/stream/cookbook/RecipeAdhocSource.scala) { #adhoc-source } +: @@snip [RecipeAdhocSource.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala) { #adhoc-source } Java -: @@snip [RecipeAdhocSourceTest.scala]($code$/java/jdocs/stream/javadsl/cookbook/RecipeAdhocSourceTest.java) { #adhoc-source } +: @@snip [RecipeAdhocSourceTest.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeAdhocSourceTest.java) { #adhoc-source } ## Working with Operators @@ -260,10 +260,10 @@ This recipe solves the problem by zipping the stream of `Message` elements with signals. Since `Zip` produces pairs, we map the output stream selecting the first element of the pair. 
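A rough flow-level sketch of this idea (with `Message` and `Trigger` as placeholder types, and the two sources passed in as assumed inputs) is shown here; the referenced snippets build the same thing with the GraphDSL `Zip` junction:

```scala
import akka.NotUsed
import akka.stream.scaladsl.Source

final case class Message(data: String) // placeholder element type
final case class Trigger()             // placeholder signal type

def gate(elements: Source[Message, NotUsed],
         triggerSource: Source[Trigger, NotUsed]): Source[Message, NotUsed] =
  elements
    .zip(triggerSource)           // one message may pass per trigger signal
    .map { case (msg, _) => msg } // drop the trigger, keep the message
```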
Scala -: @@snip [RecipeManualTrigger.scala]($code$/scala/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream } +: @@snip [RecipeManualTrigger.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream } Java -: @@snip [RecipeManualTrigger.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream } +: @@snip [RecipeManualTrigger.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream } Alternatively, instead of using a `Zip`, and then using `map` to get the first element of the pairs, we can avoid creating the pairs in the first place by using `ZipWith` which takes a two-argument function to produce the output @@ -271,10 +271,10 @@ element. If this function would return a pair of the two argument it would be ex `ZipWith` is a generalization of zipping. Scala -: @@snip [RecipeManualTrigger.scala]($code$/scala/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream-zipwith } +: @@snip [RecipeManualTrigger.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream-zipwith } Java -: @@snip [RecipeManualTrigger.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream-zipwith } +: @@snip [RecipeManualTrigger.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream-zipwith } ### Balancing jobs to a fixed pool of workers @@ -293,11 +293,11 @@ we wire the outputs of these workers to a `Merge` element that will collect the To make the worker operators run in parallel we mark them as asynchronous with *async*. Scala -: @@snip [RecipeWorkerPool.scala]($code$/scala/docs/stream/cookbook/RecipeWorkerPool.scala) { #worker-pool } +: @@snip [RecipeWorkerPool.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala) { #worker-pool } Java -: @@snip [RecipeWorkerPool.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool } -: @@snip [RecipeWorkerPool.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool2 } +: @@snip [RecipeWorkerPool.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool } +: @@snip [RecipeWorkerPool.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool2 } ## Working with rate @@ -317,10 +317,10 @@ When the upstream is faster, the reducing process of the `conflate` starts. Our the freshest element. This is a simple dropping operation. Scala -: @@snip [RecipeSimpleDrop.scala]($code$/scala/docs/stream/cookbook/RecipeSimpleDrop.scala) { #simple-drop } +: @@snip [RecipeSimpleDrop.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSimpleDrop.scala) { #simple-drop } Java -: @@snip [RecipeSimpleDrop.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSimpleDrop.java) { #simple-drop } +: @@snip [RecipeSimpleDrop.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeSimpleDrop.java) { #simple-drop } There is a more general version of `conflate` named `conflateWithSeed` that allows expressing more complex aggregations, more similar to a `fold`. @@ -338,11 +338,11 @@ between the different consumers (the buffer smooths out small rate variances), b progress by dropping from the buffer of the slow consumers if necessary. 
Scala -: @@snip [RecipeDroppyBroadcast.scala]($code$/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala) { #droppy-bcast } +: @@snip [RecipeDroppyBroadcast.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala) { #droppy-bcast } Java -: @@snip [RecipeDroppyBroadcast.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast } -: @@snip [RecipeDroppyBroadcast.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast2 } +: @@snip [RecipeDroppyBroadcast.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast } +: @@snip [RecipeDroppyBroadcast.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast2 } ### Collecting missed ticks @@ -362,10 +362,10 @@ As a result, we have a flow of `Int` where the number represents the missed tick able to consume the tick fast enough (i.e. zero means: 1 non-missed tick + 0 missed ticks) Scala -: @@snip [RecipeMissedTicks.scala]($code$/scala/docs/stream/cookbook/RecipeMissedTicks.scala) { #missed-ticks } +: @@snip [RecipeMissedTicks.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala) { #missed-ticks } Java -: @@snip [RecipeMissedTicks.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeMissedTicks.java) { #missed-ticks } +: @@snip [RecipeMissedTicks.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeMissedTicks.java) { #missed-ticks } ### Create a stream processor that repeats the last element seen @@ -380,10 +380,10 @@ to feed the downstream if no upstream element is ready yet. In the `onPush()` ha is very similar, we immediately relieve the downstream by emitting `currentValue`. Scala -: @@snip [RecipeHold.scala]($code$/scala/docs/stream/cookbook/RecipeHold.scala) { #hold-version-1 } +: @@snip [RecipeHold.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala) { #hold-version-1 } Java -: @@snip [RecipeHold.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-1 } +: @@snip [RecipeHold.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-1 } While it is relatively simple, the drawback of the first version is that it needs an arbitrary initial element which is not always possible to provide. Hence, we create a second version where the downstream might need to wait in one single @@ -397,10 +397,10 @@ first element comes in we must check if there possibly already was demand from d push the element directly. Scala -: @@snip [RecipeHold.scala]($code$/scala/docs/stream/cookbook/RecipeHold.scala) { #hold-version-2 } +: @@snip [RecipeHold.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala) { #hold-version-2 } Java -: @@snip [RecipeHold.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-2 } +: @@snip [RecipeHold.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-2 } ### Globally limiting the rate of a set of streams @@ -421,20 +421,20 @@ message, we increment the pending permits counter and send a reply to each of th waiting senders than permits available we will stay in the `closed` state. 
Scala -: @@snip [RecipeGlobalRateLimit.scala]($code$/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-actor } +: @@snip [RecipeGlobalRateLimit.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-actor } Java -: @@snip [RecipeGlobalRateLimit.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-actor } +: @@snip [RecipeGlobalRateLimit.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-actor } To create a Flow that uses this global limiter actor we use the `mapAsync` function in combination with the `ask` pattern. We also define a timeout, so if a reply is not received during the configured maximum wait period the returned future from `ask` will fail, which will fail the corresponding stream as well. Scala -: @@snip [RecipeGlobalRateLimit.scala]($code$/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-flow } +: @@snip [RecipeGlobalRateLimit.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-flow } Java -: @@snip [RecipeGlobalRateLimit.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-flow } +: @@snip [RecipeGlobalRateLimit.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-flow } @@@ note @@ -462,11 +462,11 @@ Both `onPush()` and `onPull()` call `emitChunk()` the only difference is that t the incoming chunk by appending to the end of the buffer. Scala -: @@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #bytestring-chunker } +: @@snip [RecipeByteStrings.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #bytestring-chunker } Java -: @@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker } -: @@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker2 } +: @@snip [RecipeByteStrings.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker } +: @@snip [RecipeByteStrings.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker2 } ### Limit the number of bytes passing through a stream of ByteStrings @@ -478,11 +478,11 @@ This recipe uses a @ref:[`GraphStage`](stream-customize.md) to implement the desi we signal failure, otherwise we forward the chunk we have received. 
Scala -: @@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #bytes-limiter } +: @@snip [RecipeByteStrings.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #bytes-limiter } Java -: @@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter } - @@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter2 } +: @@snip [RecipeByteStrings.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter } + @@snip [RecipeByteStrings.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter2 } ### Compact ByteStrings in a stream of ByteStrings @@ -494,10 +494,10 @@ The recipe is a simple use of map, calling the `compact()` method of the `ByteSt copying of the underlying arrays, so this should be the last element of a long chain if used. Scala -: @@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #compacting-bytestrings } +: @@snip [RecipeByteStrings.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #compacting-bytestrings } Java -: @@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #compacting-bytestrings } +: @@snip [RecipeByteStrings.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #compacting-bytestrings } ### Injecting keep-alive messages into a stream of ByteStrings @@ -507,7 +507,7 @@ but only if this does not interfere with normal traffic. There is a built-in operation that allows doing this directly: Scala -: @@snip [RecipeKeepAlive.scala]($code$/scala/docs/stream/cookbook/RecipeKeepAlive.scala) { #inject-keepalive } +: @@snip [RecipeKeepAlive.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeKeepAlive.scala) { #inject-keepalive } Java -: @@snip [RecipeKeepAlive.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeKeepAlive.java) { #inject-keepalive } +: @@snip [RecipeKeepAlive.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeKeepAlive.java) { #inject-keepalive } diff --git a/akka-docs/src/main/paradox/stream/stream-customize.md b/akka-docs/src/main/paradox/stream/stream-customize.md index 475701f7f5..89c9cad892 100644 --- a/akka-docs/src/main/paradox/stream/stream-customize.md +++ b/akka-docs/src/main/paradox/stream/stream-customize.md @@ -38,10 +38,10 @@ cancelled. To start, we need to define the "interface" of our operator, which is (this is explained in more detail in the section @ref:[Modularity, Composition and Hierarchy](stream-composition.md)). This is how it looks: Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #boilerplate-example } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #boilerplate-example } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #simple-source } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #simple-source } As you see, in itself the `GraphStage` only defines the ports of this operator and a shape that contains the ports. It also has a currently unimplemented method called `createLogic`. If you recall, operators are reusable in multiple @@ -66,7 +66,7 @@ to stop the operator, we don't need to override it. 
In the `onPull` callback we emit the next number. This is how it looks in the end: Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #custom-source-example } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #custom-source-example } Instances of the above `GraphStage` are subclasses of @scala[`Graph[SourceShape[Int],NotUsed]`] @java[`Graph<SourceShape<Integer>,NotUsed>`] which means that they are already usable in many situations, but do not provide the DSL methods we usually have for other @@ -75,10 +75,10 @@ that they are already usable in many situations, but do not provide the DSL meth source as any other built-in one: Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #simple-source-usage } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #simple-source-usage } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #simple-source-usage } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #simple-source-usage } Similarly, to create a custom `Sink` one can register a subclass of `InHandler` with the operator `Inlet`. The `onPush()` callback is used to signal the handler that a new element has been pushed to the operator, @@ -87,10 +87,10 @@ Please note, most Sinks would need to request upstream elements as soon as they done by calling `pull(inlet)` in the `preStart()` callback. Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #custom-sink-example } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #custom-sink-example } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #simple-sink } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #simple-sink } ### Port states, @scala[InHandler] @java[AbstractInHandler] and @scala[OutHandler] @java[AbstractOutHandler] @@ -199,10 +199,10 @@ Map calls `push(out)` from the `onPush()` handler and it also calls `pull()` fro conceptual wiring above, and fully expressed in code below: Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #one-to-one } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #one-to-one } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #one-to-one } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #one-to-one } Map is a typical example of a one-to-one transformation of a stream where demand is passed along upstream and elements are passed on downstream. @@ -218,10 +218,10 @@ example by adding a conditional in the `onPush` handler and decide between a `pu (and not having a mapping `f` function). Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #many-to-one } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #many-to-one } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #many-to-one } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #many-to-one } To complete the picture we define a one-to-many transformation as the next step. 
We chose a straightforward example operator that emits every upstream element twice downstream. The conceptual wiring of this operator looks like this: @@ -233,10 +233,10 @@ has duplicated this last element already or not. We must also make sure to emit if the upstream completes. Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #one-to-many } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #one-to-many } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #one-to-many } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #one-to-many } In this case a pull from downstream might be consumed by the operator itself rather than passed along upstream as the operator might contain an element it wants to @@ -250,10 +250,10 @@ This example can be simplified by replacing the usage of a mutable state with ca reinstate the original handlers: Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #simpler-one-to-many } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #simpler-one-to-many } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #simpler-one-to-many } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #simpler-one-to-many } Finally, to demonstrate all of the operators above, we put them together into a processing chain, which conceptually would correspond to the following structure: @@ -263,10 +263,10 @@ which conceptually would correspond to the following structure: In code this is only a few lines, using `via` to use our custom operators in a stream: Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #graph-operator-chain } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #graph-operator-chain } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #graph-operator-chain } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #graph-operator-chain } If we attempt to draw the sequence of events, it shows that there is one "event token" in circulation in a potential chain of operators, just like our conceptual "railroad tracks" representation predicts. 
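To make such a chain concrete, a sketch using the custom `Filter` and `Duplicator` operators from the snippets above (their definitions are assumed from those snippets, and an implicit materializer is assumed in scope):

```scala
import akka.stream.scaladsl.{Sink, Source}

// wire the custom graph operators into an ordinary linear stream
val resultFuture = Source(1 to 5)
  .via(new Filter[Int](_ % 2 == 0)) // custom GraphStage from the snippets above
  .via(new Duplicator[Int]())       // custom GraphStage from the snippets above
  .map(_ / 2)
  .runWith(Sink.fold[Int, Int](0)(_ + _))
```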
@@ -318,10 +318,10 @@ See @ref:[Using the SLF4J API directly](../logging.md#slf4j-directly) for more d The operator then gets access to the `log` field which it can safely use from any `GraphStage` callbacks: Scala -: @@snip [GraphStageLoggingDocSpec.scala]($code$/scala/docs/stream/GraphStageLoggingDocSpec.scala) { #operator-with-logging } +: @@snip [GraphStageLoggingDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala) { #operator-with-logging } Java -: @@snip [GraphStageLoggingDocTest.java]($code$/java/jdocs/stream/GraphStageLoggingDocTest.java) { #operator-with-logging } +: @@snip [GraphStageLoggingDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageLoggingDocTest.java) { #operator-with-logging } @@@ note @@ -347,10 +347,10 @@ operator starts out as closed but as soon as an element is pushed downstream the of time during which it will consume and drop upstream messages: Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #timed } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #timed } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #timed } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #timed } ### Using asynchronous side-channels @@ -369,10 +369,10 @@ This example shows an asynchronous side channel operator that starts dropping el when a future completes: Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #async-side-channel } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #async-side-channel } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #async-side-channel } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #async-side-channel } ### Integration with actors @@ -409,10 +409,10 @@ necessary (non-blocking) synchronization and visibility guarantees to this share In this sample the materialized value is a future containing the first element to go through the stream: Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #materialized } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #materialized } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #materialized } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #materialized } ### Using attributes to affect the behavior of an operator @@ -456,10 +456,10 @@ initialization. The buffer has demand for up to two elements without any downstr The following code example demonstrates a buffer class corresponding to the message sequence chart above. Scala -: @@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #detached } +: @@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #detached } Java -: @@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #detached } +: @@snip [GraphStageDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphStageDocTest.java) { #detached } ## Thread safety of custom operators @@ -512,11 +512,11 @@ extensions to `Source` and `Flow` see [this sketch by R. 
Kuhn](https://gist.gith A lot simpler is the task of adding an extension method to `Source` as shown below: -@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #extending-source } +@@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #extending-source } The analog works for `Flow` as well: -@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #extending-flow } +@@snip [GraphStageDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala) { #extending-flow } If you try to write this for `SubFlow`, though, you will run into the same issue as when trying to unify the two solutions above, only on a higher level (the type constructors needed for that unification would have rank diff --git a/akka-docs/src/main/paradox/stream/stream-dynamic.md b/akka-docs/src/main/paradox/stream/stream-dynamic.md index b466920e65..d8422249b6 100644 --- a/akka-docs/src/main/paradox/stream/stream-dynamic.md +++ b/akka-docs/src/main/paradox/stream/stream-dynamic.md @@ -24,7 +24,7 @@ The `KillSwitch` @scala[trait] @java[interface] allows to: Scala -: @@snip [KillSwitch.scala]($akka$/akka-stream/src/main/scala/akka/stream/KillSwitch.scala) { #kill-switch } +: @@snip [KillSwitch.scala](/akka-stream/src/main/scala/akka/stream/KillSwitch.scala) { #kill-switch } After the first call to either `shutdown` or `abort`, all subsequent calls to any of these methods will be ignored. Stream completion is performed by both @@ -43,18 +43,18 @@ below for usage examples. * **Shutdown** Scala -: @@snip [KillSwitchDocSpec.scala]($code$/scala/docs/stream/KillSwitchDocSpec.scala) { #unique-shutdown } +: @@snip [KillSwitchDocSpec.scala](/akka-docs/src/test/scala/docs/stream/KillSwitchDocSpec.scala) { #unique-shutdown } Java -: @@snip [KillSwitchDocTest.java]($code$/java/jdocs/stream/KillSwitchDocTest.java) { #unique-shutdown } +: @@snip [KillSwitchDocTest.java](/akka-docs/src/test/java/jdocs/stream/KillSwitchDocTest.java) { #unique-shutdown } * **Abort** Scala -: @@snip [KillSwitchDocSpec.scala]($code$/scala/docs/stream/KillSwitchDocSpec.scala) { #unique-abort } +: @@snip [KillSwitchDocSpec.scala](/akka-docs/src/test/scala/docs/stream/KillSwitchDocSpec.scala) { #unique-abort } Java -: @@snip [KillSwitchDocTest.java]($code$/java/jdocs/stream/KillSwitchDocTest.java) { #unique-abort } +: @@snip [KillSwitchDocTest.java](/akka-docs/src/test/java/jdocs/stream/KillSwitchDocTest.java) { #unique-abort } ### SharedKillSwitch @@ -66,18 +66,18 @@ Refer to the below for usage examples. 
* **Shutdown** Scala -: @@snip [KillSwitchDocSpec.scala]($code$/scala/docs/stream/KillSwitchDocSpec.scala) { #shared-shutdown } +: @@snip [KillSwitchDocSpec.scala](/akka-docs/src/test/scala/docs/stream/KillSwitchDocSpec.scala) { #shared-shutdown } Java -: @@snip [KillSwitchDocTest.java]($code$/java/jdocs/stream/KillSwitchDocTest.java) { #shared-shutdown } +: @@snip [KillSwitchDocTest.java](/akka-docs/src/test/java/jdocs/stream/KillSwitchDocTest.java) { #shared-shutdown } * **Abort** Scala -: @@snip [KillSwitchDocSpec.scala]($code$/scala/docs/stream/KillSwitchDocSpec.scala) { #shared-abort } +: @@snip [KillSwitchDocSpec.scala](/akka-docs/src/test/scala/docs/stream/KillSwitchDocSpec.scala) { #shared-abort } Java -: @@snip [KillSwitchDocTest.java]($code$/java/jdocs/stream/KillSwitchDocTest.java) { #shared-abort } +: @@snip [KillSwitchDocTest.java](/akka-docs/src/test/java/jdocs/stream/KillSwitchDocTest.java) { #shared-abort } @@@ note @@ -103,10 +103,10 @@ It is not possible to attach any producers until this `Source` has been material by the fact that we only get the corresponding `Sink` as a materialized value. Usage might look like this: Scala -: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #merge-hub } +: @@snip [HubsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala) { #merge-hub } Java -: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #merge-hub } +: @@snip [HubDocTest.java](/akka-docs/src/test/java/jdocs/stream/HubDocTest.java) { #merge-hub } This sequence, while it might look odd at first, ensures proper startup order. Once we get the `Sink`, we can use it as many times as we want. Everything that is fed to it will be delivered to the consumer we attached @@ -120,10 +120,10 @@ to which the single producer must be attached first. Consumers can only be attac been materialized (i.e. the producer has been started). One example of using the `BroadcastHub`: Scala -: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #broadcast-hub } +: @@snip [HubsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala) { #broadcast-hub } Java -: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #broadcast-hub } +: @@snip [HubDocTest.java](/akka-docs/src/test/java/jdocs/stream/HubDocTest.java) { #broadcast-hub } The resulting `Source` can be materialized any number of times, each materialization effectively attaching a new subscriber. If there are no subscribers attached to this hub then it will not drop any elements but instead @@ -144,20 +144,20 @@ we materialize this small stream, we get back a pair of `Source` and `Sink` that the publish and subscribe sides of our channel. Scala -: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-1 } +: @@snip [HubsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-1 } Java -: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #pub-sub-1 } +: @@snip [HubDocTest.java](/akka-docs/src/test/java/jdocs/stream/HubDocTest.java) { #pub-sub-1 } We now use a few tricks to add more features. First of all, we attach a `Sink.ignore` at the broadcast side of the channel to keep it drained when there are no subscribers. If this behavior is not the desired one this line can be dropped. 
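A sketch of these first two steps (element type `String` assumed, buffer sizes arbitrary, and an implicit materializer in scope):

```scala
import akka.stream.scaladsl.{BroadcastHub, Keep, MergeHub, Sink}

// a MergeHub collects from many producers; a BroadcastHub fans out to many consumers
val (sink, source) =
  MergeHub.source[String](perProducerBufferSize = 16)
    .toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
    .run()

// keep the broadcast side drained while there are no subscribers
source.runWith(Sink.ignore)
```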
Scala -: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-2 } +: @@snip [HubsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-2 } Java -: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #pub-sub-2 } +: @@snip [HubDocTest.java](/akka-docs/src/test/java/jdocs/stream/HubDocTest.java) { #pub-sub-2 } We now wrap the `Sink` and `Source` in a `Flow` using `Flow.fromSinkAndSource`. This bundles up the two sides of the channel into one and forces users of it to always define a publisher and subscriber side @@ -168,20 +168,20 @@ Finally, we add `backpressureTimeout` on the consumer side to ensure that subscr than 3 seconds are forcefully removed (and their stream failed). Scala -: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-3 } +: @@snip [HubsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-3 } Java -: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #pub-sub-3 } +: @@snip [HubDocTest.java](/akka-docs/src/test/java/jdocs/stream/HubDocTest.java) { #pub-sub-3 } The resulting Flow now has a type of `Flow[String, String, UniqueKillSwitch]` representing a publish-subscribe channel which can be used any number of times to attach new producers or consumers. In addition, it materializes to a `UniqueKillSwitch` (see [UniqueKillSwitch](#unique-kill-switch)) that can be used to deregister a single user externally: Scala -: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-4 } +: @@snip [HubsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-4 } Java -: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #pub-sub-4 } +: @@snip [HubDocTest.java](/akka-docs/src/test/java/jdocs/stream/HubDocTest.java) { #pub-sub-4 } ### Using the PartitionHub @@ -195,10 +195,10 @@ to which the single producer must be attached first. Consumers can only be attac been materialized (i.e. the producer has been started). One example of using the `PartitionHub`: Scala -: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #partition-hub } +: @@snip [HubsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala) { #partition-hub } Java -: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #partition-hub } +: @@snip [HubDocTest.java](/akka-docs/src/test/java/jdocs/stream/HubDocTest.java) { #partition-hub } The `partitioner` function takes two parameters; the first is the number of active consumers and the second is the stream element. The function should return the index of the selected consumer for the given element, @@ -220,17 +220,17 @@ The above example illustrates a stateless partition function. For more advanced s @scala[`statefulSink`] can be used. Here is an example of a stateful round-robin function: Scala -: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #partition-hub-stateful } +: @@snip [HubsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala) { #partition-hub-stateful } Java -: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #partition-hub-stateful } +: @@snip [HubDocTest.java](/akka-docs/src/test/java/jdocs/stream/HubDocTest.java) { #partition-hub-stateful } Note that it is a factory of a function to be able to hold stateful variables that are unique for each materialization. 
@java[In this example the `partitioner` function is implemented as a class to be able to hold the mutable variable. A new instance of `RoundRobin` is created for each materialization of the hub.] @@@ div { .group-java } -@@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #partition-hub-stateful-function } +@@snip [HubDocTest.java](/akka-docs/src/test/java/jdocs/stream/HubDocTest.java) { #partition-hub-stateful-function } @@@ The function takes two parameters; the first is information about active consumers, including an array of @@ -245,7 +245,7 @@ Note that this is a moving target since the elements are consumed concurrently. a hub that routes to the consumer with least buffered elements: Scala -: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #partition-hub-fastest } +: @@snip [HubsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala) { #partition-hub-fastest } Java -: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #partition-hub-fastest } +: @@snip [HubDocTest.java](/akka-docs/src/test/java/jdocs/stream/HubDocTest.java) { #partition-hub-fastest } diff --git a/akka-docs/src/main/paradox/stream/stream-error.md b/akka-docs/src/main/paradox/stream/stream-error.md index dffb8b60f1..320b3b151d 100644 --- a/akka-docs/src/main/paradox/stream/stream-error.md +++ b/akka-docs/src/main/paradox/stream/stream-error.md @@ -31,10 +31,10 @@ inside an actor, and have the actor restart the entire stream on failure. The below stream fails with `ArithmeticException` when the element `0` goes through the `map` operator, Scala -: @@snip [RecipeLoggingElements.scala]($code$/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #log-error } +: @@snip [RecipeLoggingElements.scala](/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #log-error } Java -: @@snip [RecipeLoggingElements.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #log-error } +: @@snip [RecipeLoggingElements.java](/akka-docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #log-error } and error messages like below will be logged. @@ -57,18 +57,18 @@ Recovering can be useful if you want to gracefully complete a stream on failure downstream know that there was a failure. Scala -: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #recover } +: @@snip [FlowErrorDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala) { #recover } Java -: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #recover } +: @@snip [FlowErrorDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java) { #recover } This will output: Scala -: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #recover-output } +: @@snip [FlowErrorDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala) { #recover-output } Java -: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #recover-output } +: @@snip [FlowErrorDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java) { #recover-output } ## Recover with retries @@ -80,19 +80,19 @@ Deciding which exceptions should be recovered is done through a `PartialFunction does not have a @scala[matching case] @java[match defined] the stream is failed. 
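A minimal sketch of this pattern, with a hypothetical `planB` fallback source and assuming some `source: Source[String, _]` that may fail with a `RuntimeException`:

```scala
import akka.NotUsed
import akka.stream.scaladsl.Source

val planB: Source[String, NotUsed] = Source(List("five", "six", "seven", "eight"))

// on RuntimeException, switch to planB; give up after three failed attempts
val withFallback = source.recoverWithRetries(3, {
  case _: RuntimeException => planB
})
```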
Scala -: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #recoverWithRetries } +: @@snip [FlowErrorDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala) { #recoverWithRetries } Java -: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #recoverWithRetries } +: @@snip [FlowErrorDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java) { #recoverWithRetries } This will output: Scala -: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #recoverWithRetries-output } +: @@snip [FlowErrorDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala) { #recoverWithRetries-output } Java -: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #recoverWithRetries-output } +: @@snip [FlowErrorDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java) { #recoverWithRetries-output } @@ -116,10 +116,10 @@ be made again, in increasing intervals of 3, 6, 12, 24 and finally 30 seconds (a to the `maxBackoff` parameter): Scala -: @@snip [RestartDocSpec.scala]($code$/scala/docs/stream/RestartDocSpec.scala) { #restart-with-backoff-source } +: @@snip [RestartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala) { #restart-with-backoff-source } Java -: @@snip [RestartDocTest.java]($code$/java/jdocs/stream/RestartDocTest.java) { #restart-with-backoff-source } +: @@snip [RestartDocTest.java](/akka-docs/src/test/java/jdocs/stream/RestartDocTest.java) { #restart-with-backoff-source } Using a `randomFactor` to add a little bit of additional variance to the backoff intervals is highly recommended, in order to avoid multiple streams re-starting at the exact same point in time, @@ -132,10 +132,10 @@ The above `RestartSource` will never terminate unless the `Sink` it's fed into c it in combination with a @ref:[`KillSwitch`](stream-dynamic.md#kill-switch), so that you can terminate it when needed: Scala -: @@snip [RestartDocSpec.scala]($code$/scala/docs/stream/RestartDocSpec.scala) { #with-kill-switch } +: @@snip [RestartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala) { #with-kill-switch } Java -: @@snip [RestartDocTest.java]($code$/java/jdocs/stream/RestartDocTest.java) { #with-kill-switch } +: @@snip [RestartDocTest.java](/akka-docs/src/test/java/jdocs/stream/RestartDocTest.java) { #with-kill-switch } Sinks and flows can also be supervised, using @scala[`akka.stream.scaladsl.RestartSink` and `akka.stream.scaladsl.RestartFlow`] @java[`akka.stream.javadsl.RestartSink` and `akka.stream.javadsl.RestartFlow`]. The `RestartSink` is restarted when @@ -193,18 +193,18 @@ By default the stopping strategy is used for all exceptions, i.e. the stream wil failure when an exception is thrown. Scala -: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #stop } +: @@snip [FlowErrorDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala) { #stop } Java -: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #stop } +: @@snip [FlowErrorDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java) { #stop } The default supervision strategy for a stream can be defined on the settings of the materializer. 
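A sketch of such a default decider (assuming an `ActorSystem` in scope as `system`):

```scala
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, Supervision}

val decider: Supervision.Decider = {
  case _: ArithmeticException => Supervision.Resume // drop the element, keep going
  case _                      => Supervision.Stop   // anything else fails the stream
}

implicit val materializer: ActorMaterializer = ActorMaterializer(
  ActorMaterializerSettings(system).withSupervisionStrategy(decider))
```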
Scala -: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #resume } +: @@snip [FlowErrorDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala) { #resume } Java -: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #resume } +: @@snip [FlowErrorDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java) { #resume } Here you can see that any `ArithmeticException` will resume the processing, i.e. the elements that cause the division by zero are effectively dropped. @@@ note @@ -219,19 +219,19 @@ cycles, as explained in @ref:[Graph cycles, liveness and deadlocks](stream-graph The supervision strategy can also be defined for all operators of a flow. Scala -: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #resume-section } +: @@snip [FlowErrorDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala) { #resume-section } Java -: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #resume-section } +: @@snip [FlowErrorDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java) { #resume-section } `Restart` works in a similar way as `Resume` with the addition that accumulated state, if any, of the failing processing operator will be reset. Scala -: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #restart-section } +: @@snip [FlowErrorDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala) { #restart-section } Java -: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #restart-section } +: @@snip [FlowErrorDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java) { #restart-section } ### Errors from mapAsync @@ -244,18 +244,18 @@ discard those that cannot be found. We start with the tweet stream of authors: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #tweet-authors } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #tweet-authors } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #tweet-authors } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #tweet-authors } Assume that we can look up their email address using: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #email-address-lookup2 } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #email-address-lookup2 } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #email-address-lookup2 } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #email-address-lookup2 } The @scala[`Future`] @java[`CompletionStage`] is completed @scala[with `Failure`] @java[normally] if the email is not found. 
@@ -264,10 +264,10 @@ service can be done with `mapAsync` and we use @scala[`Supervision.resumingDecid unknown email addresses: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #email-addresses-mapAsync-supervision } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #email-addresses-mapAsync-supervision } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #email-addresses-mapAsync-supervision } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #email-addresses-mapAsync-supervision } If we did not use `Resume` the default stopping strategy would complete the stream with failure on the first @scala[`Future`] @java[`CompletionStage`] that was completed @scala[with `Failure`]@java[exceptionally]. diff --git a/akka-docs/src/main/paradox/stream/stream-flows-and-basics.md b/akka-docs/src/main/paradox/stream/stream-flows-and-basics.md index e50fec87d7..30d60373d5 100644 --- a/akka-docs/src/main/paradox/stream/stream-flows-and-basics.md +++ b/akka-docs/src/main/paradox/stream/stream-flows-and-basics.md @@ -90,10 +90,10 @@ thread-safe, and freely shareable*, which means that it is for example safe to s one actor prepare the work, and then have it be materialized at some completely different place in the code. Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #materialization-in-steps } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #materialization-in-steps } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #materialization-in-steps } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #materialization-in-steps } @@@ div { .group-scala } @@ -127,19 +127,19 @@ both a `Source` and a `Sink` (in order to run a `Flow`, since it has neither att @@@ Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #materialization-runWith } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #materialization-runWith } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #materialization-runWith } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #materialization-runWith } It is worth pointing out that since operators are *immutable*, connecting them returns a new operator, instead of modifying the existing instance, so while constructing long flows, remember to assign the new value to a variable or run it: Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #source-immutable } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #source-immutable } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #source-immutable } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #source-immutable } @@@ note @@ -160,10 +160,10 @@ variable. 
Both materializations give us a different @scala[`Future`] @java[`CompletionStage`], even though we used the same `sink` to refer to the future: Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #stream-reuse } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #stream-reuse } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #stream-reuse } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #stream-reuse } ### Defining sources, sinks and flows @@ -171,18 +171,18 @@ The objects `Source` and `Sink` define various ways to create sources and sinks examples show some of the most useful constructs (refer to the API documentation for more details): Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #source-sink } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #source-sink } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #source-sink } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #source-sink } There are various ways to wire up different parts of a stream, the following examples show some of the available options: Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #flow-connecting } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #flow-connecting } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #flow-connecting } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #flow-connecting } ### Illegal stream elements @@ -297,10 +297,10 @@ operators by way of adding `Attributes.asyncBoundary` using the method `async` o to operators that shall communicate with the downstream of the graph in an asynchronous fashion. Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #flow-async } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #flow-async } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #flow-async } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #flow-async } In this example we create two regions within the flow which will be executed in one Actor each—assuming that adding and multiplying integers is an extremely costly operation this will lead to a performance gain since two CPUs can @@ -334,10 +334,10 @@ many operator methods have variants that take an additional argument, a function resulting values. Some examples of using these combiners are illustrated in the example below. Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #flow-mat-combine } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #flow-mat-combine } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #flow-mat-combine } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #flow-mat-combine } @@@ note @@ -354,10 +354,10 @@ By using the `preMaterialize` operator on a `Source`, you can obtain its materia to consume messages from the original `Source`. Note that this can be materialized multiple times. 
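A sketch of this (element type `String` and buffer parameters assumed, with an implicit materializer in scope):

```scala
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Sink, Source}

val matValuePoweredSource =
  Source.actorRef[String](bufferSize = 100, overflowStrategy = OverflowStrategy.fail)

// materialize the ActorRef up front; `source` can then be run (even multiple times)
val (actorRef, source) = matValuePoweredSource.preMaterialize()

actorRef ! "hello"
source.runWith(Sink.foreach(println))
```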
Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #source-prematerialization } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #source-prematerialization } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #source-prematerialization } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #source-prematerialization } ## Stream ordering @@ -388,10 +388,10 @@ The usual way of creating an `ActorMaterializer` is to create it next to your `A which likely is in a "main" class of your application: Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #materializer-from-system } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #materializer-from-system } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #materializer-from-system } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #materializer-from-system } In this case the streams run by the materializer will run until it is shut down. When the materializer is shut down *before* the streams have run to completion, they will be terminated abruptly. This is a little different than the @@ -402,10 +402,10 @@ normal completion signals to manage the lifecycles of your streams. If we look at the following example, where we create the `ActorMaterializer` within an `Actor`: Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #materializer-from-actor-context } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #materializer-from-actor-context } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #materializer-from-actor-context } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #materializer-from-actor-context } In the above example we used the `ActorContext` to create the materializer. This binds its lifecycle to the surrounding `Actor`. In other words, while the stream we started there would under normal circumstances run forever, if we stop the Actor it would terminate the stream as well. We have *bound the stream's lifecycle to the surrounding actor's lifecycle*. This is a very useful technique if the stream is closely related to the actor, e.g. when the actor represents a user or other entity that we continuously query using the created stream -- and it would not make sense to keep the stream alive when the actor has terminated already. The stream's termination will be signalled by an "Abrupt termination exception". 
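A sketch of such an actor-bound materializer (the class name and messages are illustrative):

```scala
import akka.actor.Actor
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

class RunWithMyself extends Actor {
  // created from the actor's context: the materializer lives and dies with this actor
  implicit val mat = ActorMaterializer()

  Source.maybe[String].runWith(Sink.onComplete { done =>
    println(s"Completed: $done") // fails abruptly once the actor stops
  })

  def receive = {
    case "boom" => context.stop(self) // stopping the actor also tears down the stream
  }
}
```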
@@ -417,10 +417,10 @@ For example, you are using an Akka stream to push some large stream of data to a You may want to eagerly stop the Actor since it has performed all of its duties already: Scala -: @@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #materializer-from-system-in-actor } +: @@snip [FlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala) { #materializer-from-system-in-actor } Java -: @@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #materializer-from-system-in-actor } +: @@snip [FlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowDocTest.java) { #materializer-from-system-in-actor } In the above example we pass in a materializer to the Actor, which results in binding its lifecycle to the entire `ActorSystem` rather than the single enclosing actor. This can be useful if you want to share a materializer or group streams into specific materializers, for example because of the materializer's settings etc. diff --git a/akka-docs/src/main/paradox/stream/stream-graphs.md b/akka-docs/src/main/paradox/stream/stream-graphs.md index 2b68acde5c..db6d28573a 100644 --- a/akka-docs/src/main/paradox/stream/stream-graphs.md +++ b/akka-docs/src/main/paradox/stream/stream-graphs.md @@ -61,10 +61,10 @@ or ending a `Flow`. @scala[Junctions must always be created with defined type pa will be inferred.] Scala -: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #simple-graph-dsl } +: @@snip [GraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala) { #simple-graph-dsl } Java -: @@snip [GraphDSLTest.java]($akka$/akka-stream-tests/src/test/java/akka/stream/javadsl/GraphDslTest.java) { #simple-graph-dsl } +: @@snip [GraphDSLTest.java](/akka-stream-tests/src/test/java/akka/stream/javadsl/GraphDslTest.java) { #simple-graph-dsl } @@@ note @@ -96,19 +96,19 @@ in which we re-use the same instance of `Flow`, yet it will properly be materialized as two connections between the corresponding Sources and Sinks: Scala -: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-reusing-a-flow } +: @@snip [GraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-reusing-a-flow } Java -: @@snip [GraphDSLTest.java]($akka$/akka-stream-tests/src/test/java/akka/stream/javadsl/GraphDslTest.java) { #graph-dsl-reusing-a-flow } +: @@snip [GraphDSLTest.java](/akka-stream-tests/src/test/java/akka/stream/javadsl/GraphDslTest.java) { #graph-dsl-reusing-a-flow } In some cases we may have a list of graph elements, for example if they are dynamically created. If these graphs have similar signatures, we can construct a graph collecting all their materialized values as a collection: Scala -: @@snip [GraphOpsIntegrationSpec.scala]($akka$/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala) { #graph-from-list } +: @@snip [GraphOpsIntegrationSpec.scala](/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala) { #graph-from-list } Java -: @@snip [GraphDSLTest.java]($akka$/akka-stream-tests/src/test/java/akka/stream/javadsl/GraphDslTest.java) { #graph-from-list } +: @@snip [GraphDSLTest.java](/akka-stream-tests/src/test/java/akka/stream/javadsl/GraphDslTest.java) { #graph-from-list } @@ -132,10 +132,10 @@ the greatest int value of each zipped triple. We'll want to expose 3 input ports (unconnected sink). 
Scala -: @@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #simple-partial-graph-dsl } +: @@snip [StreamPartialGraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #simple-partial-graph-dsl } Java -: @@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #simple-partial-graph-dsl } +: @@snip [StreamPartialGraphDSLDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #simple-partial-graph-dsl } @@@ note @@ -187,20 +187,20 @@ Refer to the example below, in which we create a Source that zips together two n construction in action: Scala -: @@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-from-partial-graph-dsl } +: @@snip [StreamPartialGraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-from-partial-graph-dsl } Java -: @@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-from-partial-graph-dsl } +: @@snip [StreamPartialGraphDSLDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-from-partial-graph-dsl } Similarly the same can be done for a @scala[`Sink[T]`]@java[`Sink`], using `SinkShape.of` in which case the provided value must be an @scala[`Inlet[T]`]@java[`Inlet`]. For defining a @scala[`Flow[T]`]@java[`Flow`] we need to expose both an @scala[inlet and an outlet]@java[undefined source and sink]: Scala -: @@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #flow-from-partial-graph-dsl } +: @@snip [StreamPartialGraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #flow-from-partial-graph-dsl } Java -: @@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #flow-from-partial-graph-dsl } +: @@snip [StreamPartialGraphDSLDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #flow-from-partial-graph-dsl } ## Combining Sources and Sinks with simplified API @@ -211,19 +211,19 @@ without the need for using the Graph DSL. The combine method takes care of const the necessary graph underneath. 
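To sketch the shape of this simplified API (the integer sources here are chosen arbitrarily, and an implicit materializer is assumed for the commented run):

```scala
import akka.NotUsed
import akka.stream.scaladsl.{ Merge, Sink, Source }

val first: Source[Int, NotUsed] = Source(1 to 3)
val second: Source[Int, NotUsed] = Source(10 to 12)

// combine constructs the fan-in graph (here a Merge junction) for us
val merged: Source[Int, NotUsed] = Source.combine(first, second)(Merge(_))

// merged.runWith(Sink.foreach(println)) // needs an implicit Materializer
```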
In the following example we combine two sources into one (fan-in): Scala -: @@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-combine } +: @@snip [StreamPartialGraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-combine } Java -: @@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-combine } +: @@snip [StreamPartialGraphDSLDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-combine } The same can be done for a @scala[`Sink[T]`]@java[`Sink`] but in this case it will be fan-out: Scala -: @@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #sink-combine } +: @@snip [StreamPartialGraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #sink-combine } Java -: @@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #sink-combine } +: @@snip [StreamPartialGraphDSLDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #sink-combine } ## Building reusable Graph components @@ -241,7 +241,7 @@ Altogether, our junction will have two input ports of type `I` (for the normal a of type `O`. To represent this interface, we need to define a custom `Shape`. The following lines show how to do that. Scala -: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape } +: @@snip [GraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape } @@ -262,7 +262,7 @@ Since our shape has two input ports and one output port, we can use the `FanInSh our custom shape: Scala -: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape2 } +: @@snip [GraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape2 } @@ -272,7 +272,7 @@ to a `Balance` junction which will fan-out to a configurable number of workers ( results together and send them out through our only output port. This is expressed by the following code: Scala -: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-create } +: @@snip [GraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-create } @@ -282,7 +282,7 @@ and jobs using plain strings and prints out the results. Actually we used *two* using `add()` twice. Scala -: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-use } +: @@snip [GraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-use } @@ -300,17 +300,17 @@ this purpose exists the special type `BidiFlow` which is a graph that has exactly two open inlets and two open outlets.
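As a rough preview of such a bidirectional flow, here is a sketch built from two plain functions with `BidiFlow.fromFunctions` (the `Message` type and the conversion functions are placeholders, not the codec from the snippets below):

```scala
import akka.NotUsed
import akka.stream.scaladsl.BidiFlow
import akka.util.ByteString

// Message is a stand-in for a real protocol type.
final case class Message(text: String)

def toBytes(msg: Message): ByteString = ByteString(msg.text, "UTF-8")
def fromBytes(bytes: ByteString): Message = Message(bytes.utf8String)

// Outbound messages are encoded on one inlet/outlet pair,
// inbound bytes are decoded on the other.
val codec: BidiFlow[Message, ByteString, ByteString, Message, NotUsed] =
  BidiFlow.fromFunctions(toBytes, fromBytes)
```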
The corresponding shape is called `BidiShape` and is defined like this: -@@snip [Shape.scala]($akka$/akka-stream/src/main/scala/akka/stream/Shape.scala) { #bidi-shape } +@@snip [Shape.scala](/akka-stream/src/main/scala/akka/stream/Shape.scala) { #bidi-shape } A bidirectional flow is defined just like a unidirectional `Flow` as demonstrated for the codec mentioned above: Scala -: @@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #codec } +: @@snip [BidiFlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala) { #codec } Java -: @@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #codec } +: @@snip [BidiFlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/BidiFlowDocTest.java) { #codec } The first version resembles the partial graph constructor, while for the simple @@ -319,10 +319,10 @@ as shown on the last line. The implementation of the two functions is not difficult either: Scala -: @@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #codec-impl } +: @@snip [BidiFlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala) { #codec-impl } Java -: @@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #codec-impl } +: @@snip [BidiFlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/BidiFlowDocTest.java) { #codec-impl } In this way you can integrate any other serialization library that @@ -334,19 +334,19 @@ zero or more messages. This is best implemented using @ref[`GraphStage`](stream- (see also @ref[Custom processing with GraphStage](stream-customize.md#graphstage)). Scala -: @@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #framing } +: @@snip [BidiFlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala) { #framing } Java -: @@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #framing } +: @@snip [BidiFlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/BidiFlowDocTest.java) { #framing } With these implementations we can build a protocol stack and test it: Scala -: @@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #compose } +: @@snip [BidiFlowDocSpec.scala](/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala) { #compose } Java -: @@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #compose } +: @@snip [BidiFlowDocTest.java](/akka-docs/src/test/java/jdocs/stream/BidiFlowDocTest.java) { #compose } This example demonstrates how `BidiFlow` subgraphs can be hooked @@ -364,20 +364,20 @@ If the materialized value is needed at more than one place, it is possible to ca times to acquire the necessary number of outlets. Scala -: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue } +: @@snip [GraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue } Java -: @@snip [GraphDSLTest.java]($akka$/akka-stream-tests/src/test/java/akka/stream/javadsl/GraphDslTest.java) { #graph-dsl-matvalue } +: @@snip [GraphDSLTest.java](/akka-stream-tests/src/test/java/akka/stream/javadsl/GraphDslTest.java) { #graph-dsl-matvalue } Be careful not to introduce a cycle where the materialized value actually contributes to the materialized value. The following example demonstrates a case where the materialized @scala[`Future`]@java[`CompletionStage`] of a fold is fed back to the fold itself. 
Scala -: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue-cycle } +: @@snip [GraphDSLDocSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue-cycle } Java -: @@snip [GraphDSLTest.java]($akka$/akka-stream-tests/src/test/java/akka/stream/javadsl/GraphDslTest.java) { #graph-dsl-matvalue-cycle } +: @@snip [GraphDSLTest.java](/akka-stream-tests/src/test/java/akka/stream/javadsl/GraphDslTest.java) { #graph-dsl-matvalue-cycle } @@ -403,10 +403,10 @@ see there are cases where this is very helpful. @@@ Scala -: @@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #deadlocked } +: @@snip [GraphCyclesSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphCyclesSpec.scala) { #deadlocked } Java -: @@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #deadlocked } +: @@snip [GraphCyclesDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphCyclesDocTest.java) { #deadlocked } Running this we observe that after a few numbers have been printed, no more elements are logged to the console - @@ -426,10 +426,10 @@ before trying the other lower priority input ports. Since we feed back through t that the elements in the cycles can flow. Scala -: @@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #unfair } +: @@snip [GraphCyclesSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphCyclesSpec.scala) { #unfair } Java -: @@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #unfair } +: @@snip [GraphCyclesDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphCyclesDocTest.java) { #unfair } If we run the example we see that the same sequence of numbers are printed @@ -449,10 +449,10 @@ To make our cycle both live (not deadlocking) and fair we can introduce a droppi case we chose the `buffer()` operation giving it a dropping strategy `OverflowStrategy.dropHead`. Scala -: @@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #dropping } +: @@snip [GraphCyclesSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphCyclesSpec.scala) { #dropping } Java -: @@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #dropping } +: @@snip [GraphCyclesDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphCyclesDocTest.java) { #dropping } If we run this example we see that @@ -473,10 +473,10 @@ Since `ZipWith` takes one element from `source` *and* from the feedback arc to i we maintain the balance of elements. Scala -: @@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #zipping-dead } +: @@snip [GraphCyclesSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphCyclesSpec.scala) { #zipping-dead } Java -: @@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #zipping-dead } +: @@snip [GraphCyclesDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphCyclesDocTest.java) { #zipping-dead } Still, when we try to run the example it turns out that no element is printed at all! After some investigation we @@ -490,10 +490,10 @@ element into the cycle that is independent from `source`. We do this by using a arc that injects a single element using `Source.single`. 
Scala -: @@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #zipping-live } +: @@snip [GraphCyclesSpec.scala](/akka-docs/src/test/scala/docs/stream/GraphCyclesSpec.scala) { #zipping-live } Java -: @@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #zipping-live } +: @@snip [GraphCyclesDocTest.java](/akka-docs/src/test/java/jdocs/stream/GraphCyclesDocTest.java) { #zipping-live } When we run the above example we see that processing starts and never stops. The important takeaway from this example diff --git a/akka-docs/src/main/paradox/stream/stream-integrations.md b/akka-docs/src/main/paradox/stream/stream-integrations.md index cff5f02815..1c26f95e11 100644 --- a/akka-docs/src/main/paradox/stream/stream-integrations.md +++ b/akka-docs/src/main/paradox/stream/stream-integrations.md @@ -30,10 +30,10 @@ the `ask` and the mailbox of the actor will not be filled with more messages tha `parallelism` of the `ask` operator (similarly to how the `mapAsync` operator works). Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #ask } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #ask } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #ask } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #ask } Note that the messages received in the actor will be in the same order as the stream elements, i.e. the `parallelism` does not change the ordering @@ -48,10 +48,10 @@ reply will complete the @scala[`Future`]@java[`CompletionStage`] of the `ask` a In case the target actor is stopped, the operator will fail with an `AskStageTargetActorTerminatedException` Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #ask-actor } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #ask-actor } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #ask-actor } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #ask-actor } The stream can be completed with failure by sending `akka.actor.Status.Failure` as reply from the actor. @@ -85,18 +85,18 @@ given `onCompleteMessage` will be sent to the destination actor. When the stream failure a `akka.actor.Status.Failure` message will be sent to the destination actor. 
Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #actorRefWithAck } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #actorRefWithAck } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #actorRefWithAck } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #actorRefWithAck } The receiving actor would then need to be implemented similar to the following: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #actorRefWithAck-actor } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #actorRefWithAck-actor } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #actorRefWithAck-actor } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #actorRefWithAck-actor } Note that replying to the sender of the elements (the "stream") is required as lack of those ack signals would be interpreted as back-pressure (as intended), and no new elements will be sent into the actor until it acknowledges some elements. @@ -141,10 +141,10 @@ was dropped. Can also complete with `QueueOfferResult.Failure` - when stream fa `QueueOfferResult.QueueClosed` when downstream is completed. Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #source-queue } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #source-queue } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #source-queue } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #source-queue } When used from an actor you typically `pipe` the result of the @scala[`Future`]@java[`CompletionStage`] back to the actor to continue processing. 
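A compact sketch of this queue pattern (the buffer size, element type and printed messages are arbitrary; an implicit materializer is assumed):

```scala
import scala.concurrent.ExecutionContext.Implicits.global
import akka.stream.{ OverflowStrategy, QueueOfferResult }
import akka.stream.scaladsl.{ Keep, Sink, Source }

val queue = Source.queue[Int](bufferSize = 10, OverflowStrategy.backpressure)
  .toMat(Sink.foreach(n => println(s"got $n")))(Keep.left)
  .run() // requires an implicit Materializer in scope

// offer returns a Future[QueueOfferResult]; from an actor you would
// typically pipe the result back instead of using foreach.
queue.offer(42).foreach {
  case QueueOfferResult.Enqueued    => println("enqueued")
  case QueueOfferResult.Dropped     => println("dropped")
  case QueueOfferResult.Failure(ex) => println(s"stream failed: $ex")
  case QueueOfferResult.QueueClosed => println("stream completed")
}
```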
@@ -178,43 +178,43 @@ For example, sending emails to the authors of selected tweets using an external email service: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #email-server-send } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #email-server-send } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #email-server-send } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #email-server-send } We start with the tweet stream of authors: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #tweet-authors} +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #tweet-authors} Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #tweet-authors } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #tweet-authors } Assume that we can lookup their email address using: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #email-address-lookup } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #email-address-lookup } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #email-address-lookup } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #email-address-lookup } Transforming the stream of authors to a stream of email addresses by using the `lookupEmail` service can be done with `mapAsync`: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #email-addresses-mapAsync } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #email-addresses-mapAsync } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #email-addresses-mapAsync } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #email-addresses-mapAsync } Finally, sending the emails: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #send-emails } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #send-emails } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #send-emails } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #send-emails } `mapAsync` is applying the given function that is calling out to the external service to each of the elements as they pass through this processing step. The function returns a @scala[`Future`]@java[`CompletionStage`] @@ -237,10 +237,10 @@ Note that `mapAsync` preserves the order of the stream elements. 
In this example the order is not important, so we can use the more efficient `mapAsyncUnordered`: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #external-service-mapAsyncUnordered } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #external-service-mapAsyncUnordered } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #external-service-mapAsyncUnordered } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #external-service-mapAsyncUnordered } In the above example the services conveniently returned a @scala[`Future`]@java[`CompletionStage`] of the result. If that is not the case you need to wrap the call in a @scala[`Future`]@java[`CompletionStage`]. If the service call @@ -248,23 +248,23 @@ involves blocking you must also make sure that you run it on a dedicated executi avoid starvation and disturbance of other tasks in the system. Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #blocking-mapAsync } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #blocking-mapAsync } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #blocking-mapAsync } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #blocking-mapAsync } The configuration of the `"blocking-dispatcher"` may look something like: -@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #blocking-dispatcher-config } +@@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #blocking-dispatcher-config } An alternative for blocking calls is to perform them in a `map` operation, still using a dedicated dispatcher for that operation. Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #blocking-map } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #blocking-map } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #blocking-map } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #blocking-map } However, that is not exactly the same as `mapAsync`, since the `mapAsync` may run several calls concurrently, but `map` performs them one at a time. @@ -273,10 +273,10 @@ For a service that is exposed as an actor, or if an actor is used as a gateway i external service, you can use `ask`: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #save-tweets } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #save-tweets } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #save-tweets } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #save-tweets } Note that if the `ask` is not completed within the given timeout the stream is completed with failure. If that is not the desired outcome you can use `recover` on the `ask` @scala[`Future`]@java[`CompletionStage`]. @@ -305,10 +305,10 @@ successive calls as long as there is downstream demand of several elements. Here is a fictive service that we can use to illustrate these aspects.
Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-service } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-service } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-service } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-service } Elements starting with a lower case character are simulated to take longer time to process. @@ -316,10 +316,10 @@ to process. Here is how we can use it with `mapAsync`: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-mapAsync } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-mapAsync } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-mapAsync } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-mapAsync } The output may look like this: @@ -377,10 +377,10 @@ calls are limited by the buffer size (4) of the `ActorMaterializerSettings`. Here is how we can use the same service with `mapAsyncUnordered`: Scala -: @@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-mapAsyncUnordered } +: @@snip [IntegrationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-mapAsyncUnordered } Java -: @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-mapAsyncUnordered } +: @@snip [IntegrationDocTest.java](/akka-docs/src/test/java/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-mapAsyncUnordered } The output may look like this: @@ -451,34 +451,34 @@ An incomplete list of other implementations: The two most important interfaces in Reactive Streams are the `Publisher` and `Subscriber`. 
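Akka Streams bridges to these interfaces with `Source.fromPublisher` and `Sink.fromSubscriber`; a sketch, assuming the publisher and subscriber come from third-party libraries:

```scala
import akka.stream.Materializer
import akka.stream.scaladsl.{ Sink, Source }
import org.reactivestreams.{ Publisher, Subscriber }

def connect(tweets: Publisher[String], storage: Subscriber[String])(
    implicit mat: Materializer): Unit = {
  Source.fromPublisher(tweets)
    .map(_.toLowerCase) // any Akka Streams processing in between
    .to(Sink.fromSubscriber(storage))
    .run()
}
```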
Scala -: @@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #imports } +: @@snip [ReactiveStreamsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #imports } Java -: @@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #imports } +: @@snip [ReactiveStreamsDocTest.java](/akka-docs/src/test/java/jdocs/stream/ReactiveStreamsDocTest.java) { #imports } Let us assume that a library provides a publisher of tweets: Scala -: @@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #tweets-publisher } +: @@snip [ReactiveStreamsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #tweets-publisher } Java -: @@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #tweets-publisher } +: @@snip [ReactiveStreamsDocTest.java](/akka-docs/src/test/java/jdocs/stream/ReactiveStreamsDocTest.java) { #tweets-publisher } and another library knows how to store author handles in a database: Scala -: @@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #author-storage-subscriber } +: @@snip [ReactiveStreamsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #author-storage-subscriber } Java -: @@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #author-storage-subscriber } +: @@snip [ReactiveStreamsDocTest.java](/akka-docs/src/test/java/jdocs/stream/ReactiveStreamsDocTest.java) { #author-storage-subscriber } Using an Akka Streams `Flow` we can transform the stream and connect those: Scala -: @@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #authors #connect-all } +: @@snip [ReactiveStreamsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #authors #connect-all } Java -: @@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #authors #connect-all } +: @@snip [ReactiveStreamsDocTest.java](/akka-docs/src/test/java/jdocs/stream/ReactiveStreamsDocTest.java) { #authors #connect-all } The `Publisher` is used as an input `Source` to the flow and the `Subscriber` is used as an output `Sink`. @@ -488,10 +488,10 @@ materializes to a `Processor` when `run()` is called. `run()` itself can be call times, resulting in a new `Processor` instance each time. Scala -: @@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #flow-publisher-subscriber } +: @@snip [ReactiveStreamsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #flow-publisher-subscriber } Java -: @@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #flow-publisher-subscriber } +: @@snip [ReactiveStreamsDocTest.java](/akka-docs/src/test/java/jdocs/stream/ReactiveStreamsDocTest.java) { #flow-publisher-subscriber } A publisher can be connected to a subscriber with the `subscribe` method. 
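A sketch of that wiring (a publisher and subscriber of matching type are assumed to be given):

```scala
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import org.reactivestreams.{ Processor, Publisher, Subscriber }

def wire(pub: Publisher[Int], sub: Subscriber[Int])(implicit mat: Materializer): Unit = {
  // every run() yields a fresh Processor instance
  val processor: Processor[Int, Int] = Flow[Int].map(_ * 2).toProcessor.run()
  pub.subscribe(processor)  // the processor consumes the publisher...
  processor.subscribe(sub)  // ...and publishes to the subscriber
}
```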
@@ -499,10 +499,10 @@ It is also possible to expose a `Source` as a `Publisher` by using the Publisher-`Sink`: Scala -: @@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #source-publisher } +: @@snip [ReactiveStreamsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #source-publisher } Java -: @@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #source-publisher } +: @@snip [ReactiveStreamsDocTest.java](/akka-docs/src/test/java/jdocs/stream/ReactiveStreamsDocTest.java) { #source-publisher } A publisher that is created with @scala[`Sink.asPublisher(fanout = false)`]@java[`Sink.asPublisher(AsPublisher.WITHOUT_FANOUT)`] supports only a single subscription. Additional subscription attempts will be rejected with an `IllegalStateException`. @@ -510,17 +510,17 @@ Additional subscription attempts will be rejected with an `IllegalStateException A publisher that supports multiple subscribers using fan-out/broadcasting is created as follows: Scala -: @@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #author-alert-subscriber #author-storage-subscriber } +: @@snip [ReactiveStreamsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #author-alert-subscriber #author-storage-subscriber } Java -: @@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #author-alert-subscriber #author-storage-subscriber } +: @@snip [ReactiveStreamsDocTest.java](/akka-docs/src/test/java/jdocs/stream/ReactiveStreamsDocTest.java) { #author-alert-subscriber #author-storage-subscriber } Scala -: @@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #source-fanoutPublisher } +: @@snip [ReactiveStreamsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #source-fanoutPublisher } Java -: @@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #source-fanoutPublisher } +: @@snip [ReactiveStreamsDocTest.java](/akka-docs/src/test/java/jdocs/stream/ReactiveStreamsDocTest.java) { #source-fanoutPublisher } The input buffer size of the operator controls how far apart the slowest subscriber can be from the fastest subscriber before slowing down the stream. 
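A sketch of a fan-out capable publisher with an explicit input buffer attribute (the buffer sizes are arbitrary):

```scala
import akka.stream.Attributes
import akka.stream.scaladsl.{ Sink, Source }
import org.reactivestreams.Publisher

// requires an implicit Materializer in scope
val numbers: Publisher[Int] =
  Source(1 to 100).runWith(
    Sink.asPublisher[Int](fanout = true)
      // the slowest subscriber may trail the fastest by at most this buffer
      .withAttributes(Attributes.inputBuffer(initial = 8, max = 16)))
```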
@@ -529,19 +529,19 @@ To make the picture complete, it is also possible to expose a `Sink` as a `Subsc by using the Subscriber-`Source`: Scala -: @@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #sink-subscriber } +: @@snip [ReactiveStreamsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #sink-subscriber } Java -: @@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #sink-subscriber } +: @@snip [ReactiveStreamsDocTest.java](/akka-docs/src/test/java/jdocs/stream/ReactiveStreamsDocTest.java) { #sink-subscriber } It is also possible to re-wrap `Processor` instances as a `Flow` by passing a factory function that will create the `Processor` instances: Scala -: @@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #use-processor } +: @@snip [ReactiveStreamsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #use-processor } Java -: @@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #use-processor } +: @@snip [ReactiveStreamsDocTest.java](/akka-docs/src/test/java/jdocs/stream/ReactiveStreamsDocTest.java) { #use-processor } Please note that a factory is necessary to achieve reusability of the resulting `Flow`. @@ -584,10 +584,10 @@ stream publisher that keeps track of the subscription life cycle and requested e Here is an example of such an actor. It dispatches incoming jobs to the attached subscriber: Scala -: @@snip [ActorPublisherDocSpec.scala]($code$/scala/docs/stream/ActorPublisherDocSpec.scala) { #job-manager } +: @@snip [ActorPublisherDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ActorPublisherDocSpec.scala) { #job-manager } Java -: @@snip [ActorPublisherDocTest.java]($code$/java/jdocs/stream/ActorPublisherDocTest.java) { #job-manager } +: @@snip [ActorPublisherDocTest.java](/akka-docs/src/test/java/jdocs/stream/ActorPublisherDocTest.java) { #job-manager } You send elements to the stream by calling `onNext`. You are allowed to send as many elements as have been requested by the stream subscriber. This amount can be inquired with @@ -620,10 +620,10 @@ More detailed information can be found in the API documentation. This is how it can be used as input `Source` to a `Flow`: Scala -: @@snip [ActorPublisherDocSpec.scala]($code$/scala/docs/stream/ActorPublisherDocSpec.scala) { #actor-publisher-usage } +: @@snip [ActorPublisherDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ActorPublisherDocSpec.scala) { #actor-publisher-usage } Java -: @@snip [ActorPublisherDocTest.java]($code$/java/jdocs/stream/ActorPublisherDocTest.java) { #actor-publisher-usage } +: @@snip [ActorPublisherDocTest.java](/akka-docs/src/test/java/jdocs/stream/ActorPublisherDocTest.java) { #actor-publisher-usage } @scala[A publisher that is created with `Sink.asPublisher` supports a specified number of subscribers. Additional subscription attempts will be rejected with an `IllegalStateException`. @@ -652,10 +652,10 @@ messages from the stream. It can also receive other, non-stream messages, in the Here is an example of such an actor.
It dispatches incoming jobs to child worker actors: Scala -: @@snip [ActorSubscriberDocSpec.scala]($code$/scala/docs/stream/ActorSubscriberDocSpec.scala) { #worker-pool } +: @@snip [ActorSubscriberDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ActorSubscriberDocSpec.scala) { #worker-pool } Java -: @@snip [ActorSubscriberDocTest.java]($code$/java/jdocs/stream/ActorSubscriberDocTest.java) { #worker-pool } +: @@snip [ActorSubscriberDocTest.java](/akka-docs/src/test/java/jdocs/stream/ActorSubscriberDocTest.java) { #worker-pool } A subclass must define the `RequestStrategy` to control stream back pressure. After each incoming message the @scala[`ActorSubscriber`]@java[`AbstractActorSubscriber`] will automatically invoke @@ -674,7 +674,7 @@ More detailed information can be found in the API documentation. This is how it can be used as output `Sink` to a `Flow`: Scala -: @@snip [ActorSubscriberDocSpec.scala]($code$/scala/docs/stream/ActorSubscriberDocSpec.scala) { #actor-subscriber-usage } +: @@snip [ActorSubscriberDocSpec.scala](/akka-docs/src/test/scala/docs/stream/ActorSubscriberDocSpec.scala) { #actor-subscriber-usage } Java -: @@snip [ActorSubscriberDocTest.java]($code$/java/jdocs/stream/ActorSubscriberDocTest.java) { #actor-subscriber-usage } +: @@snip [ActorSubscriberDocTest.java](/akka-docs/src/test/java/jdocs/stream/ActorSubscriberDocTest.java) { #actor-subscriber-usage } diff --git a/akka-docs/src/main/paradox/stream/stream-io.md b/akka-docs/src/main/paradox/stream/stream-io.md index 1b2dfcd6d6..9e337b2352 100644 --- a/akka-docs/src/main/paradox/stream/stream-io.md +++ b/akka-docs/src/main/paradox/stream/stream-io.md @@ -25,10 +25,10 @@ In order to implement a simple EchoServer we `bind` to a given address, which re which will emit an `IncomingConnection` element for each new connection that the Server should handle: Scala -: @@snip [StreamTcpDocSpec.scala]($code$/scala/docs/stream/io/StreamTcpDocSpec.scala) { #echo-server-simple-bind } +: @@snip [StreamTcpDocSpec.scala](/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala) { #echo-server-simple-bind } Java -: @@snip [StreamTcpDocTest.java]($code$/java/jdocs/stream/io/StreamTcpDocTest.java) { #echo-server-simple-bind } +: @@snip [StreamTcpDocTest.java](/akka-docs/src/test/java/jdocs/stream/io/StreamTcpDocTest.java) { #echo-server-simple-bind } ![tcp-stream-bind.png](../images/tcp-stream-bind.png) @@ -40,10 +40,10 @@ argument indicates that we require an explicit line ending even for the last mes In this example we add exclamation marks to each incoming text message and push it through the flow: Scala -: @@snip [StreamTcpDocSpec.scala]($code$/scala/docs/stream/io/StreamTcpDocSpec.scala) { #echo-server-simple-handle } +: @@snip [StreamTcpDocSpec.scala](/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala) { #echo-server-simple-handle } Java -: @@snip [StreamTcpDocTest.java]($code$/java/jdocs/stream/io/StreamTcpDocTest.java) { #echo-server-simple-handle } +: @@snip [StreamTcpDocTest.java](/akka-docs/src/test/java/jdocs/stream/io/StreamTcpDocTest.java) { #echo-server-simple-handle } ![tcp-stream-run.png](../images/tcp-stream-run.png) @@ -70,10 +70,10 @@ and would like to interact with it using Akka Streams over TCP.
To open an outgo the `outgoingConnection` method: Scala -: @@snip [StreamTcpDocSpec.scala]($code$/scala/docs/stream/io/StreamTcpDocSpec.scala) { #repl-client } +: @@snip [StreamTcpDocSpec.scala](/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala) { #repl-client } Java -: @@snip [StreamTcpDocTest.java]($code$/java/jdocs/stream/io/StreamTcpDocTest.java) { #repl-client } +: @@snip [StreamTcpDocTest.java](/akka-docs/src/test/java/jdocs/stream/io/StreamTcpDocTest.java) { #repl-client } The `repl` flow we use to handle the server interaction first prints the server's response, then awaits on input from the command line (this blocking call is used here for the sake of simplicity) and converts it to a @@ -109,10 +109,10 @@ to the protocol we are trying to implement using Streams. In chat-like applicati it makes sense to make the Server initiate the conversation by emitting a "hello" message: Scala -: @@snip [StreamTcpDocSpec.scala]($code$/scala/docs/stream/io/StreamTcpDocSpec.scala) { #welcome-banner-chat-server } +: @@snip [StreamTcpDocSpec.scala](/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala) { #welcome-banner-chat-server } Java -: @@snip [StreamTcpDocTest.java]($code$/java/jdocs/stream/io/StreamTcpDocTest.java) { #welcome-banner-chat-server } +: @@snip [StreamTcpDocTest.java](/akka-docs/src/test/java/jdocs/stream/io/StreamTcpDocTest.java) { #welcome-banner-chat-server } To emit the initial message we merge a `Source` with a single element, after the command processing but before the framing and transformation to `ByteString`s; this way we do not have to repeat such logic. @@ -138,10 +138,10 @@ for more information. @scala[[JsonFraming](http://doc.akka.io/api/akka/current/akka/stream/scaladsl/JsonFraming$.html)]@java[[JsonFraming](http://doc.akka.io/japi/akka/current/akka/stream/javadsl/JsonFraming.html#objectScanner-int-)] separates valid JSON objects from incoming `ByteString` objects: Scala -: @@snip [JsonFramingSpec.scala]($akka$akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala) { #using-json-framing } +: @@snip [JsonFramingSpec.scala](/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala) { #using-json-framing } Java -: @@snip [JsonFramingTest.java]($akka$akka-stream-tests/src/test/java/akka/stream/javadsl/JsonFramingTest.java) { #using-json-framing } +: @@snip [JsonFramingTest.java](/akka-stream-tests/src/test/java/akka/stream/javadsl/JsonFramingTest.java) { #using-json-framing } ### TLS @@ -150,10 +150,10 @@ Similar factories as shown above for raw TCP but where the data is encrypted usi Using TLS requires a keystore and a truststore and then a somewhat involved dance of configuring the SSLContext and the details for how the session should be negotiated: Scala -: @@snip [TcpSpec.scala]($akka$akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala) { #setting-up-ssl-context } +: @@snip [TcpSpec.scala](/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala) { #setting-up-ssl-context } Java -: @@snip [TcpTest.java]($akka$akka-stream-tests/src/test/java/akka/stream/javadsl/TcpTest.java) { #setting-up-ssl-context } +: @@snip [TcpTest.java](/akka-stream-tests/src/test/java/akka/stream/javadsl/TcpTest.java) { #setting-up-ssl-context } The `SslContext` and `NegotiateFirstSession` instances can then be used with the binding or outgoing connection factory methods.
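Tying the TCP pieces of this section together, a sketch of a framed echo handler (the address, port and appended `"!!!"` are illustrative):

```scala
import akka.stream.scaladsl.{ Flow, Framing, Tcp }
import akka.util.ByteString

// Frame incoming bytes into lines, append "!!!", and write the lines back.
val echo = Flow[ByteString]
  .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = true))
  .map(_.utf8String)
  .map(_ + "!!!\n")
  .map(ByteString(_))

// With an implicit ActorSystem and Materializer in scope:
// Tcp().bind("127.0.0.1", 8888).runForeach(_.handleWith(echo))
```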
@@ -167,10 +167,10 @@ Streaming data from a file is as easy as creating a *FileIO.fromPath* given a ta `chunkSize` which determines the buffer size determined as one "element" in such stream: Scala -: @@snip [StreamFileDocSpec.scala]($code$/scala/docs/stream/io/StreamFileDocSpec.scala) { #file-source } +: @@snip [StreamFileDocSpec.scala](/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala) { #file-source } Java -: @@snip [StreamFileDocTest.java]($code$/java/jdocs/stream/io/StreamFileDocTest.java) { #file-source } +: @@snip [StreamFileDocTest.java](/akka-docs/src/test/java/jdocs/stream/io/StreamFileDocTest.java) { #file-source } Please note that these operators are backed by Actors and by default are configured to run on a pre-configured threadpool-backed dispatcher dedicated for File IO. This is very important as it isolates the blocking file IO operations from the rest @@ -179,7 +179,7 @@ dispatcher for file IO operations globally, you can do so by changing the `akka. or for a specific operator by specifying a custom Dispatcher in code, like this: Scala -: @@snip [StreamFileDocSpec.scala]($code$/scala/docs/stream/io/StreamFileDocSpec.scala) { #custom-dispatcher-code } +: @@snip [StreamFileDocSpec.scala](/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala) { #custom-dispatcher-code } Java -: @@snip [StreamFileDocTest.java]($code$/java/jdocs/stream/io/StreamFileDocTest.java) { #custom-dispatcher-code } +: @@snip [StreamFileDocTest.java](/akka-docs/src/test/java/jdocs/stream/io/StreamFileDocTest.java) { #custom-dispatcher-code } diff --git a/akka-docs/src/main/paradox/stream/stream-parallelism.md b/akka-docs/src/main/paradox/stream/stream-parallelism.md index ab3b136188..f7a88123d5 100644 --- a/akka-docs/src/main/paradox/stream/stream-parallelism.md +++ b/akka-docs/src/main/paradox/stream/stream-parallelism.md @@ -36,10 +36,10 @@ completion. This is how this setup would look like implemented as a stream: Scala -: @@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #pipelining } +: @@snip [FlowParallelismDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala) { #pipelining } Java -: @@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #pipelining } +: @@snip [FlowParallelismDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowParallelismDocTest.java) { #pipelining } The two `map` operators in sequence (encapsulated in the "frying pan" flows) will be executed in a pipelined way, the same way that Roland was using his frying pans: @@ -71,10 +71,10 @@ In essence he parallelizes the same process over multiple pans. This is how this using streams: Scala -: @@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #parallelism } +: @@snip [FlowParallelismDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala) { #parallelism } Java -: @@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #parallelism } +: @@snip [FlowParallelismDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowParallelismDocTest.java) { #parallelism } The benefit of parallelizing is that it is easy to scale. In the pancake example it is easy to add a third frying pan with Patrik's method, but Roland cannot add a third frying pan, @@ -97,10 +97,10 @@ will employ two chefs, each working using Roland's pipelining method, but we use Patrik used the two frying pans. 
This is how it looks like if expressed as streams: Scala -: @@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #parallel-pipeline } +: @@snip [FlowParallelismDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala) { #parallel-pipeline } Java -: @@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #parallel-pipeline } +: @@snip [FlowParallelismDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowParallelismDocTest.java) { #parallel-pipeline } The above pattern works well if there are many independent jobs that do not depend on the results of each other, but the jobs themselves need multiple processing steps where each step builds on the result of @@ -118,10 +118,10 @@ plate. This is again straightforward to implement with the streams API: Scala -: @@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #pipelined-parallel } +: @@snip [FlowParallelismDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala) { #pipelined-parallel } Java -: @@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #pipelined-parallel } +: @@snip [FlowParallelismDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowParallelismDocTest.java) { #pipelined-parallel } This usage pattern is less common but might be usable if a certain step in the pipeline might take wildly different times to finish different jobs. The reason is that there are more balance-merge steps in this pattern diff --git a/akka-docs/src/main/paradox/stream/stream-quickstart.md b/akka-docs/src/main/paradox/stream/stream-quickstart.md index 822022c140..4e2c44066b 100644 --- a/akka-docs/src/main/paradox/stream/stream-quickstart.md +++ b/akka-docs/src/main/paradox/stream/stream-quickstart.md @@ -22,34 +22,34 @@ A stream usually begins at a source, so this is also how we start an Akka Stream. 
Before we create one, we import the full complement of streaming tools: Scala -: @@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #stream-imports } +: @@snip [QuickStartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala) { #stream-imports } Java -: @@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #stream-imports } +: @@snip [QuickStartDocTest.java](/akka-docs/src/test/java/jdocs/stream/QuickStartDocTest.java) { #stream-imports } If you want to execute the code samples while you read through the quick start guide, you will also need the following imports: Scala -: @@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #other-imports } +: @@snip [QuickStartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala) { #other-imports } Java -: @@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #other-imports } +: @@snip [QuickStartDocTest.java](/akka-docs/src/test/java/jdocs/stream/QuickStartDocTest.java) { #other-imports } And an @scala[object]@java[class] to hold your code, for example: Scala -: @@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #main-app } +: @@snip [QuickStartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala) { #main-app } Java -: @@snip [Main.java]($code$/java/jdocs/stream/Main.java) { #main-app } +: @@snip [Main.java](/akka-docs/src/test/java/jdocs/stream/Main.java) { #main-app } Now we will start with a rather simple source, emitting the integers 1 to 100: Scala -: @@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #create-source } +: @@snip [QuickStartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala) { #create-source } Java -: @@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #create-source } +: @@snip [QuickStartDocTest.java](/akka-docs/src/test/java/jdocs/stream/QuickStartDocTest.java) { #create-source } The `Source` type is parameterized with two types: the first one is the type of element that this source emits and the second one may signal that @@ -63,10 +63,10 @@ first 100 natural numbers, but this source is not yet active. In order to get those numbers out we have to run it: Scala -: @@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #run-source } +: @@snip [QuickStartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala) { #run-source } Java -: @@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #run-source } +: @@snip [QuickStartDocTest.java](/akka-docs/src/test/java/jdocs/stream/QuickStartDocTest.java) { #run-source } This line will complement the source with a consumer function—in this example we print out the numbers to the console—and pass this little stream @@ -79,20 +79,20 @@ terminate, because the `ActorSystem` is never terminated. 
Luckily `runForeach` returns a @scala[`Future[Done]`]@java[`CompletionStage`] which resolves when the stream finishes: Scala -: @@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #run-source-and-terminate } +: @@snip [QuickStartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala) { #run-source-and-terminate } Java -: @@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #run-source-and-terminate } +: @@snip [QuickStartDocTest.java](/akka-docs/src/test/java/jdocs/stream/QuickStartDocTest.java) { #run-source-and-terminate } You may wonder where the Actor gets created that runs the stream, and you are probably also asking yourself what this `materializer` means. In order to get this value we first need to create an Actor system: Scala -: @@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #create-materializer } +: @@snip [QuickStartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala) { #create-materializer } Java -: @@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #create-materializer } +: @@snip [QuickStartDocTest.java](/akka-docs/src/test/java/jdocs/stream/QuickStartDocTest.java) { #create-materializer } There are other ways to create a materializer, e.g. from an `ActorContext` when using streams from within Actors. The @@ -108,10 +108,10 @@ be reused, incorporated into a larger design. We may choose to transform the source of integers and write it to a file instead: Scala -: @@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #transform-source } +: @@snip [QuickStartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala) { #transform-source } Java -: @@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #transform-source } +: @@snip [QuickStartDocTest.java](/akka-docs/src/test/java/jdocs/stream/QuickStartDocTest.java) { #transform-source } First we use the `scan` operator to run a computation over the whole stream: starting with the number 1 (@scala[`BigInt(1)`]@java[`BigInteger.ONE`]) we multiply by each of @@ -132,7 +132,7 @@ whether the stream terminated normally or exceptionally. Here is another example that you can edit and run in the browser: -@@fiddle [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #fiddle_code template=Akka layout=v75 minheight=400px } +@@fiddle [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #fiddle_code template=Akka layout=v75 minheight=400px } ## Reusable Pieces @@ -147,10 +147,10 @@ plain English), we need a starting point that is like a source but with an “open” input. In Akka Streams this is called a `Flow`: Scala -: @@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #transform-sink } +: @@snip [QuickStartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala) { #transform-sink } Java -: @@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #transform-sink } +: @@snip [QuickStartDocTest.java](/akka-docs/src/test/java/jdocs/stream/QuickStartDocTest.java) { #transform-sink } Starting from a flow of strings we convert each to `ByteString` and then feed to the already known file-writing `Sink`. 
The resulting blueprint @@ -167,10 +167,10 @@ attaching it to our `factorials` source—after a small adaptation to turn the numbers into strings: Scala -: @@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #use-transformed-sink } +: @@snip [QuickStartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala) { #use-transformed-sink } Java -: @@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #use-transformed-sink } +: @@snip [QuickStartDocTest.java](/akka-docs/src/test/java/jdocs/stream/QuickStartDocTest.java) { #use-transformed-sink } ## Time-Based Processing @@ -183,10 +183,10 @@ second is the factorial of one, and so on. We combine these two by forming strings like `"3! = 6"`. Scala -: @@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #add-streams } +: @@snip [QuickStartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala) { #add-streams } Java -: @@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #add-streams } +: @@snip [QuickStartDocTest.java](/akka-docs/src/test/java/jdocs/stream/QuickStartDocTest.java) { #add-streams } All operations so far have been time-independent and could have been performed in the same fashion on strict collections of elements. The next line @@ -225,10 +225,10 @@ allow to control what should happen in such scenarios. Here's the data model we'll be working with throughout the quickstart examples: Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #model } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #model } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #model } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #model } @@@ note @@ -247,10 +247,10 @@ In order to prepare our environment by creating an `ActorSystem` and `ActorMater which will be responsible for materializing and running the streams we are about to create: Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #materializer-setup } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #materializer-setup } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #materializer-setup } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #materializer-setup } The `ActorMaterializer` can optionally take `ActorMaterializerSettings` which can be used to define materialization properties, such as default buffer sizes (see also @ref:[Buffers for asynchronous operators](stream-rate.md#async-stream-buffers)), the dispatcher to @@ -259,10 +259,10 @@ be used by the pipeline etc. These can be overridden with `withAttributes` on `F Let's assume we have a stream of tweets readily available. 
In Akka this is expressed as a @scala[`Source[Out, M]`]@java[`Source`]: Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweet-source } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweet-source } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweet-source } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweet-source } Streams always start flowing from a @scala[`Source[Out,M1]`]@java[`Source`] then can continue through @scala[`Flow[In,Out,M2]`]@java[`Flow`] elements or more advanced operators to finally be consumed by a @scala[`Sink[In,M3]`]@java[`Sink`] @scala[(ignore the type parameters `M1`, `M2` @@ -278,10 +278,10 @@ however they operate on streams and not collections of data (which is a very imp only make sense in streaming and vice versa): Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-filter-map } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-filter-map } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-filter-map } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-filter-map } Finally in order to @ref:[materialize](stream-flows-and-basics.md#stream-materialization) and run the stream computation we need to attach the Flow to a @scala[`Sink`]@java[`Sink`] that will get the Flow running. The simplest way to do this is to call @@ -290,18 +290,18 @@ the @scala[`Sink` companion object]@java[`Sink class`]. For now let's print each author: Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-foreachsink-println } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-foreachsink-println } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-foreachsink-println } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-foreachsink-println } or by using the shorthand version (which are defined only for the most popular Sinks such as `Sink.fold` and `Sink.foreach`): Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-foreach-println } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-foreach-println } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-foreach-println } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-foreach-println } Materializing and running a stream always requires a `Materializer` to be @scala[in implicit scope (or passed in explicitly, like this: `.run(materializer)`)]@java[passed in explicitly, like this: `.run(mat)`]. 
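A sketch of this transformation chain, using a deliberately minimal stand-in for the quickstart's tweet model:

```scala
import akka.NotUsed
import akka.stream.scaladsl.{ Sink, Source }

final case class Author(handle: String)
final case class Tweet(author: Author, body: String) {
  def hashtags: Set[String] =
    body.split(" ").collect { case t if t.startsWith("#") => t }.toSet
}

// filter and map work element by element, just like their collection namesakes
def akkaAuthors(tweets: Source[Tweet, NotUsed]): Source[Author, NotUsed] =
  tweets.filter(_.hashtags.contains("#akka")).map(_.author)

// akkaAuthors(tweets).runWith(Sink.foreach(println)) // implicit Materializer needed
```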
@@ -309,10 +309,10 @@ like this: `.run(materializer)`)]@java[passed in explicitly, like this: `.run(ma The complete snippet looks like this: Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #first-sample } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #first-sample } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #first-sample } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #first-sample } ## Flattening sequences in streams @@ -322,10 +322,10 @@ works on Scala Collections. In order to get a flattened stream of hashtags from operator: Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #hashtags-mapConcat } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #hashtags-mapConcat } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #hashtags-mapConcat } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #hashtags-mapConcat } @@@ note @@ -356,10 +356,10 @@ at the expense of not reading as familiarly as collection transformations. Graphs are constructed using `GraphDSL` like this: Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #graph-dsl-broadcast } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #graph-dsl-broadcast } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #graph-dsl-broadcast } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #graph-dsl-broadcast } As you can see, @scala[inside the `GraphDSL` we use an implicit graph builder `b` to mutably construct the graph using the `~>` "edge operator" (also read as "connect" or "via" or "to"). The operator is provided implicitly @@ -392,10 +392,10 @@ and must be handled explicitly. For example, if we are only interested in the "* elements*" this can be expressed using the `buffer` element: Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-slow-consumption-dropHead } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-slow-consumption-dropHead } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-slow-consumption-dropHead } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-slow-consumption-dropHead } The `buffer` element takes an explicit and required `OverflowStrategy`, which defines how the buffer should react when it receives another element while it is full. 
Strategies provided include dropping the oldest element (`dropHead`), @@ -414,10 +414,10 @@ but in general it is possible to deal with finite streams and come up with a nic First, let's write such an element counter using @scala[`Sink.fold` and]@java[`Flow.of(Class)` and `Sink.fold` to] see what the types look like: Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-fold-count } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-fold-count } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-fold-count } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-fold-count } @scala[First we prepare a reusable `Flow` that will change each incoming tweet into an integer of value `1`. We'll use this in order to combine those with a `Sink.fold` that will sum all `Int` elements of the stream and make its result available as @@ -446,20 +446,20 @@ for example one that consumes a live stream of tweets within a minute, the mater will be different, as illustrated by this example: Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-runnable-flow-materialized-twice } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-runnable-flow-materialized-twice } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-runnable-flow-materialized-twice } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-runnable-flow-materialized-twice } Many elements in Akka Streams provide materialized values which can be used for obtaining either results of computation or steering these elements; this will be discussed in detail in @ref:[Stream Materialization](stream-flows-and-basics.md#stream-materialization). Summing up this section, now we know what happens behind the scenes when we run this one-liner, which is equivalent to the multi-line version above: Scala -: @@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-fold-count-oneline } +: @@snip [TwitterStreamQuickstartDocSpec.scala](/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-fold-count-oneline } Java -: @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-fold-count-oneline } +: @@snip [TwitterStreamQuickstartDocTest.java](/akka-docs/src/test/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-fold-count-oneline } @@@ note diff --git a/akka-docs/src/main/paradox/stream/stream-rate.md b/akka-docs/src/main/paradox/stream/stream-rate.md index ba2daa5270..c5757ef578 100644 --- a/akka-docs/src/main/paradox/stream/stream-rate.md +++ b/akka-docs/src/main/paradox/stream/stream-rate.md @@ -25,10 +25,10 @@ asynchronously means that an operator, after handing out an element to its downs process the next message.
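As a rough, self-contained sketch of such pipelined processing (assuming the `.async` operator of Akka Streams 2.5; the documented `#pipelining` snippet referenced just below remains the authoritative example), inserting asynchronous boundaries lets adjacent operators work on different elements concurrently:

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source

object PipeliningSketch extends App {
  implicit val system: ActorSystem = ActorSystem("rate")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // Each .async ends a fused island, so the stages around it run on
  // separate actors and may process different elements at the same time.
  Source(1 to 3)
    .map { i => println(s"A: $i"); i }
    .async
    .map { i => println(s"B: $i"); i }
    .async
    .runForeach(i => println(s"C: $i"))
}
```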
To demonstrate what we mean by this, let's take a look at the following example: Scala -: @@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #pipelining } +: @@snip [StreamBuffersRateSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala) { #pipelining } Java -: @@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #pipelining } +: @@snip [StreamBuffersRateDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamBuffersRateDocTest.java) { #pipelining } Running the above example, one of the possible outputs looks like this: @@ -79,27 +79,27 @@ Alternatively they can be set by passing a @scala[@scaladoc[`ActorMaterializerSettings`](akka.stream.ActorMaterializerSettings)]@java[@javadoc[`ActorMaterializerSettings`](akka.stream.ActorMaterializerSettings)] to the materializer: Scala -: @@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #materializer-buffer } +: @@snip [StreamBuffersRateSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala) { #materializer-buffer } Java -: @@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #materializer-buffer } +: @@snip [StreamBuffersRateDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamBuffersRateDocTest.java) { #materializer-buffer } If the buffer size needs to be set for segments of a @scala[@scaladoc[`Flow`](akka.stream.scaladsl.Flow)]@java[@javadoc[`Flow`](akka.stream.javadsl.Flow)] only, it is possible by defining a separate @scala[@scaladoc[`Flow`](akka.stream.scaladsl.Flow)]@java[@javadoc[`Flow`](akka.stream.javadsl.Flow)] with these attributes: Scala -: @@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #section-buffer } +: @@snip [StreamBuffersRateSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala) { #section-buffer } Java -: @@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #section-buffer } +: @@snip [StreamBuffersRateDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamBuffersRateDocTest.java) { #section-buffer } Here is an example of code that demonstrates some of the issues caused by internal buffers: Scala -: @@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #buffering-abstraction-leak } +: @@snip [StreamBuffersRateSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala) { #buffering-abstraction-leak } Java -: @@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #buffering-abstraction-leak } +: @@snip [StreamBuffersRateDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamBuffersRateDocTest.java) { #buffering-abstraction-leak } Running the above example, one would expect the number *3* to be printed every 3 seconds (the `conflateWithSeed` step here is configured so that it counts the number of elements received before the downstream @scala[@scaladoc[`ZipWith`](akka.stream.scaladsl.ZipWith$)]@java[@javadoc[`ZipWith`](akka.stream.javadsl.ZipWith$)] consumes @@ -124,10 +124,10 @@ The example below will ensure that 1000 jobs (but not more) are dequeued from an stored locally in memory - relieving the external system: Scala -: @@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-backpressure } +: @@snip
[StreamBuffersRateSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-backpressure } Java -: @@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-backpressure } +: @@snip [StreamBuffersRateDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-backpressure } The next example will also queue up 1000 jobs locally, but if there are more jobs waiting @@ -137,19 +137,19 @@ it must be noted that this will drop the *youngest* waiting job. If some "fairne we want to be nice to jobs that have been waiting long, then this option can be useful. Scala -: @@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-droptail } +: @@snip [StreamBuffersRateSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-droptail } Java -: @@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-droptail } +: @@snip [StreamBuffersRateDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-droptail } Instead of dropping the youngest element from the tail of the buffer, a new element can be dropped without enqueueing it to the buffer at all. Scala -: @@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-dropnew } +: @@snip [StreamBuffersRateSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-dropnew } Java -: @@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-dropnew } +: @@snip [StreamBuffersRateDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-dropnew } Here is another example with a queue of 1000 jobs, but it makes space for the new element by dropping one element from the *head* of the buffer. This is the *oldest* @@ -159,20 +159,20 @@ retransmitted soon, (in fact a retransmitted duplicate might be already in the q so it makes sense to drop it first. Scala -: @@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-drophead } +: @@snip [StreamBuffersRateSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-drophead } Java -: @@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-drophead } +: @@snip [StreamBuffersRateDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-drophead } Compared to the dropping strategies above, dropBuffer drops all the 1000 jobs it has enqueued once the buffer gets full. This aggressive strategy is useful when dropping jobs is preferred to delaying jobs.
Scala -: @@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-dropbuffer } +: @@snip [StreamBuffersRateSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-dropbuffer } Java -: @@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-dropbuffer } +: @@snip [StreamBuffersRateDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-dropbuffer } If our imaginary external job provider is a client using our API, we might want to enforce that the client cannot have more than 1000 queued jobs @@ -181,10 +181,10 @@ achievable by the error strategy which fails the stream once the buffer gets full. Scala -: @@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-fail } +: @@snip [StreamBuffersRateSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-fail } Java -: @@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-fail } +: @@snip [StreamBuffersRateDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-fail } ## Rate transformation @@ -197,10 +197,10 @@ Below is an example snippet that summarizes fast stream of elements to a standar that have arrived while the stats have been calculated. Scala -: @@snip [RateTransformationDocSpec.scala]($code$/scala/docs/stream/RateTransformationDocSpec.scala) { #conflate-summarize } +: @@snip [RateTransformationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala) { #conflate-summarize } Java -: @@snip [RateTransformationDocTest.java]($code$/java/jdocs/stream/RateTransformationDocTest.java) { #conflate-summarize } +: @@snip [RateTransformationDocTest.java](/akka-docs/src/test/java/jdocs/stream/RateTransformationDocTest.java) { #conflate-summarize } This example demonstrates that such a flow's rate is decoupled. The element rate at the start of the flow can be much higher than the element rate at the end of the flow. @@ -210,10 +210,10 @@ The example below demonstrates how `conflate` can be used to randomly drop eleme to keep up with the producer. Scala -: @@snip [RateTransformationDocSpec.scala]($code$/scala/docs/stream/RateTransformationDocSpec.scala) { #conflate-sample } +: @@snip [RateTransformationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala) { #conflate-sample } Java -: @@snip [RateTransformationDocTest.java]($code$/java/jdocs/stream/RateTransformationDocTest.java) { #conflate-sample } +: @@snip [RateTransformationDocTest.java](/akka-docs/src/test/java/jdocs/stream/RateTransformationDocTest.java) { #conflate-sample } ### Understanding extrapolate and expand @@ -225,36 +225,36 @@ As a simple use case of `extrapolate`, here is a flow that repeats the last emit the consumer signals demand and the producer cannot supply new elements yet.
Scala -: @@snip [RateTransformationDocSpec.scala]($code$/scala/docs/stream/RateTransformationDocSpec.scala) { #extrapolate-last } +: @@snip [RateTransformationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala) { #extrapolate-last } Java -: @@snip [RateTransformationDocTest.java]($code$/java/jdocs/stream/RateTransformationDocTest.java) { #extrapolate-last } +: @@snip [RateTransformationDocTest.java](/akka-docs/src/test/java/jdocs/stream/RateTransformationDocTest.java) { #extrapolate-last } For situations where there may be downstream demand before any element is emitted from upstream, you can use the `initial` parameter of `extrapolate` to "seed" the stream. Scala -: @@snip [RateTransformationDocSpec.scala]($code$/scala/docs/stream/RateTransformationDocSpec.scala) { #extrapolate-seed } +: @@snip [RateTransformationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala) { #extrapolate-seed } Java -: @@snip [RateTransformationDocTest.java]($code$/java/jdocs/stream/RateTransformationDocTest.java) { #extrapolate-seed } +: @@snip [RateTransformationDocTest.java](/akka-docs/src/test/java/jdocs/stream/RateTransformationDocTest.java) { #extrapolate-seed } `extrapolate` and `expand` also allow producing metainformation based on demand signalled from downstream. Leveraging this, here is a flow that tracks and reports a drift between a fast consumer and a slow producer. Scala -: @@snip [RateTransformationDocSpec.scala]($code$/scala/docs/stream/RateTransformationDocSpec.scala) { #extrapolate-drift } +: @@snip [RateTransformationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala) { #extrapolate-drift } Java -: @@snip [RateTransformationDocTest.java]($code$/java/jdocs/stream/RateTransformationDocTest.java) { #extrapolate-drift } +: @@snip [RateTransformationDocTest.java](/akka-docs/src/test/java/jdocs/stream/RateTransformationDocTest.java) { #extrapolate-drift } And here's a more concise representation with `expand`. Scala -: @@snip [RateTransformationDocSpec.scala]($code$/scala/docs/stream/RateTransformationDocSpec.scala) { #expand-drift } +: @@snip [RateTransformationDocSpec.scala](/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala) { #expand-drift } Java -: @@snip [RateTransformationDocTest.java]($code$/java/jdocs/stream/RateTransformationDocTest.java) { #expand-drift } +: @@snip [RateTransformationDocTest.java](/akka-docs/src/test/java/jdocs/stream/RateTransformationDocTest.java) { #expand-drift } The difference is due to the different handling of the `Iterator`-generating argument. @@ -264,4 +264,4 @@ an `Iterator` and emits elements downstream from it. This makes `expand` able to transform or even filter out (by providing an empty `Iterator`) the "original" elements. Regardless, since we provide a non-empty `Iterator` in both examples, this means that the -output of this flow is going to report a drift of zero if the producer is fast enough - or a larger drift otherwise. \ No newline at end of file +output of this flow is going to report a drift of zero if the producer is fast enough - or a larger drift otherwise. diff --git a/akka-docs/src/main/paradox/stream/stream-refs.md b/akka-docs/src/main/paradox/stream/stream-refs.md index 23e4fa739e..d4bd453d12 100644 --- a/akka-docs/src/main/paradox/stream/stream-refs.md +++ b/akka-docs/src/main/paradox/stream/stream-refs.md @@ -81,19 +81,19 @@ That sink materializes the `SourceRef` that you can then send to other nodes.
Pl `Future` so you will have to use the pipeTo Scala -: @@snip [FlowStreamRefsDocSpec.scala]($code$/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-source } +: @@snip [FlowStreamRefsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-source } Java -: @@snip [FlowStreamRefsDocTest.java]($code$/java/jdocs/stream/FlowStreamRefsDocTest.java) { #offer-source } +: @@snip [FlowStreamRefsDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowStreamRefsDocTest.java) { #offer-source } The origin actor which creates and owns the Source could also perform some validation or additional setup when preparing the source. Once it has handed out the `SourceRef` the remote side can run it like this: Scala -: @@snip [FlowStreamRefsDocSpec.scala]($code$/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-source-use } +: @@snip [FlowStreamRefsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-source-use } Java -: @@snip [FlowStreamRefsDocTest.java]($code$/java/jdocs/stream/FlowStreamRefsDocTest.java) { #offer-source-use } +: @@snip [FlowStreamRefsDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowStreamRefsDocTest.java) { #offer-source-use } The process of preparing and running a `SourceRef` powered distributed stream is shown by the animation below: @@ -122,19 +122,19 @@ into various other systems (e.g. any of the Alpakka provided Sinks). @@@ Scala -: @@snip [FlowStreamRefsDocSpec.scala]($code$/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-sink } +: @@snip [FlowStreamRefsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-sink } Java -: @@snip [FlowStreamRefsDocTest.java]($code$/java/jdocs/stream/FlowStreamRefsDocTest.java) { #offer-sink } +: @@snip [FlowStreamRefsDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowStreamRefsDocTest.java) { #offer-sink } Using the offered `SinkRef` to send data to the origin of the Sink is also simple, as we can treat the SinkRef just as any other Sink and directly `runWith` or `run` with it. 
Scala -: @@snip [FlowStreamRefsDocSpec.scala]($code$/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-sink-use } +: @@snip [FlowStreamRefsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-sink-use } Java -: @@snip [FlowStreamRefsDocTest.java]($code$/java/jdocs/stream/FlowStreamRefsDocTest.java) { #offer-sink-use } +: @@snip [FlowStreamRefsDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowStreamRefsDocTest.java) { #offer-sink-use } The process of preparing and running a `SinkRef` powered distributed stream is shown by the animation below: @@ -190,10 +190,10 @@ globally (`akka.stream.materializer.stream-ref.subscription-timeout`), but also Scala -: @@snip [FlowStreamRefsDocSpec.scala]($code$/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #attr-sub-timeout } +: @@snip [FlowStreamRefsDocSpec.scala](/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #attr-sub-timeout } Java -: @@snip [FlowStreamRefsDocTest.java]($code$/java/jdocs/stream/FlowStreamRefsDocTest.java) { #attr-sub-timeout } +: @@snip [FlowStreamRefsDocTest.java](/akka-docs/src/test/java/jdocs/stream/FlowStreamRefsDocTest.java) { #attr-sub-timeout } ## General configuration @@ -201,4 +201,4 @@ Java Other settings can be set globally, in your `application.conf`, by overriding any of the following values in the `akka.stream.materializer.stream-ref.*` keyspace: -@@snip [reference.conf]($akka$/akka-stream/src/main/resources/reference.conf) { #stream-ref } +@@snip [reference.conf](/akka-stream/src/main/resources/reference.conf) { #stream-ref } diff --git a/akka-docs/src/main/paradox/stream/stream-substream.md b/akka-docs/src/main/paradox/stream/stream-substream.md index f69edd1d1c..bfdc093fda 100644 --- a/akka-docs/src/main/paradox/stream/stream-substream.md +++ b/akka-docs/src/main/paradox/stream/stream-substream.md @@ -27,10 +27,10 @@ operators that create substreams are listed on @ref[Nesting and flattening opera A typical operation that generates substreams is `groupBy`. Scala -: @@snip [SubstreamDocSpec.scala]($code$/scala/docs/stream/SubstreamDocSpec.scala) { #groupBy1 } +: @@snip [SubstreamDocSpec.scala](/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala) { #groupBy1 } Java -: @@snip [SubstreamDocTest.java]($code$/java/jdocs/stream/SubstreamDocTest.java) { #groupBy1 } +: @@snip [SubstreamDocTest.java](/akka-docs/src/test/java/jdocs/stream/SubstreamDocTest.java) { #groupBy1 } ![stream-substream-groupBy1.png](../../images/stream-substream-groupBy1.png) @@ -46,10 +46,10 @@ all transformations are applied to all encountered substreams in the same fashio So, if you add the following `Sink`, that is added to each of the substreams as in the below diagram. Scala -: @@snip [SubstreamDocSpec.scala]($code$/scala/docs/stream/SubstreamDocSpec.scala) { #groupBy2 } +: @@snip [SubstreamDocSpec.scala](/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala) { #groupBy2 } Java -: @@snip [SubstreamDocTest.java]($code$/java/jdocs/stream/SubstreamDocTest.java) { #groupBy2 } +: @@snip [SubstreamDocTest.java](/akka-docs/src/test/java/jdocs/stream/SubstreamDocTest.java) { #groupBy2 } ![stream-substream-groupBy2.png](../../images/stream-substream-groupBy2.png) @@ -59,10 +59,10 @@ merge or concat substreams into the master stream again. The `mergeSubstreams` method merges an unbounded number of substreams back to the master stream. 
Scala -: @@snip [SubstreamDocSpec.scala]($code$/scala/docs/stream/SubstreamDocSpec.scala) { #groupBy3 } +: @@snip [SubstreamDocSpec.scala](/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala) { #groupBy3 } Java -: @@snip [SubstreamDocTest.java]($code$/java/jdocs/stream/SubstreamDocTest.java) { #groupBy3 } +: @@snip [SubstreamDocTest.java](/akka-docs/src/test/java/jdocs/stream/SubstreamDocTest.java) { #groupBy3 } ![stream-substream-groupBy3.png](../../images/stream-substream-groupBy3.png) @@ -70,10 +70,10 @@ You can limit the number of active substreams running and being merged at a time with either the `mergeSubstreamsWithParallelism` or `concatSubstreams` method. Scala -: @@snip [SubstreamDocSpec.scala]($code$/scala/docs/stream/SubstreamDocSpec.scala) { #groupBy4 } +: @@snip [SubstreamDocSpec.scala](/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala) { #groupBy4 } Java -: @@snip [SubstreamDocTest.java]($code$/java/jdocs/stream/SubstreamDocTest.java) { #groupBy4 } +: @@snip [SubstreamDocTest.java](/akka-docs/src/test/java/jdocs/stream/SubstreamDocTest.java) { #groupBy4 } However, since the number of running (i.e. not yet completed) substreams is capped, be careful that these methods do not cause deadlocks with back pressure, as in the diagram below. @@ -95,19 +95,19 @@ a new substream is generated, and the succeeding elements after split will flow whereas `splitAfter` flows the next element to the new substream after the element on which the predicate returned true. Scala -: @@snip [SubstreamDocSpec.scala]($code$/scala/docs/stream/SubstreamDocSpec.scala) { #splitWhenAfter } +: @@snip [SubstreamDocSpec.scala](/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala) { #splitWhenAfter } Java -: @@snip [SubstreamDocTest.java]($code$/java/jdocs/stream/SubstreamDocTest.java) { #splitWhenAfter } +: @@snip [SubstreamDocTest.java](/akka-docs/src/test/java/jdocs/stream/SubstreamDocTest.java) { #splitWhenAfter } These are useful when you have scanned over something and don't need to care about anything behind it. A typical example is counting the number of characters for each line like below. Scala -: @@snip [SubstreamDocSpec.scala]($code$/scala/docs/stream/SubstreamDocSpec.scala) { #wordCount } +: @@snip [SubstreamDocSpec.scala](/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala) { #wordCount } Java -: @@snip [SubstreamDocTest.java]($code$/java/jdocs/stream/SubstreamDocTest.java) { #wordCount } +: @@snip [SubstreamDocTest.java](/akka-docs/src/test/java/jdocs/stream/SubstreamDocTest.java) { #wordCount } This prints out the following output. @@ -130,10 +130,10 @@ The function `f` of `flatMapConcat` transforms each input element into a `Source into the output stream by concatenation. Scala -: @@snip [SubstreamDocSpec.scala]($code$/scala/docs/stream/SubstreamDocSpec.scala) { #flatMapConcat } +: @@snip [SubstreamDocSpec.scala](/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala) { #flatMapConcat } Java -: @@snip [SubstreamDocTest.java]($code$/java/jdocs/stream/SubstreamDocTest.java) { #flatMapConcat } +: @@snip [SubstreamDocTest.java](/akka-docs/src/test/java/jdocs/stream/SubstreamDocTest.java) { #flatMapConcat } ![stream-substream-flatMapConcat1.png](../../images/stream-substream-flatMapConcat1.png) @@ -151,9 +151,9 @@ Elements from all the substreams are concatenated to the sink. Instead, up to `breadth` number of streams emit elements at any given time.
Scala -: @@snip [SubstreamDocSpec.scala]($code$/scala/docs/stream/SubstreamDocSpec.scala) { #flatMapMerge } +: @@snip [SubstreamDocSpec.scala](/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala) { #flatMapMerge } Java -: @@snip [SubstreamDocTest.java]($code$/java/jdocs/stream/SubstreamDocTest.java) { #flatMapMerge } +: @@snip [SubstreamDocTest.java](/akka-docs/src/test/java/jdocs/stream/SubstreamDocTest.java) { #flatMapMerge } ![stream-substream-flatMapMerge.png](../../images/stream-substream-flatMapMerge.png) diff --git a/akka-docs/src/main/paradox/stream/stream-testkit.md b/akka-docs/src/main/paradox/stream/stream-testkit.md index 446b91009d..6aa9e7c77d 100644 --- a/akka-docs/src/main/paradox/stream/stream-testkit.md +++ b/akka-docs/src/main/paradox/stream/stream-testkit.md @@ -34,10 +34,10 @@ asserting on the results that sink produced. Here is an example of a test for a sink: Scala -: @@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #strict-collection } +: @@snip [StreamTestKitDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala) { #strict-collection } Java -: @@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #strict-collection } +: @@snip [StreamTestKitDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamTestKitDocTest.java) { #strict-collection } The same strategy can be applied for sources as well. In the next example we have a source that produces an infinite stream of elements. Such a source can be @@ -45,20 +45,20 @@ tested by asserting that first arbitrary number of elements hold some condition. Here the `take` operator and `Sink.seq` are very useful. Scala -: @@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #grouped-infinite } +: @@snip [StreamTestKitDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala) { #grouped-infinite } Java -: @@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #grouped-infinite } +: @@snip [StreamTestKitDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamTestKitDocTest.java) { #grouped-infinite } When testing a flow we need to attach a source and a sink. As both stream ends are under our control, we can choose sources that test various edge cases of the flow and sinks that ease assertions. Scala -: @@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #folded-stream } +: @@snip [StreamTestKitDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala) { #folded-stream } Java -: @@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #folded-stream } +: @@snip [StreamTestKitDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamTestKitDocTest.java) { #folded-stream } ## TestKit @@ -71,10 +71,10 @@ One of the more straightforward tests would be to materialize stream to a to the probe.
Scala -: @@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #pipeto-testprobe } +: @@snip [StreamTestKitDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala) { #pipeto-testprobe } Java -: @@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #pipeto-testprobe } +: @@snip [StreamTestKitDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamTestKitDocTest.java) { #pipeto-testprobe } Instead of materializing to a future, we can use a `Sink.actorRef` that sends all incoming elements to the given `ActorRef`. Now we can use @@ -83,20 +83,20 @@ arrive. We can also assert stream completion by expecting the `onCompleteMessage` which was given to `Sink.actorRef`. Scala -: @@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #sink-actorref } +: @@snip [StreamTestKitDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala) { #sink-actorref } Java -: @@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #sink-actorref } +: @@snip [StreamTestKitDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamTestKitDocTest.java) { #sink-actorref } Similarly to `Sink.actorRef` that provides control over received elements, we can use `Source.actorRef` and have full control over elements to be sent. Scala -: @@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #source-actorref } +: @@snip [StreamTestKitDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala) { #source-actorref } Java -: @@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #source-actorref } +: @@snip [StreamTestKitDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamTestKitDocTest.java) { #source-actorref } ## Streams TestKit @@ -112,35 +112,35 @@ A sink returned by `TestSink.probe` allows manual control over demand and assertions over elements coming downstream. Scala -: @@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #test-sink-probe } +: @@snip [StreamTestKitDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala) { #test-sink-probe } Java -: @@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #test-sink-probe } +: @@snip [StreamTestKitDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamTestKitDocTest.java) { #test-sink-probe } A source returned by `TestSource.probe` can be used for asserting demand or controlling when the stream is completed or ended with an error. Scala -: @@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #test-source-probe } +: @@snip [StreamTestKitDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala) { #test-source-probe } Java -: @@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #test-source-probe } +: @@snip [StreamTestKitDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamTestKitDocTest.java) { #test-source-probe } You can also inject exceptions and test sink behavior on error conditions.
Scala -: @@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #injecting-failure } +: @@snip [StreamTestKitDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala) { #injecting-failure } Java -: @@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #injecting-failure } +: @@snip [StreamTestKitDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamTestKitDocTest.java) { #injecting-failure } Test source and sink can be used together in combination when testing flows. Scala -: @@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #test-source-and-sink } +: @@snip [StreamTestKitDocSpec.scala](/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala) { #test-source-and-sink } Java -: @@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #test-source-and-sink } +: @@snip [StreamTestKitDocTest.java](/akka-docs/src/test/java/jdocs/stream/StreamTestKitDocTest.java) { #test-source-and-sink } ## Fuzzing Mode diff --git a/akka-docs/src/main/paradox/testing.md b/akka-docs/src/main/paradox/testing.md index 2d3c27fa6d..1a292a7759 100644 --- a/akka-docs/src/main/paradox/testing.md +++ b/akka-docs/src/main/paradox/testing.md @@ -38,10 +38,10 @@ The `TestKit` class contains a collection of tools which makes this common task easy. Scala -: @@snip [PlainWordSpec.scala]($code$/scala/docs/testkit/PlainWordSpec.scala) { #plain-spec } +: @@snip [PlainWordSpec.scala](/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala) { #plain-spec } Java -: @@snip [TestKitSampleTest.java]($code$/java/jdocs/testkit/TestKitSampleTest.java) { #fullsample } +: @@snip [TestKitSampleTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitSampleTest.java) { #fullsample } The `TestKit` contains an actor named `testActor` which is the entry point for messages to be examined with the various `expectMsg...` @@ -71,10 +71,10 @@ The above mentioned @scala[`expectMsg`]@java[`expectMsgEquals`] is not the only assertions concerning received messages, the full set is this: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-expect } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-expect } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-expect } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-expect } In these examples, the maximum durations you will find mentioned below are left out, in which case they use the default value from configuration item @@ -217,10 +217,10 @@ allows assertions on log messages, including those which are generated by exceptions: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #event-filter } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #event-filter } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-event-filter } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-event-filter } If a number of occurrences is specific—as demonstrated above—then `intercept` will block until that number of matching messages have been received or the @@ -248,18 +248,18 @@ you want to test timing-sensitive behavior this can come in handy. 
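Concretely, the scheduling-override pattern that the `#timer` and `#timer-test` snippets referenced below rely on can be sketched like this (a hypothetical `Scheduled` actor, assuming the classic actor API and `akka.actor.Cancellable.alreadyCancelled`; not taken from the snippet files):

```scala
import scala.concurrent.duration._
import akka.actor.{ Actor, Cancellable }

// Production actor: the scheduling call is isolated in one overridable method.
class Scheduled extends Actor {
  import context.dispatcher
  protected def schedule(): Cancellable =
    context.system.scheduler.scheduleOnce(1.second, self, "tick")

  override def preStart(): Unit = { schedule() }

  def receive: Receive = {
    case "tick" => println("tock") // the behavior under test
  }
}

// Test subclass: deliver the message right away so the test never sleeps.
class ScheduledImmediately extends Scheduled {
  override protected def schedule(): Cancellable = {
    self ! "tick"
    Cancellable.alreadyCancelled
  }
}
```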
Say for instance you want to test an actor that schedules a task: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #timer } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #timer } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #timer } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #timer } You can override the method that does the scheduling in your test: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #timer-test } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #timer-test } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #timer-test } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #timer-test } ### Timing Assertions @@ -272,10 +272,10 @@ checked external to the examination, which is facilitated by a new construct for managing time constraints: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-within } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-within } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-within } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-within } The block @scala[given to]@java[in] `within` must complete after a @ref:[Duration](common/duration.md) which is between `min` and `max`, where the former defaults to zero. The @@ -318,10 +318,10 @@ You can scale other durations with the same factor by using the @scala[implicit in `akka.testkit` package object to add dilated function to `Duration`]@java[`dilated` method in `TestKit`]. Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #duration-dilation } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #duration-dilation } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #duration-dilation } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #duration-dilation } @@@ div { .group-scala } @@ -330,7 +330,7 @@ Java If you want the sender of messages inside your TestKit-based tests to be the `testActor` mix in `ImplicitSender` into your test. -@@snip [PlainWordSpec.scala]($code$/scala/docs/testkit/PlainWordSpec.scala) { #implicit-sender } +@@snip [PlainWordSpec.scala](/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala) { #implicit-sender } @@@ @@ -345,12 +345,12 @@ implementation called `TestProbe`.] 
The functionality is best explained using a small example: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #imports-test-probe } -@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #my-double-echo } -@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #imports-test-probe } +@@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #my-double-echo } +@@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-probe } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-probe } @scala[Here the system under test is simulated by `MyDoubleEcho`, which is supposed to mirror its input to two outputs. Attaching two test probes enables @@ -365,19 +365,19 @@ If you have many test probes, you can name them to get meaningful actor names in test logs and assertions: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-with-custom-name } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-with-custom-name } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-with-custom-name } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-with-custom-name } Probes may also be equipped with custom assertions to make your test code even more concise and clear: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-special-probe } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-special-probe } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-special-probe } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-special-probe } You have complete flexibility here in mixing and matching the `TestKit` facilities with your own checks and choosing an intuitive name for it. In real @@ -400,10 +400,10 @@ means that it is dangerous to try watching e.g. 
`TestActorRef` from a A @scala[`TestProbe`]@java[`TestKit`] can register itself for DeathWatch of any other actor: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-watch } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-watch } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-watch } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-watch } #### Replying to Messages Received by Probes @@ -414,10 +414,10 @@ so they can also reply]@java[The probe stores the sender of the last dequeued me for having the probe reply to the last received message]: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-reply } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-reply } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-reply } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-reply } #### Forwarding Messages Received by Probes @@ -429,11 +429,11 @@ network functioning]@java[The probe can also forward a received message (i.e. af reception), retaining the original sender]: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-forward-actors } -@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-forward } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-forward-actors } +@@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-forward } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-forward } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-forward } @scala[The `dest` actor will receive the same message invocation as if no test probe had intervened.] @@ -448,10 +448,10 @@ This code can be used to forward messages, e.g. in a chain `A --> Probe --> B`, as long as a certain protocol is obeyed. 
Scala -: @@snip [TestProbeSpec.scala]($akka$/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala) { #autopilot } +: @@snip [TestProbeSpec.scala](/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala) { #autopilot } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-auto-pilot } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-auto-pilot } The `run` method must return the auto-pilot for the next message, @scala[which may be `KeepRunning` to retain the current one or `NoAutoPilot` @@ -467,10 +467,10 @@ do not react to each other's deadlines or to the deadline set in an enclosing `TestKit` instance: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-within-probe } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-within-probe } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-within-probe } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-within-probe } Here, the @scala[`expectMsg`]@java[`expectMsgEquals`] call will use the default timeout. @@ -492,10 +492,10 @@ Conversely, a parent's binding to its child can be lessened as follows: For example, the structure of the code you want to test may follow this pattern: Scala -: @@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #test-example } +: @@snip [ParentChildSpec.scala](/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala) { #test-example } Java -: @@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-example } +: @@snip [ParentChildTest.java](/akka-docs/src/test/java/jdocs/testkit/ParentChildTest.java) { #test-example } #### Introduce child to its parent @@ -503,10 +503,10 @@ The first option is to avoid use of the `context.parent` function and create a child with a custom parent by passing an explicit reference to its parent instead. Scala -: @@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #test-dependentchild } +: @@snip [ParentChildSpec.scala](/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala) { #test-dependentchild } Java -: @@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-dependentchild } +: @@snip [ParentChildTest.java](/akka-docs/src/test/java/jdocs/testkit/ParentChildTest.java) { #test-dependentchild } #### Create the child using @scala[TestProbe]@java[TestKit] @@ -515,10 +515,10 @@ This will cause any messages the child actor sends to @scala[*context.parent*]@j end up in the test probe. Scala -: @@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #test-TestProbe-parent } +: @@snip [ParentChildSpec.scala](/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala) { #test-TestProbe-parent } Java -: @@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-TestProbe-parent } +: @@snip [ParentChildTest.java](/akka-docs/src/test/java/jdocs/testkit/ParentChildTest.java) { #test-TestProbe-parent } #### Using a fabricated parent @@ -527,11 +527,11 @@ create a fabricated parent in your test. This, however, does not enable you to t the parent actor in isolation. 
Scala -: @@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #test-fabricated-parent } +: @@snip [ParentChildSpec.scala](/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala) { #test-fabricated-parent } Java -: @@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-fabricated-parent-creator } -@@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-fabricated-parent } +: @@snip [ParentChildTest.java](/akka-docs/src/test/java/jdocs/testkit/ParentChildTest.java) { #test-fabricated-parent-creator } +@@snip [ParentChildTest.java](/akka-docs/src/test/java/jdocs/testkit/ParentChildTest.java) { #test-fabricated-parent } #### Externalize child making from the parent @@ -539,28 +539,28 @@ Alternatively, you can tell the parent how to create its child. There are two wa to do this: by giving it a `Props` object or by giving it a function which takes care of creating the child actor: Scala -: @@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #test-dependentparent } +: @@snip [ParentChildSpec.scala](/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala) { #test-dependentparent } Java -: @@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-dependentparent } -@@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-dependentparent-generic } +: @@snip [ParentChildTest.java](/akka-docs/src/test/java/jdocs/testkit/ParentChildTest.java) { #test-dependentparent } +@@snip [ParentChildTest.java](/akka-docs/src/test/java/jdocs/testkit/ParentChildTest.java) { #test-dependentparent-generic } Creating the @scala[`Props`]@java[`Actor`] is straightforward and the function may look like this in your test code: Scala -: @@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #child-maker-test } +: @@snip [ParentChildSpec.scala](/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala) { #child-maker-test } Java -: @@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #child-maker-test } +: @@snip [ParentChildTest.java](/akka-docs/src/test/java/jdocs/testkit/ParentChildTest.java) { #child-maker-test } And like this in your application code: Scala -: @@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #child-maker-prod } +: @@snip [ParentChildSpec.scala](/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala) { #child-maker-prod } Java -: @@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #child-maker-prod } +: @@snip [ParentChildTest.java](/akka-docs/src/test/java/jdocs/testkit/ParentChildTest.java) { #child-maker-prod } Which of these methods is the best depends on what is most important to test. The most generic option is to create the parent actor by passing it a function that is @@ -581,10 +581,10 @@ so long as all intervening actors run on this dispatcher. 
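In sketch form, deploying an actor on this dispatcher looks roughly like the following (the `Echo` actor is a hypothetical example, assuming the classic `akka.testkit` API); the bundled snippet referenced just below shows the documented form:

```scala
import akka.actor.{ Actor, ActorSystem, Props }
import akka.testkit.CallingThreadDispatcher

class Echo extends Actor {
  def receive: Receive = { case msg => sender() ! msg }
}

object CallingThreadSketch extends App {
  val system = ActorSystem("sketch")
  // With the CallingThreadDispatcher the message is processed synchronously
  // on the sending thread, which makes test runs deterministic.
  val echo = system.actorOf(Props[Echo].withDispatcher(CallingThreadDispatcher.Id))
  echo ! "hello" // handled before this line returns
  system.terminate()
}
```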
Just set the dispatcher as you normally would: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #calling-thread-dispatcher } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #calling-thread-dispatcher } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #calling-thread-dispatcher } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #calling-thread-dispatcher } ### How it works @@ -718,7 +718,7 @@ options: `akka.actor.debug.receive` — which enables the `loggable` statement to be applied to an actor’s `receive` function: -@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #logging-receive } +@@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #logging-receive } If the aforementioned setting is not given in the @ref:[Configuration](general/configuration.md#config-akka-actor), this method will pass through the given `Receive` function unmodified, meaning that @@ -774,7 +774,7 @@ support. If for some reason it is a problem to inherit from `TestKit` due to it being a concrete class instead of a trait, there’s `TestKitBase`: -@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-kit-base } +@@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-kit-base } The `implicit lazy val system` must be declared exactly like that (you can of course pass arguments to the actor system factory as needed) because trait @@ -819,7 +819,7 @@ Ray Roestenburg's example code from his blog, which unfortunately is only availa [web archive](https://web.archive.org/web/20180114133958/http://roestenburg.agilesquad.com/2011/02/unit-testing-akka-actors-with-testkit_12.html), adapted to work with Akka 2.x. -@@snip [TestKitUsageSpec.scala]($code$/scala/docs/testkit/TestKitUsageSpec.scala) { #testkit-usage } +@@snip [TestKitUsageSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala) { #testkit-usage } @@@ @@ -865,10 +865,10 @@ traditional unit testing techniques on the contained methods. Obtaining a reference is done like this: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-actor-ref } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-actor-ref } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-actor-ref } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-actor-ref } Since `TestActorRef` is generic in the actor type it returns the underlying actor with its proper static type. 
From this point on you may bring @@ -884,7 +884,7 @@ If your actor under test is a `FSM`, you may use the special `TestFSMRef` which offers all features of a normal `TestActorRef` and in addition allows access to the internal state: -@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-fsm-ref } +@@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-fsm-ref } Due to a limitation in Scala’s type inference, there is only the factory method shown above, so you will probably write code like `TestFSMRef(new MyFSM)` @@ -914,10 +914,10 @@ described below (see [CallingThreadDispatcher](#callingthreaddispatcher)); this implicitly for any actor instantiated into a `TestActorRef`. Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-behavior } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-behavior } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-behavior } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-behavior } As the `TestActorRef` is a subclass of `LocalActorRef` with a few special extras, also aspects like supervision and restarting work properly, but @@ -948,10 +948,10 @@ the `receive` method on `TestActorRef`, which will be forwarded to the underlying actor: Scala -: @@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-expecting-exceptions } +: @@snip [TestkitDocSpec.scala](/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #test-expecting-exceptions } Java -: @@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-expecting-exceptions } +: @@snip [TestKitDocTest.java](/akka-docs/src/test/java/jdocs/testkit/TestKitDocTest.java) { #test-expecting-exceptions } ### Use Cases diff --git a/akka-docs/src/main/paradox/typed-actors.md b/akka-docs/src/main/paradox/typed-actors.md index ae5526d614..b2d8f8e190 100644 --- a/akka-docs/src/main/paradox/typed-actors.md +++ b/akka-docs/src/main/paradox/typed-actors.md @@ -47,10 +47,10 @@ Before we create our first Typed Actor we should first go through the tools that it's located in `akka.actor.TypedActor`. 
Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-extension-tools } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-extension-tools } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-extension-tools } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-extension-tools } @@@ warning @@ -68,61 +68,61 @@ To create a Typed Actor you need to have one or more interfaces, and one impleme The following imports are assumed: Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #imports } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #imports } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #imports } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #imports } Our example interface: Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } Our example implementation of that interface: Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } The most trivial way of creating a Typed Actor instance of our `Squarer`: Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create1 } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create1 } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create1 } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create1 } The first type is the type of the proxy, and the second is the type of the implementation. If you need to call a specific constructor you do it like this: Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create2 } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create2 } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create2 } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create2 } Since you supply a `Props`, you can specify which dispatcher to use, what the default timeout should be, and more. Now, our `Squarer` doesn't have any methods, so we'd better add those.
Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } Alright, now we've got some methods we can call, but we need to implement those in `SquarerImpl`. Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } Excellent, now we have an interface and an implementation of that interface, and we know how to create a Typed Actor from that, so let's look at calling these methods. @@ -166,29 +166,29 @@ we *strongly* recommend that parameters passed are immutable. ### One-way message send Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-oneway } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-oneway } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-oneway } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-oneway } As simple as that! The method will be executed on another thread; asynchronously. ### Request-reply message send Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-option } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-option } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-option } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-option } This will block for as long as the timeout that was set in the `Props` of the Typed Actor, if needed. It will return `None` if a timeout occurs. Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-strict } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-strict } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-strict } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-strict } This will block for as long as the timeout that was set in the `Props` of the Typed Actor, if needed. It will throw a `java.util.concurrent.TimeoutException` if a timeout occurs. @@ -206,10 +206,10 @@ interface method. 
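For orientation while reading the snippets in this section, here is a hedged sketch of how the `Squarer` interface's return types map onto the message-send styles being discussed (the method names follow the example; treat this as an illustration, not the exact snippet contents):

```scala
import scala.concurrent.Future

trait Squarer {
  def squareDontCare(i: Int): Unit          // one-way: fire-and-forget
  def squareNowPlease(i: Int): Option[Int]  // request-reply: blocks, None on timeout
  def square(i: Int): Future[Int]           // request-reply-with-future: non-blocking
}
```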
### Request-reply-with-future message send Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-future } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-future } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-future } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-future } This call is asynchronous, and the Future returned can be used for asynchronous composition. @@ -218,18 +218,18 @@ This call is asynchronous, and the Future returned can be used for asynchronous Since Akka's Typed Actors are backed by Akka Actors they must be stopped when they aren't needed anymore. Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-stop } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-stop } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-stop } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-stop } This asynchronously stops the Typed Actor associated with the specified proxy ASAP. Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-poisonpill } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-poisonpill } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-poisonpill } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-poisonpill } This asynchronously stops the Typed Actor associated with the specified proxy after it's done with all calls that were made prior to this call. @@ -240,10 +240,10 @@ Since you can obtain a contextual Typed Actor Extension by passing in an `ActorC you can create child Typed Actors by invoking `typedActorOf(..)` on that. Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-hierarchy } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-hierarchy } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-hierarchy } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-hierarchy } You can also create a child Typed Actor in regular Akka Actors by giving the @scala[`ActorContext`]@java[`AbstractActor.ActorContext`] as an input parameter to `TypedActor.get(…)`. @@ -289,10 +289,10 @@ The ActorRef needs to accept `MethodCall` messages. Since `TypedActors` are backed by `Akka Actors`, you can use `typedActorOf` to proxy `ActorRefs` potentially residing on remote nodes. 
Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-remote } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-remote } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-remote } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-remote } @@@ div {.group-scala} @@ -300,9 +300,9 @@ Java Here's an example of how you can use traits to mix in behavior in your Typed Actors. -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-supercharge } +@@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-supercharge } -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-supercharge-usage } +@@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-supercharge-usage } @@@ @@ -315,17 +315,17 @@ Routers are not provided directly for typed actors, but it is really easy to lev To showcase this let's create typed actors that assign themselves some random `id`, so we know that, in fact, the router has sent the message to different actors: Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-router-types } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-router-types } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-router-types } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-router-types } In order to round robin among a few instances of such actors, you can create a plain untyped router, and then facade it with a `TypedActor` as shown in the example below. This works because typed actors communicate using the same mechanisms as normal actors, and method calls on them get transformed into message sends of `MethodCall` messages.
Scala -: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-router } +: @@snip [TypedActorDocSpec.scala](/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala) { #typed-router } Java -: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-router } +: @@snip [TypedActorDocTest.java](/akka-docs/src/test/java/jdocs/actor/TypedActorDocTest.java) { #typed-router } diff --git a/akka-docs/src/main/paradox/typed/actor-discovery.md b/akka-docs/src/main/paradox/typed/actor-discovery.md index 531566ba64..20100cafb3 100644 --- a/akka-docs/src/main/paradox/typed/actor-discovery.md +++ b/akka-docs/src/main/paradox/typed/actor-discovery.md @@ -36,28 +36,28 @@ First we create a `PingService` actor and register it with the `Receptionist` ag `ServiceKey` that will later be used to lookup the reference: Scala -: @@snip [ReceptionistExample]($akka$/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExampleSpec.scala) { #ping-service } +: @@snip [ReceptionistExample](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExampleSpec.scala) { #ping-service } Java -: @@snip [ReceptionistExample]($akka$/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/ReceptionistExampleTest.java) { #ping-service } +: @@snip [ReceptionistExample](/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/ReceptionistExampleTest.java) { #ping-service } Then we have another actor that requires a `PingService` to be constructed: Scala -: @@snip [ReceptionistExample]($akka$/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExampleSpec.scala) { #pinger } +: @@snip [ReceptionistExample](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExampleSpec.scala) { #pinger } Java -: @@snip [ReceptionistExample]($akka$/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/ReceptionistExampleTest.java) { #pinger } +: @@snip [ReceptionistExample](/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/ReceptionistExampleTest.java) { #pinger } Finally in the guardian actor we spawn the service as well as subscribing to any actors registering against the `ServiceKey`. Subscribing means that the guardian actor will be informed of any new registrations via a `Listing` message: Scala -: @@snip [ReceptionistExample]($akka$/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExampleSpec.scala) { #pinger-guardian } +: @@snip [ReceptionistExample](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExampleSpec.scala) { #pinger-guardian } Java -: @@snip [ReceptionistExample]($akka$/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/ReceptionistExampleTest.java) { #pinger-guardian } +: @@snip [ReceptionistExample](/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/ReceptionistExampleTest.java) { #pinger-guardian } Each time a new (which is just a single time in this example) `PingService` is registered the guardian actor spawns a pinger to ping it. 
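For readers skimming the diff, a minimal sketch of the registration side of this example may help (sketched against the typed `Receptionist` API of this Akka generation; the key name and message types are illustrative, not the exact snippet contents):

```scala
import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.receptionist.{ Receptionist, ServiceKey }
import akka.actor.typed.scaladsl.Behaviors

final case class Ping(replyTo: ActorRef[Pong.type])
case object Pong

// The key under which the service registers and is later looked up.
val PingServiceKey: ServiceKey[Ping] = ServiceKey[Ping]("pingService")

val pingService: Behavior[Ping] = Behaviors.setup { context =>
  // Register with the receptionist; subscribers to the key receive a Listing.
  context.system.receptionist ! Receptionist.Register(PingServiceKey, context.self)
  Behaviors.receiveMessage { case Ping(replyTo) =>
    replyTo ! Pong
    Behaviors.same
  }
}
```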
diff --git a/akka-docs/src/main/paradox/typed/actor-lifecycle.md b/akka-docs/src/main/paradox/typed/actor-lifecycle.md index 124b6c38c8..1bfc550518 100644 --- a/akka-docs/src/main/paradox/typed/actor-lifecycle.md +++ b/akka-docs/src/main/paradox/typed/actor-lifecycle.md @@ -25,10 +25,10 @@ The root actor, also called the guardian actor, is created along with the `Actor The root actor is defined by the behavior used to create the `ActorSystem`, named `HelloWorldMain.main` in the example below: Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world } Child actors are spawned with @unidoc[akka.actor.typed.ActorContext]'s `spawn`. In the example below, when the root actor @@ -36,19 +36,19 @@ is started, it spawns a child actor described by the behavior `HelloWorld.greete `Start` message, it creates a child actor defined by the behavior `HelloWorldBot.bot`: Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-main } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-main } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-main } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-main } To specify a dispatcher when spawning an actor use @unidoc[DispatcherSelector]. If not specified, the actor will use the default dispatcher, see @ref:[Default dispatcher](../dispatchers.md#default-dispatcher) for details. Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-main-with-dispatchers } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-main-with-dispatchers } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-main-with-dispatchers } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-main-with-dispatchers } Refer to @ref:[Actors](actors.md#introduction) for a walk-through of the above examples. 
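A condensed sketch of the dispatcher selection just shown (the `"my-dispatcher"` configuration path and the behavior names are assumptions for illustration, not the referenced snippet):

```scala
import akka.actor.typed.{ Behavior, DispatcherSelector }
import akka.actor.typed.scaladsl.Behaviors

val worker: Behavior[String] = Behaviors.receiveMessage { _ => Behaviors.same }

val guardian: Behavior[Unit] = Behaviors.setup { context =>
  // Without a selector the child runs on the default dispatcher.
  context.spawn(worker, "default-worker")
  // Look up a dispatcher defined at an assumed path in application.conf.
  context.spawn(worker, "custom-worker", DispatcherSelector.fromConfig("my-dispatcher"))
  Behaviors.empty
}
```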
@@ -68,18 +68,18 @@ similar to how `ActorSystem.actorOf` can be used in untyped actors with the diff The guardian behavior can be defined as: Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala) { #imports1 #main } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala) { #imports1 #main } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/SpawnProtocolDocTest.java) { #imports1 #main } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/SpawnProtocolDocTest.java) { #imports1 #main } and the `ActorSystem` can be created with that `main` behavior and asked to spawn other actors: Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala) { #imports2 #system-spawn } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala) { #imports2 #system-spawn } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/SpawnProtocolDocTest.java) { #imports2 #system-spawn } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/SpawnProtocolDocTest.java) { #imports2 #system-spawn } The `SpawnProtocol` can also be used at other places in the actor hierarchy. It doesn't have to be the root guardian actor. @@ -100,7 +100,7 @@ if different actions is needed when the actor gracefully stops itself from when Here is an illustrating example: Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala) { +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala) { #imports #master-actor #worker-actor @@ -108,7 +108,7 @@ Scala } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/GracefulStopDocTest.java) { +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/GracefulStopDocTest.java) { #imports #master-actor #worker-actor diff --git a/akka-docs/src/main/paradox/typed/actors.md b/akka-docs/src/main/paradox/typed/actors.md index a7d106da37..5b9fa27cbd 100644 --- a/akka-docs/src/main/paradox/typed/actors.md +++ b/akka-docs/src/main/paradox/typed/actors.md @@ -28,19 +28,19 @@ look like? In all of the following these imports are assumed: Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #imports } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #imports } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #imports } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #imports } With these in place we can define our first Actor, and it will say hello! 
Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-actor } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-actor } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-actor } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-actor } This small piece of code defines two message types, one for commanding the Actor to greet someone and one that the Actor will use to confirm that it has @@ -80,10 +80,10 @@ of additional greeting messages and collect the replies until a given max number of messages have been reached. Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-bot } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-bot } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-bot } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-bot } Note how this Actor manages the counter by changing the behavior for each `Greeted` reply rather than using any variables. @@ -93,18 +93,18 @@ rather than using any variables. A third actor spawns the `greeter` and the `bot` and starts the interaction between those. Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-main } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-main } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-main } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-main } Now we want to try out this Actor, so we must start an ActorSystem to host it: Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world } We start an Actor system from the defined `main` behavior and send two `Start` messages that will kick-off the interaction between two separate `bot` actors and the single `greeter` actor. @@ -141,10 +141,10 @@ chat room Actor will disseminate all posted messages to all currently connected client Actors. 
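As a recap of the system start-up described earlier in this section, here is a hedged sketch (names follow the `HelloWorldMain` walk-through referenced by the snippets; this is illustrative, not the snippet itself):

```scala
import akka.actor.typed.ActorSystem

// HelloWorldMain.main and HelloWorldMain.Start are assumed to be the
// guardian behavior and its message type from the referenced example.
val system: ActorSystem[HelloWorldMain.Start] =
  ActorSystem(HelloWorldMain.main, "hello")

// Kick off the interaction between two bots and the single greeter.
system ! HelloWorldMain.Start("World")
system ! HelloWorldMain.Start("Akka")
```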
The protocol definition could look like the following: Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-protocol } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-protocol } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-protocol } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-protocol } Initially the client Actors only get access to an @scala[`ActorRef[GetSession]`]@java[`ActorRef`] which allows them to make the first step. Once a client’s session has been @@ -161,10 +161,10 @@ full protocol that can involve multiple Actors and that can evolve over multiple steps. Here's the implementation of the chat room protocol: Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-behavior } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-behavior } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-behavior } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-behavior } The state is managed by changing behavior rather than using any variables. @@ -204,10 +204,10 @@ problematic, so passing an @scala[`ActorRef[PublishSessionMessage]`]@java[`Actor In order to see this chat room in action we need to write a client Actor that can use it: Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-gabbler } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-gabbler } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-gabbler } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-gabbler } From this behavior we can create an Actor that will accept a chat room session, post a message, wait to see it published, and then terminate. The last step @@ -232,10 +232,10 @@ nonsensical) or we start both of them from a third Actor—our only sensible choice: Scala -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-main } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-main } Java -: @@snip [IntroSpec.scala]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-main } +: @@snip [IntroSpec.scala](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-main } In good tradition we call the `main` Actor what it is, it directly corresponds to the `main` method in a traditional Java application. 
This diff --git a/akka-docs/src/main/paradox/typed/cluster-sharding.md b/akka-docs/src/main/paradox/typed/cluster-sharding.md index 05bf69c289..b30137c326 100644 --- a/akka-docs/src/main/paradox/typed/cluster-sharding.md +++ b/akka-docs/src/main/paradox/typed/cluster-sharding.md @@ -29,35 +29,35 @@ This module is currently marked as @ref:[may change](../common/may-change.md) in Sharding is accessed via the `ClusterSharding` extension. Scala -: @@snip [ShardingCompileOnlySpec.scala]($akka$/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #sharding-extension } +: @@snip [ShardingCompileOnlySpec.scala](/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #sharding-extension } Java -: @@snip [ShardingCompileOnlyTest.java]($akka$/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #import #sharding-extension } +: @@snip [ShardingCompileOnlyTest.java](/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #import #sharding-extension } It is common for sharding to be used with persistence; however, any Behavior can be used with sharding, e.g. a basic counter: Scala -: @@snip [ShardingCompileOnlySpec.scala]($akka$/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #counter } +: @@snip [ShardingCompileOnlySpec.scala](/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #counter } Java -: @@snip [ShardingCompileOnlyTest.java]($akka$/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #counter } +: @@snip [ShardingCompileOnlyTest.java](/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #counter } Each Entity type has a key that is then used to retrieve an EntityRef for a given entity identifier. Scala -: @@snip [ShardingCompileOnlySpec.scala]($akka$/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #spawn } +: @@snip [ShardingCompileOnlySpec.scala](/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #spawn } Java -: @@snip [ShardingCompileOnlyTest.java]($akka$/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #spawn } +: @@snip [ShardingCompileOnlyTest.java](/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #spawn } Messages to a specific entity are then sent via an EntityRef. It is also possible to wrap messages in a `ShardingEnvelope` or define extractor functions and send messages directly to the shard region.
Scala -: @@snip [ShardingCompileOnlySpec.scala]($akka$/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #send } +: @@snip [ShardingCompileOnlySpec.scala](/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #send } Java -: @@snip [ShardingCompileOnlyTest.java]($akka$/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #send } +: @@snip [ShardingCompileOnlyTest.java](/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #send } ## Persistence example @@ -69,12 +69,12 @@ Taking the larger example from the @ref:[persistence documentation](persistence. a sharded entity is the same as for a non persistent behavior. The behavior: Scala -: @@snip [InDepthPersistentBehaviorSpec.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #behavior } +: @@snip [InDepthPersistentBehaviorSpec.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #behavior } To create the entity: Scala -: @@snip [ShardingCompileOnlySpec.scala]($akka$/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #persistence } +: @@snip [ShardingCompileOnlySpec.scala](/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #persistence } Sending messages to entities is the same as the example above. The only difference is that when an entity is moved, its state will be restored. See @ref:[persistence](persistence.md) for more details. diff --git a/akka-docs/src/main/paradox/typed/cluster-singleton.md b/akka-docs/src/main/paradox/typed/cluster-singleton.md index 93624e6c21..8284ea1751 100644 --- a/akka-docs/src/main/paradox/typed/cluster-singleton.md +++ b/akka-docs/src/main/paradox/typed/cluster-singleton.md @@ -42,20 +42,20 @@ instance will eventually be started. Any `Behavior` can be run as a singleton. E.g. a basic counter: Scala -: @@snip [ShardingCompileOnlySpec.scala]($akka$/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #counter } +: @@snip [ShardingCompileOnlySpec.scala](/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #counter } Java -: @@snip [ShardingCompileOnlyTest.java]($akka$/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #counter } +: @@snip [ShardingCompileOnlyTest.java](/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #counter } Then on every node in the cluster, or every node with a given role, use the `ClusterSingleton` extension to spawn the singleton.
An instance will be started per data centre of the cluster: Scala -: @@snip [ShardingCompileOnlySpec.scala]($akka$/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #singleton } +: @@snip [ShardingCompileOnlySpec.scala](/akka-cluster-sharding-typed/src/test/scala/doc/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala) { #singleton } Java -: @@snip [ShardingCompileOnlyTest.java]($akka$/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #singleton } +: @@snip [ShardingCompileOnlyTest.java](/akka-cluster-sharding-typed/src/test/java/jdoc/akka/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #singleton } ## Accessing singleton of another data centre diff --git a/akka-docs/src/main/paradox/typed/cluster.md b/akka-docs/src/main/paradox/typed/cluster.md index 372afd15af..e0c8849628 100644 --- a/akka-docs/src/main/paradox/typed/cluster.md +++ b/akka-docs/src/main/paradox/typed/cluster.md @@ -29,14 +29,14 @@ This module is currently marked as @ref:[may change](../common/may-change.md) in All of the examples below assume the following imports: Scala -: @@snip [BasicClusterExampleSpec.scala]($akka$/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-imports } +: @@snip [BasicClusterExampleSpec.scala](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-imports } Java -: @@snip [BasicClusterExampleTest.java]($akka$/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-imports } +: @@snip [BasicClusterExampleTest.java](/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-imports } And the minimum configuration required is to set a host/port for remoting and the `akka.actor.provider = "cluster"`. -@@snip [BasicClusterExampleSpec.scala]($akka$/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #config-seeds } +@@snip [BasicClusterExampleSpec.scala](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #config-seeds } ## Cluster API extension @@ -48,10 +48,10 @@ references, i.e. it’s a message based API. The references are on the `Cluster` extension: Scala -: @@snip [BasicClusterExampleSpec.scala]($akka$/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-create } +: @@snip [BasicClusterExampleSpec.scala](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-create } Java -: @@snip [BasicClusterExampleTest.java]($akka$/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-create } +: @@snip [BasicClusterExampleTest.java](/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-create } The Cluster extension gives you access to: @@ -65,18 +65,18 @@ The Cluster extension gives you access to: If not using configuration to specify seeds, joining the cluster can be done programmatically via the `manager`.
Scala -: @@snip [BasicClusterExampleSpec.scala]($akka$/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-join } +: @@snip [BasicClusterExampleSpec.scala](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-join } Java -: @@snip [BasicClusterExampleTest.java]($akka$/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-join } +: @@snip [BasicClusterExampleTest.java](/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-join } Leaving and downing are similar e.g. Scala -: @@snip [BasicClusterExampleSpec.scala]($akka$/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-leave } +: @@snip [BasicClusterExampleSpec.scala](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-leave } Java -: @@snip [BasicClusterExampleTest.java]($akka$/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-leave } +: @@snip [BasicClusterExampleTest.java](/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-leave } ### Cluster subscriptions @@ -87,18 +87,18 @@ for the node going through the lifecycle described in @ref:[Cluster Specificatio This example subscribes with a `TestProbe` but in a real application it would be an Actor: Scala -: @@snip [BasicClusterExampleSpec.scala]($akka$/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-subscribe } +: @@snip [BasicClusterExampleSpec.scala](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-subscribe } Java -: @@snip [BasicClusterExampleTest.java]($akka$/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-subscribe } +: @@snip [BasicClusterExampleTest.java](/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-subscribe } Then asking a node to leave: Scala -: @@snip [BasicClusterExampleSpec.scala]($akka$/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-leave-example } +: @@snip [BasicClusterExampleSpec.scala](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala) { #cluster-leave-example } Java -: @@snip [BasicClusterExampleTest.java]($akka$/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-leave-example } +: @@snip [BasicClusterExampleTest.java](/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/BasicClusterExampleTest.java) { #cluster-leave-example } ## Serialization @@ -108,7 +108,7 @@ since there is no `sender`. 
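Before the serialization notes below, here is the subscribe-then-leave flow above condensed into one hedged sketch (a running typed `system` and a `subscriber` reference, e.g. a probe's ref, are assumed):

```scala
import akka.cluster.ClusterEvent.MemberEvent
import akka.cluster.typed.{ Cluster, Leave, Subscribe }

val cluster = Cluster(system)
// Receive MemberEvents describing the lifecycle of nodes in the cluster.
cluster.subscriptions ! Subscribe(subscriber, classOf[MemberEvent])
// Ask this node to leave; the subscriber will observe the resulting events.
cluster.manager ! Leave(cluster.selfMember.address)
```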
To serialize actor references to/from string represe For example here's how a serializer could look for the `Ping` and `Pong` messages above: Scala -: @@snip [PingSerializer.scala]($akka$/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/PingSerializer.scala) { #serializer } +: @@snip [PingSerializer.scala](/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/PingSerializer.scala) { #serializer } Java -: @@snip [PingSerializerExampleTest.java]($akka$/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/PingSerializerExampleTest.java) { #serializer } +: @@snip [PingSerializerExampleTest.java](/akka-cluster-typed/src/test/java/jdocs/akka/cluster/typed/PingSerializerExampleTest.java) { #serializer } diff --git a/akka-docs/src/main/paradox/typed/coexisting.md b/akka-docs/src/main/paradox/typed/coexisting.md index 51023509f7..d4e97ef9a4 100644 --- a/akka-docs/src/main/paradox/typed/coexisting.md +++ b/akka-docs/src/main/paradox/typed/coexisting.md @@ -31,7 +31,7 @@ Typed and untyped can interact the following ways: In the examples the `akka.actor` package is aliased to `untyped`. Scala -: @@snip [UntypedWatchingTypedSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #import-alias } +: @@snip [UntypedWatchingTypedSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #import-alias } @@@ @@ -43,44 +43,44 @@ While coexisting your application will likely still have an untyped ActorSystem. so that new code and migrated parts don't rely on the untyped system: Scala -: @@snip [UntypedWatchingTypedSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #convert-untyped } +: @@snip [UntypedWatchingTypedSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #convert-untyped } Java -: @@snip [UntypedWatchingTypedTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/UntypedWatchingTypedTest.java) { #convert-untyped } +: @@snip [UntypedWatchingTypedTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/UntypedWatchingTypedTest.java) { #convert-untyped } Then for new typed actors here's how you create, watch and send messages to it from an untyped actor. 
Scala -: @@snip [UntypedWatchingTypedSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #typed } +: @@snip [UntypedWatchingTypedSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #typed } Java -: @@snip [UntypedWatchingTypedTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/UntypedWatchingTypedTest.java) { #typed } +: @@snip [UntypedWatchingTypedTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/UntypedWatchingTypedTest.java) { #typed } The top level untyped actor is created in the usual way: Scala -: @@snip [UntypedWatchingTypedSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #create-untyped } +: @@snip [UntypedWatchingTypedSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #create-untyped } Java -: @@snip [UntypedWatchingTypedTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/UntypedWatchingTypedTest.java) { #create-untyped } +: @@snip [UntypedWatchingTypedTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/UntypedWatchingTypedTest.java) { #create-untyped } Then it can create a typed actor, watch it, and send a message to it: Scala -: @@snip [UntypedWatchingTypedSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #untyped-watch } +: @@snip [UntypedWatchingTypedSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #untyped-watch } Java -: @@snip [UntypedWatchingTypedTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/UntypedWatchingTypedTest.java) { #untyped-watch } +: @@snip [UntypedWatchingTypedTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/UntypedWatchingTypedTest.java) { #untyped-watch } @scala[There is one `import` that is needed to make that work.] @java[We import the Adapter class and call static methods for conversion.] Scala -: @@snip [UntypedWatchingTypedSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #adapter-import } +: @@snip [UntypedWatchingTypedSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/UntypedWatchingTypedSpec.scala) { #adapter-import } Java -: @@snip [UntypedWatchingTypedTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/UntypedWatchingTypedTest.java) { #adapter-import } +: @@snip [UntypedWatchingTypedTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/UntypedWatchingTypedTest.java) { #adapter-import } @scala[That adds some implicit extension methods that are added to untyped and typed `ActorSystem` and `ActorContext` in both directions.] 
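As a quick orientation to the adapter in play here, a minimal sketch (the `typedBehavior` value is an assumed placeholder for a typed `Behavior` defined elsewhere):

```scala
import akka.actor.typed.scaladsl.adapter._

// Convert an untyped system to a typed one so new code depends only on typed APIs.
val untypedSystem = akka.actor.ActorSystem("app")
val typedSystem: akka.actor.typed.ActorSystem[Nothing] = untypedSystem.toTyped

// The adapter also adds `spawn` to untyped systems and contexts for typed children.
val typedRef = untypedSystem.spawn(typedBehavior, "typed-child")
```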
@@ -94,26 +94,26 @@ The following will show how to create, watch and send messages back and forth fr untyped actor: Scala -: @@snip [TypedWatchingUntypedSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingUntypedSpec.scala) { #untyped } +: @@snip [TypedWatchingUntypedSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingUntypedSpec.scala) { #untyped } Java -: @@snip [TypedWatchingUntypedTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/TypedWatchingUntypedTest.java) { #untyped } +: @@snip [TypedWatchingUntypedTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/TypedWatchingUntypedTest.java) { #untyped } Creating the actor system and the typed actor: Scala -: @@snip [TypedWatchingUntypedSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingUntypedSpec.scala) { #create } +: @@snip [TypedWatchingUntypedSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingUntypedSpec.scala) { #create } Java -: @@snip [TypedWatchingUntypedTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/TypedWatchingUntypedTest.java) { #create } +: @@snip [TypedWatchingUntypedTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/TypedWatchingUntypedTest.java) { #create } Then the typed actor creates the untyped actor, watches it and sends and receives a response: Scala -: @@snip [TypedWatchingUntypedSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingUntypedSpec.scala) { #typed } +: @@snip [TypedWatchingUntypedSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingUntypedSpec.scala) { #typed } Java -: @@snip [TypedWatchingUntypedTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/TypedWatchingUntypedTest.java) { #typed } +: @@snip [TypedWatchingUntypedTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/coexistence/TypedWatchingUntypedTest.java) { #typed } There is one caveat regarding supervision of untyped child from typed parent. If the child throws an exception we would expect it to be restarted, but supervision in Akka Typed defaults to stopping the child in case it fails. The restarting facilities in Akka Typed will not work with untyped children. However, the workaround is to add another untyped actor that takes care of the supervision, i.e. restarts in case of failure if that is the desired behavior. diff --git a/akka-docs/src/main/paradox/typed/dispatchers.md b/akka-docs/src/main/paradox/typed/dispatchers.md index 4245d35dc2..e102886f27 100644 --- a/akka-docs/src/main/paradox/typed/dispatchers.md +++ b/akka-docs/src/main/paradox/typed/dispatchers.md @@ -25,10 +25,10 @@ details @ref:[here](../dispatchers.md#blocking-needs-careful-management). 
To select a dispatcher use `DispatcherSelector` to create a `Props` instance for spawning your actor: Scala -: @@snip [DispatcherDocSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala) { #spawn-dispatcher } +: @@snip [DispatcherDocSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala) { #spawn-dispatcher } Java -: @@snip [DispatcherDocTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/DispatchersDocTest.java) { #spawn-dispatcher } +: @@snip [DispatcherDocTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/DispatchersDocTest.java) { #spawn-dispatcher } `DispatcherSelector` has two convenience methods to look up the default dispatcher and a dispatcher you can use to execute actors that block, e.g. a legacy database API that does not support @scala[`Future`]@java[`CompletionStage`]s. @@ -36,9 +36,9 @@ The final example shows how to load a custom dispatcher from configuration and relies on this being in your application.conf: Scala -: @@snip [DispatcherDocSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala) { #config } +: @@snip [DispatcherDocSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala) { #config } Java -: @@snip [DispatcherDocSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala) { #config } +: @@snip [DispatcherDocSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala) { #config } For full details on how to configure custom dispatchers see the @ref:[untyped docs](../dispatchers.md#types-of-dispatchers). diff --git a/akka-docs/src/main/paradox/typed/fault-tolerance.md b/akka-docs/src/main/paradox/typed/fault-tolerance.md index 837d444d97..ce042f68b2 100644 --- a/akka-docs/src/main/paradox/typed/fault-tolerance.md +++ b/akka-docs/src/main/paradox/typed/fault-tolerance.md @@ -21,36 +21,36 @@ In Akka Typed this "somewhere else" is called supervision.
Supervision allows yo Scala -: @@snip [SupervisionCompileOnly.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #restart } +: @@snip [SupervisionCompileOnly.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #restart } Java -: @@snip [SupervisionCompileOnlyTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #restart } +: @@snip [SupervisionCompileOnlyTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #restart } Or to resume, ignore the failure and process the next message, instead: Scala -: @@snip [SupervisionCompileOnly.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #resume } +: @@snip [SupervisionCompileOnly.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #resume } Java -: @@snip [SupervisionCompileOnlyTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #resume } +: @@snip [SupervisionCompileOnlyTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #resume } More complicated restart strategies can be used e.g. to restart no more than 10 times in a 10 second period: Scala -: @@snip [SupervisionCompileOnly.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #restart-limit } +: @@snip [SupervisionCompileOnly.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #restart-limit } Java -: @@snip [SupervisionCompileOnlyTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #restart-limit } +: @@snip [SupervisionCompileOnlyTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #restart-limit } To handle different exceptions with different strategies calls to `supervise` can be nested: Scala -: @@snip [SupervisionCompileOnly.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #multiple } +: @@snip [SupervisionCompileOnly.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #multiple } Java -: @@snip [SupervisionCompileOnlyTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #multiple } +: @@snip [SupervisionCompileOnlyTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #multiple } For a full list of strategies see the public methods on `SupervisorStrategy` @@ -59,18 +59,18 @@ For a full list of strategies see the public methods on `SupervisorStrategy` It is very common to store state by changing behavior e.g. 
Scala -: @@snip [SupervisionCompileOnly.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #wrap } +: @@snip [SupervisionCompileOnly.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #wrap } Java -: @@snip [SupervisionCompileOnlyTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #wrap } +: @@snip [SupervisionCompileOnlyTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #wrap } When doing this, supervision only needs to be added to the top level: Scala -: @@snip [SupervisionCompileOnly.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #top-level } +: @@snip [SupervisionCompileOnly.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala) { #top-level } Java -: @@snip [SupervisionCompileOnlyTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #top-level } +: @@snip [SupervisionCompileOnlyTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/supervision/SupervisionCompileOnlyTest.java) { #top-level } Each returned behavior will be re-wrapped automatically with the supervisor. @@ -94,7 +94,7 @@ There might be cases when you want the original exception to bubble up the hiera Scala -: @@snip [FaultToleranceDocSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FaultToleranceDocSpec.scala) { #bubbling-example } +: @@snip [FaultToleranceDocSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FaultToleranceDocSpec.scala) { #bubbling-example } Java -: @@snip [SupervisionCompileOnlyTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/FaultToleranceDocTest.java) { #bubbling-example } \ No newline at end of file +: @@snip [SupervisionCompileOnlyTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/FaultToleranceDocTest.java) { #bubbling-example } diff --git a/akka-docs/src/main/paradox/typed/fsm.md b/akka-docs/src/main/paradox/typed/fsm.md index 5ca7af0275..f3f48ed60a 100644 --- a/akka-docs/src/main/paradox/typed/fsm.md +++ b/akka-docs/src/main/paradox/typed/fsm.md @@ -13,10 +13,10 @@ the @ref[untyped actor FSM docs](../fsm.md). It demonstrates how to: The events the FSM can receive become the type of message the Actor can receive: Scala -: @@snip [FSMSocSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala) { #simple-events } +: @@snip [FSMSocSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala) { #simple-events } Java -: @@snip [FSMSocTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/FSMDocTest.java) { #simple-events } +: @@snip [FSMSocTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/FSMDocTest.java) { #simple-events } `SetTarget` is needed for starting it up, setting the destination for the `Batches` to be passed on; `Queue` will add to the internal queue while @@ -26,19 +26,19 @@ Untyped `FSM`s also have a `D` (data) type parameter. Akka Typed doesn't need one, as the data can be carried via defining your behaviors as methods.
Scala -: @@snip [FSMSocSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala) { #storing-state } +: @@snip [FSMSocSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala) { #storing-state } Java -: @@snip [FSMSocTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/FSMDocTest.java) { #storing-state } +: @@snip [FSMSocTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/FSMDocTest.java) { #storing-state } Each state becomes a distinct behavior. No explicit `goto` is required as Akka Typed already requires you return the next behavior. Scala -: @@snip [FSMSocSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala) { #simple-state } +: @@snip [FSMSocSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala) { #simple-state } Java -: @@snip [FSMSocTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/FSMDocTest.java) { #simple-state} +: @@snip [FSMSocTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/FSMDocTest.java) { #simple-state} To set state timeouts use `Behaviors.withTimers` along with a `startSingleTimer`. diff --git a/akka-docs/src/main/paradox/typed/interaction-patterns.md b/akka-docs/src/main/paradox/typed/interaction-patterns.md index ed093d2e5b..35179a183d 100644 --- a/akka-docs/src/main/paradox/typed/interaction-patterns.md +++ b/akka-docs/src/main/paradox/typed/interaction-patterns.md @@ -25,19 +25,19 @@ Tell is asynchronous which means that the method returns right away, when the st With the given protocol and actor behavior: Scala -: @@snip [InteractionPatternsSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #fire-and-forget-definition } +: @@snip [InteractionPatternsSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #fire-and-forget-definition } Java -: @@snip [InteractionPatternsTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #fire-and-forget-definition } +: @@snip [InteractionPatternsTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #fire-and-forget-definition } Fire and forget looks like this: Scala -: @@snip [InteractionPatternsSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #fire-and-forget-doit } +: @@snip [InteractionPatternsSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #fire-and-forget-doit } Java -: @@snip [InteractionPatternsTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #fire-and-forget-doit } +: @@snip [InteractionPatternsTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #fire-and-forget-doit } **Useful when:** @@ -60,28 +60,28 @@ In Akka Typed the recipient of responses has to be encoded as a field in the mes With the following protocol: Scala -: @@snip [InteractionPatternsSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #request-response-protocol } +: @@snip [InteractionPatternsSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #request-response-protocol } Java -: @@snip 
-: @@snip [InteractionPatternsTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #request-response-protocol }
+: @@snip [InteractionPatternsTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #request-response-protocol }

The sender would use its own @scala[`ActorRef[Response]`]@java[`ActorRef`], which it can access through @scala[`ActorContext.self`]@java[`ActorContext.getSelf()`], for the `respondTo`.

Scala
-: @@snip [InteractionPatternsSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #request-response-send }
+: @@snip [InteractionPatternsSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #request-response-send }

Java
-: @@snip [InteractionPatternsTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #request-response-send }
+: @@snip [InteractionPatternsTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #request-response-send }

On the receiving side the @scala[`ActorRef[Response]`]@java[`ActorRef`] can then be used to send one or more responses back:

Scala
-: @@snip [InteractionPatternsSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #request-response-respond }
+: @@snip [InteractionPatternsSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #request-response-respond }

Java
-: @@snip [InteractionPatternsTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #request-response-respond }
+: @@snip [InteractionPatternsTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #request-response-respond }

**Useful when:**

@@ -102,10 +102,10 @@ Java

Most often the sending actor does not, and should not, support receiving the response messages of another actor. In such cases we need to provide an `ActorRef` of the right type and adapt the response message to a type that the sending actor can handle.

Scala
-: @@snip [InteractionPatternsSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #adapted-response }
+: @@snip [InteractionPatternsSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #adapted-response }

Java
-: @@snip [InteractionPatternsTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #adapted-response }
+: @@snip [InteractionPatternsTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #adapted-response }

You can register several message adapters for different message classes. It's only possible to have one message adapter per message class to make sure

@@ -147,10 +147,10 @@ In an interaction where there is a 1:1 mapping between a request and a response

The interaction has two steps: first we need to construct the outgoing message; to do that we need an @scala[`ActorRef[Response]`]@java[`ActorRef`] to put as recipient in the outgoing message. The second step is to transform the successful `Response` or failure into a message that is part of the protocol of the sending actor.
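For orientation before the project's own snippets, here is a rough sketch of those two steps. The protocol names are hypothetical, and it assumes the `ActorContext.ask` shape of the 2.5.x `akka-actor-typed` API:

```scala
import scala.concurrent.duration._
import scala.util.{ Failure, Success }
import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.scaladsl.Behaviors
import akka.util.Timeout

object Backend {
  final case class Request(query: String, replyTo: ActorRef[Response])
  final case class Response(result: String)
}

object Frontend {
  sealed trait Command
  final case class AdaptedResponse(result: String) extends Command

  def behavior(backend: ActorRef[Backend.Request]): Behavior[Command] =
    Behaviors.setup { ctx =>
      implicit val timeout: Timeout = 3.seconds
      // step 1: build the outgoing message, with a replyTo ref supplied by ask
      ctx.ask(backend)((ref: ActorRef[Backend.Response]) => Backend.Request("query", ref)) {
        // step 2: map the successful response or the failure into our protocol
        case Success(Backend.Response(result)) => AdaptedResponse(result)
        case Failure(ex)                       => AdaptedResponse(s"failed: ${ex.getMessage}")
      }
      Behaviors.receiveMessage {
        case AdaptedResponse(result) =>
          ctx.log.info("Got response: {}", result)
          Behaviors.same
      }
    }
}
```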
Scala
-: @@snip [InteractionPatternsSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #actor-ask }
+: @@snip [InteractionPatternsSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #actor-ask }

Java
-: @@snip [InteractionPatternsTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #actor-ask }
+: @@snip [InteractionPatternsTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #actor-ask }

The response adapting function runs in the receiving actor and can safely access its state, but if it throws an exception the actor is stopped.

@@ -177,10 +177,10 @@ Sometimes you need to interact with actors from outside of the actor system, th

To do this we use @scala[`ActorRef.ask` (or the symbolic `ActorRef.?`) implicitly provided by `akka.actor.typed.scaladsl.AskPattern`]@java[`akka.actor.typed.javadsl.AskPattern.ask`] to send a message to an actor and get a @scala[`Future[Response]`]@java[`CompletionStage[Response]`] back.

Scala
-: @@snip [InteractionPatternsSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #standalone-ask }
+: @@snip [InteractionPatternsSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #standalone-ask }

Java
-: @@snip [InteractionPatternsTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #standalone-ask }
+: @@snip [InteractionPatternsTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #standalone-ask }

**Useful when:**

@@ -204,10 +204,10 @@ The child is created with the context it needs to do the work, including an `Act

As the protocol of the session actor is not a public API but rather an implementation detail of the parent actor, it may not always make sense to have an explicit protocol and adapt the messages of the actors that the session actor interacts with. For this use case it is possible to express that the actor can receive any message (@scala[`Any`]@java[`Object`]).

Scala
-: @@snip [InteractionPatternsSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #per-session-child }
+: @@snip [InteractionPatternsSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #per-session-child }

Java
-: @@snip [InteractionPatternsTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #per-session-child }
+: @@snip [InteractionPatternsTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #per-session-child }

In an actual session child you would likely want to include some form of timeout as well (see @ref:[scheduling messages to self](#scheduling-messages-to-self)).

@@ -229,10 +229,10 @@ The following example demonstrates how to use timers to schedule messages to an

The `Buncher` actor buffers a burst of incoming messages and delivers them as a batch after a timeout or when the number of batched messages exceeds a maximum size.
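A stripped-down sketch of the timer mechanics first (hypothetical `Collector` names; the real `Buncher` in the snippets below additionally flushes when the batch reaches its maximum size):

```scala
import scala.concurrent.duration._
import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.Behaviors

object Collector {
  sealed trait Command
  final case class Item(s: String) extends Command
  private case object Flush extends Command // timer messages stay internal

  def behavior: Behavior[Command] = Behaviors.withTimers { timers =>
    // a single named timer; starting it again under the same key replaces it
    timers.startSingleTimer("flush", Flush, 1.second)

    def buffering(buf: Vector[String]): Behavior[Command] =
      Behaviors.receive { (ctx, msg) =>
        msg match {
          case Item(s) => buffering(buf :+ s)
          case Flush =>
            ctx.log.info("Delivering {} buffered items", buf.size)
            timers.startSingleTimer("flush", Flush, 1.second)
            buffering(Vector.empty)
        }
      }

    buffering(Vector.empty)
  }
}
```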
Scala
-: @@snip [InteractionPatternsSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #timer }
+: @@snip [InteractionPatternsSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala) { #timer }

Java
-: @@snip [InteractionPatternsTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #timer }
+: @@snip [InteractionPatternsTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/InteractionPatternsTest.java) { #timer }

There are a few things worth noting here:

diff --git a/akka-docs/src/main/paradox/typed/persistence.md b/akka-docs/src/main/paradox/typed/persistence.md
index 2019e569b4..b42891d844 100644
--- a/akka-docs/src/main/paradox/typed/persistence.md
+++ b/akka-docs/src/main/paradox/typed/persistence.md
@@ -30,10 +30,10 @@ This module is currently marked as @ref:[may change](../common/may-change.md) in

Let's start with a simple example. The minimum required for a `PersistentBehavior` is:

Scala
-: @@snip [BasicPersistentBehaviorsCompileOnly.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorsCompileOnly.scala) { #structure }
+: @@snip [BasicPersistentBehaviorsCompileOnly.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorsCompileOnly.scala) { #structure }

Java
-: @@snip [BasicPersistentBehaviorsTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/BasicPersistentBehaviorsTest.java) { #structure }
+: @@snip [BasicPersistentBehaviorsTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/BasicPersistentBehaviorsTest.java) { #structure }

The first important thing to notice is that the `Behavior` of a persistent actor is typed to the type of the `Command` because this is the type of message a persistent actor should receive. In Akka Typed this is now enforced by the type system.
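For orientation, a minimal sketch of those pieces wired together (hypothetical counter protocol; assumes the `PersistentBehaviors.receive` signature of this Akka version, so treat the snippet above as the authoritative form):

```scala
import akka.actor.typed.Behavior
import akka.persistence.typed.scaladsl.{ Effect, PersistentBehaviors }

object Counter {
  sealed trait Command
  case object Increment extends Command
  sealed trait Event
  case object Incremented extends Event
  final case class State(count: Int)

  val behavior: Behavior[Command] =
    PersistentBehaviors.receive[Command, Event, State](
      "counter-1", // persistenceId
      State(0),    // empty state used before any events are replayed
      // command handler: decides which events to persist
      (ctx, state, cmd) => Effect.persist(Incremented),
      // event handler: applies each persisted event to the state
      (state, evt) => State(state.count + 1))
}
```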
@@ -87,43 +87,43 @@ in the event handler, as those are also executed during recovery of an persisten

Command and event:

Scala
-: @@snip [PersistentActorCompileOnyTest.scala]($akka$/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #command }
+: @@snip [PersistentActorCompileOnlyTest.scala](/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #command }

Java
-: @@snip [PersistentActorCompileOnyTest.java]($akka$/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #command }
+: @@snip [PersistentActorCompileOnlyTest.java](/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #command }

State is a List containing all the events:

Scala
-: @@snip [PersistentActorCompileOnyTest.scala]($akka$/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #state }
+: @@snip [PersistentActorCompileOnlyTest.scala](/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #state }

Java
-: @@snip [PersistentActorCompileOnyTest.java]($akka$/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #state }
+: @@snip [PersistentActorCompileOnlyTest.java](/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #state }

The command handler persists the `Cmd` payload in an `Evt`@java[. In this simple example the command handler is defined using a lambda, for the more complicated example below a `CommandHandlerBuilder` is used]:

Scala
-: @@snip [PersistentActorCompileOnyTest.scala]($akka$/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #command-handler }
+: @@snip [PersistentActorCompileOnlyTest.scala](/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #command-handler }

Java
-: @@snip [PersistentActorCompileOnyTest.java]($akka$/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #command-handler }
+: @@snip [PersistentActorCompileOnlyTest.java](/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #command-handler }

The event handler appends the event to the state. This is called after successfully persisting the event in the database @java[.
As with the command handler the event handler is defined using a lambda, see below for a more complicated example using the `EventHandlerBuilder`]:

Scala
-: @@snip [PersistentActorCompileOnyTest.scala]($akka$/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #event-handler }
+: @@snip [PersistentActorCompileOnlyTest.scala](/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #event-handler }

Java
-: @@snip [PersistentActorCompileOnyTest.java]($akka$/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #event-handler }
+: @@snip [PersistentActorCompileOnlyTest.java](/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #event-handler }

These are used to create a `PersistentBehavior`:

Scala
-: @@snip [PersistentActorCompileOnyTest.scala]($akka$/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #behavior }
+: @@snip [PersistentActorCompileOnlyTest.scala](/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #behavior }

Java
-: @@snip [PersistentActorCompileOnyTest.java]($akka$/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #behavior }
+: @@snip [PersistentActorCompileOnlyTest.java](/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #behavior }

The `PersistentBehavior` can then be run like any plain typed actor, as described in the [typed actors documentation](actors-typed.md).

@@ -152,18 +152,18 @@ then we can look it up with `GetPost`, modify it with `ChangeBody` or publish

The state is captured by:

Scala
-: @@snip [InDepthPersistentBehaviorSpec.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #state }
+: @@snip [InDepthPersistentBehaviorSpec.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #state }

Java
-: @@snip [InDepthPersistentBehaviorTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #state }
+: @@snip [InDepthPersistentBehaviorTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #state }

The commands, of which only a subset are valid depending on the state:

Scala
-: @@snip [InDepthPersistentBehaviorSpec.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #commands }
+: @@snip [InDepthPersistentBehaviorSpec.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #commands }

Java
-: @@snip [InDepthPersistentBehaviorTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #commands }
+: @@snip [InDepthPersistentBehaviorTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #commands }

@java[The command handler to process each command is decided by the state class (or state predicate) that is given to the `commandHandlerBuilder` and the match cases in the builders.
Several builders can be composed with `orElse`:]

@@ -171,44 +171,44 @@ given to the `commandHandlerBuilder` and the match cases in the builders. Severa

which is a function from `State => CommandHandler`:]

Scala
-: @@snip [InDepthPersistentBehaviorSpec.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #by-state-command-handler }
+: @@snip [InDepthPersistentBehaviorSpec.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #by-state-command-handler }

Java
-: @@snip [InDepthPersistentBehaviorTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #command-handler }
+: @@snip [InDepthPersistentBehaviorTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #command-handler }

The @java[`CommandHandlerBuilder`]@scala[`CommandHandler`] for a post that hasn't been initialized with content:

Scala
-: @@snip [InDepthPersistentBehaviorSpec.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #initial-command-handler }
+: @@snip [InDepthPersistentBehaviorSpec.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #initial-command-handler }

Java
-: @@snip [InDepthPersistentBehaviorTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #initial-command-handler }
+: @@snip [InDepthPersistentBehaviorTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #initial-command-handler }

And a different @java[`CommandHandlerBuilder`]@scala[`CommandHandler`] for after the post content has been added:

Scala
-: @@snip [InDepthPersistentBehaviorSpec.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #post-added-command-handler }
+: @@snip [InDepthPersistentBehaviorSpec.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #post-added-command-handler }

Java
-: @@snip [InDepthPersistentBehaviorTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #post-added-command-handler }
+: @@snip [InDepthPersistentBehaviorTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #post-added-command-handler }

The event handler is always the same independent of state. The main reason for not making the event handler part of the `CommandHandler` is that, contrary to commands, all events must be handled, and that is typically independent of what the current state is. The event handler can still decide what to do based on the state, if that is needed.
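A sketch of that idea with hypothetical blog-post types: one total event handler for every state, which may still branch on the current state where needed:

```scala
object BlogEventHandler {
  sealed trait BlogEvent
  final case class PostAdded(postId: String, body: String) extends BlogEvent
  final case class BodyChanged(postId: String, newBody: String) extends BlogEvent

  final case class BlogState(postId: Option[String], body: String)

  // handles every event, regardless of which state produced it
  def eventHandler(state: BlogState, event: BlogEvent): BlogState =
    event match {
      case PostAdded(id, body)     => BlogState(Some(id), body)
      case BodyChanged(_, newBody) => state.copy(body = newBody)
    }
}
```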
Scala
-: @@snip [InDepthPersistentBehaviorSpec.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #event-handler }
+: @@snip [InDepthPersistentBehaviorSpec.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #event-handler }

Java
-: @@snip [InDepthPersistentBehaviorTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #event-handler }
+: @@snip [InDepthPersistentBehaviorTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #event-handler }

And finally the behavior is created @scala[from the `PersistentBehaviors.receive`]:

Scala
-: @@snip [InDepthPersistentBehaviorSpec.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #behavior }
+: @@snip [InDepthPersistentBehaviorSpec.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/InDepthPersistentBehaviorSpec.scala) { #behavior }

Java
-: @@snip [InDepthPersistentBehaviorTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #behavior }
+: @@snip [InDepthPersistentBehaviorTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/InDepthPersistentBehaviorTest.java) { #behavior }

## Effects and Side Effects

@@ -228,10 +228,10 @@ Most of the time this will be done with the `thenRun` method on the `Effect` ab

factor out common `SideEffect`s. For example:

Scala
-: @@snip [BasicPersistentBehaviorsCompileOnly.scala]($akka$/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #commonChainedEffects }
+: @@snip [PersistentActorCompileOnlyTest.scala](/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala) { #commonChainedEffects }

Java
-: @@snip [BasicPersistentBehaviorsCompileOnly.scala]($akka$/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #commonChainedEffects }
+: @@snip [PersistentActorCompileOnlyTest.java](/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #commonChainedEffects }

### Side effects ordering and guarantees

@@ -251,10 +251,10 @@ It is strongly discouraged to perform side effects in `applyEvent`, so side

effects should be performed once recovery has completed @scala[in the `onRecoveryCompleted` callback.] @java[by overriding `onRecoveryCompleted`]

Scala
-: @@snip [BasicPersistentBehaviorsCompileOnly.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorsCompileOnly.scala) { #recovery }
+: @@snip [BasicPersistentBehaviorsCompileOnly.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorsCompileOnly.scala) { #recovery }

Java
-: @@snip [BasicPersistentBehaviorsTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/BasicPersistentBehaviorsTest.java) { #recovery }
+: @@snip [BasicPersistentBehaviorsTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/BasicPersistentBehaviorsTest.java) { #recovery }

The `onRecoveryCompleted` takes @scala[an `ActorContext` and] the current `State`, and doesn't return anything.
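A sketch of deferring side effects to recovery completion (hypothetical counter protocol; assumes the `onRecoveryCompleted` signature described here, taking an `ActorContext` and the `State`):

```scala
import akka.actor.typed.Behavior
import akka.persistence.typed.scaladsl.{ Effect, PersistentBehaviors }

object RecoverySketch {
  sealed trait Command
  case object Increment extends Command
  sealed trait Event
  case object Incremented extends Event
  final case class State(count: Int)

  val behavior: Behavior[Command] =
    PersistentBehaviors.receive[Command, Event, State](
      "counter-2",
      State(0),
      (ctx, state, cmd) => Effect.persist(Incremented),
      (state, evt) => State(state.count + 1)
    ).onRecoveryCompleted { (ctx, state) =>
      // runs once, after the journal has been replayed: a safe place
      // for side effects such as logging or sending initial messages
      ctx.log.info("Recovered with count {}", state.count)
    }
}
```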
@@ -264,10 +264,10 @@ and doesn't return anything.

Persistence typed allows you to use event tags without using @ref[`EventAdapter`](../persistence.md#event-adapters):

Scala
-: @@snip [BasicPersistentActorCompileOnly.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorsCompileOnly.scala) { #tagging }
+: @@snip [BasicPersistentBehaviorsCompileOnly.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorsCompileOnly.scala) { #tagging }

Java
-: @@snip [BasicPersistentBehaviorsTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/BasicPersistentBehaviorsTest.java) { #tagging }
+: @@snip [BasicPersistentBehaviorsTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/BasicPersistentBehaviorsTest.java) { #tagging }

## Event adapters

@@ -277,18 +277,18 @@ to another type that is then passed to the journal.

Defining an event adapter is done by extending an `EventAdapter`:

Scala
-: @@snip [x]($akka$/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentBehaviorSpec.scala) { #event-wrapper }
+: @@snip [PersistentBehaviorSpec.scala](/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentBehaviorSpec.scala) { #event-wrapper }

Java
-: @@snip [x]($akka$/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #event-wrapper }
+: @@snip [PersistentActorCompileOnlyTest.java](/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #event-wrapper }

Then install it on a persistent behavior:

Scala
-: @@snip [x]($akka$/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentBehaviorSpec.scala) { #install-event-adapter }
+: @@snip [PersistentBehaviorSpec.scala](/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentBehaviorSpec.scala) { #install-event-adapter }

Java
-: @@snip [x]($akka$/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #install-event-adapter }
+: @@snip [PersistentActorCompileOnlyTest.java](/akka-persistence-typed/src/test/java/akka/persistence/typed/javadsl/PersistentActorCompileOnlyTest.java) { #install-event-adapter }

## Wrapping Persistent Behaviors

@@ -297,10 +297,10 @@ other behaviors such as `Behaviors.setup` in order to access the `ActorContext`

to access the actor logging upon taking snapshots for debugging purposes.

Scala
-: @@snip [BasicPersistentActorCompileOnly.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorsCompileOnly.scala) { #wrapPersistentBehavior }
+: @@snip [BasicPersistentBehaviorsCompileOnly.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorsCompileOnly.scala) { #wrapPersistentBehavior }

Java
-: @@snip [BasicPersistentBehaviorsTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/BasicPersistentBehaviorsTest.java) { #wrapPersistentBehavior }
+: @@snip [BasicPersistentBehaviorsTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/BasicPersistentBehaviorsTest.java) { #wrapPersistentBehavior }

## Journal failures

@@ -311,10 +311,10 @@ any `BackoffSupervisorStrategy`. It is not possible to use the normal supervisio
Scala
-: @@snip [BasicPersistentBehaviorsSpec.scala]($akka$/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorsCompileOnly.scala) { #supervision }
+: @@snip [BasicPersistentBehaviorsCompileOnly.scala](/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorsCompileOnly.scala) { #supervision }

Java
-: @@snip [BasicPersistentBehaviorsTest.java]($akka$/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/BasicPersistentBehaviorsTest.java) { #supervision }
+: @@snip [BasicPersistentBehaviorsTest.java](/akka-persistence-typed/src/test/java/jdocs/akka/persistence/typed/BasicPersistentBehaviorsTest.java) { #supervision }

## Journal rejections

diff --git a/akka-docs/src/main/paradox/typed/stash.md b/akka-docs/src/main/paradox/typed/stash.md
index 55cdf2f0fd..8119af184c 100644
--- a/akka-docs/src/main/paradox/typed/stash.md
+++ b/akka-docs/src/main/paradox/typed/stash.md
@@ -27,10 +27,10 @@ When a new state is saved in the database it also stashes incoming messages to m

processing sequential, one after the other without multiple pending writes.

Scala
-: @@snip [StashDocSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StashDocSpec.scala) { #stashing }
+: @@snip [StashDocSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StashDocSpec.scala) { #stashing }

Java
-: @@snip [StashDocTest.java]($akka$/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/StashDocTest.java) {
+: @@snip [StashDocTest.java](/akka-actor-typed-tests/src/test/java/jdocs/akka/typed/StashDocTest.java) {
#import
#db
#stashing

diff --git a/akka-docs/src/main/paradox/typed/stream.md b/akka-docs/src/main/paradox/typed/stream.md
index 686b13d4bb..ae542883f0 100644
--- a/akka-docs/src/main/paradox/typed/stream.md
+++ b/akka-docs/src/main/paradox/typed/stream.md
@@ -33,10 +33,10 @@ This module is currently marked as @ref:[may change](../common/may-change.md) in

A stream that is driven by messages sent to a particular actor can be started with @scala[@scaladoc[`ActorSource.actorRef`](akka.stream.typed.scaladsl.ActorSource#actorRef)]@java[@javadoc[`ActorSource.actorRef`](akka.stream.typed.javadsl.ActorSource#actorRef)]. This source materializes to a typed `ActorRef` which only accepts messages that are of the same type as the stream.

Scala
-: @@snip [ActorSourceSinkExample.scala]($akka$/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala) { #actor-source-ref }
+: @@snip [ActorSourceSinkExample.scala](/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala) { #actor-source-ref }

Java
-: @@snip [ActorSourceExample.java]($akka$/akka-stream-typed/src/test/java/docs/akka/stream/typed/ActorSourceExample.java) { #actor-source-ref }
+: @@snip [ActorSourceExample.java](/akka-stream-typed/src/test/java/docs/akka/stream/typed/ActorSourceExample.java) { #actor-source-ref }

## Actor Sink

@@ -44,15 +44,15 @@ There are two sinks available that accept typed `ActorRef`s.

To send all of the messages from a stream to an actor without considering backpressure, use @scala[@scaladoc[`ActorSink.actorRef`](akka.stream.typed.scaladsl.ActorSink#actorRef)]@java[@javadoc[`ActorSink.actorRef`](akka.stream.typed.javadsl.ActorSink#actorRef)].
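Before the project's snippet, a sketch of the shape of `ActorSink.actorRef` (hypothetical `Protocol`; assumes the `akka-stream-typed` API of this version): elements are forwarded to the actor as-is, with dedicated completion and failure messages and no backpressure towards the stream.

```scala
import akka.NotUsed
import akka.actor.typed.ActorRef
import akka.stream.scaladsl.Sink
import akka.stream.typed.scaladsl.ActorSink

object ActorSinkSketch {
  sealed trait Protocol
  final case class Message(msg: String) extends Protocol
  case object Complete extends Protocol
  final case class Fail(ex: Throwable) extends Protocol

  // every stream element is sent to `actor`; Complete/Fail signal the end
  def sinkFor(actor: ActorRef[Protocol]): Sink[Protocol, NotUsed] =
    ActorSink.actorRef(
      ref = actor,
      onCompleteMessage = Complete,
      onFailureMessage = Fail.apply)
}
```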
Scala
-: @@snip [ActorSourceSinkExample.scala]($akka$/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala) { #actor-sink-ref }
+: @@snip [ActorSourceSinkExample.scala](/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala) { #actor-sink-ref }

Java
-: @@snip [ActorSinkExample.java]($akka$/akka-stream-typed/src/test/java/docs/akka/stream/typed/ActorSinkExample.java) { #actor-sink-ref }
+: @@snip [ActorSinkExample.java](/akka-stream-typed/src/test/java/docs/akka/stream/typed/ActorSinkExample.java) { #actor-sink-ref }

For an actor to be able to react to backpressure, a protocol needs to be introduced between the actor and the stream. Use @scala[@scaladoc[`ActorSink.actorRefWithAck`](akka.stream.typed.scaladsl.ActorSink#actorRefWithAck)]@java[@javadoc[`ActorSink.actorRefWithAck`](akka.stream.typed.javadsl.ActorSink#actorRefWithAck)] to be able to signal demand when the actor is ready to receive more elements.

Scala
-: @@snip [ActorSourceSinkExample.scala]($akka$/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala) { #actor-sink-ref-with-ack }
+: @@snip [ActorSourceSinkExample.scala](/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala) { #actor-sink-ref-with-ack }

Java
-: @@snip [ActorSinkWithAckExample.java]($akka$/akka-stream-typed/src/test/java/docs/akka/stream/typed/ActorSinkWithAckExample.java) { #actor-sink-ref-with-ack }
+: @@snip [ActorSinkWithAckExample.java](/akka-stream-typed/src/test/java/docs/akka/stream/typed/ActorSinkWithAckExample.java) { #actor-sink-ref-with-ack }

diff --git a/akka-docs/src/main/paradox/typed/testing.md b/akka-docs/src/main/paradox/typed/testing.md
index 89b798e6ab..f17d68564f 100644
--- a/akka-docs/src/main/paradox/typed/testing.md
+++ b/akka-docs/src/main/paradox/typed/testing.md
@@ -54,27 +54,27 @@ The following demonstrates how to test:

The examples below require the following imports:

Scala
-: @@snip [SyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #imports }
+: @@snip [SyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #imports }

Java
-: @@snip [SyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #imports }
+: @@snip [SyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #imports }

Each of the tests is testing an actor that, based on the message, executes a different effect to be tested:

Scala
-: @@snip [SyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #under-test }
+: @@snip [SyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #under-test }

Java
-: @@snip [SyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #under-test }
+: @@snip [SyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #under-test }

For creating a child actor, a noop actor is created:

Scala
-: @@snip [SyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #child }
+: @@snip [SyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #child }

Java
-: @@snip [SyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #child }
+: @@snip [SyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #child }

All of the tests make use of the `BehaviorTestKit` to avoid the need for a real `ActorContext`. Some of the tests
make use of the `TestInbox` which allows the creation of an `ActorRef` that can be used for synchronous testing, similar to the

@@ -86,18 +86,18 @@ make use of the `TestInbox` which allows the creation of an `ActorRef` that can

With a name:

Scala
-: @@snip [SyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #test-child }
+: @@snip [SyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #test-child }

Java
-: @@snip [SyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #test-child }
+: @@snip [SyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #test-child }

Anonymously:

Scala
-: @@snip [SyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #test-anonymous-child }
+: @@snip [SyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #test-anonymous-child }

Java
-: @@snip [SyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #test-anonymous-child }
+: @@snip [SyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #test-anonymous-child }

### Sending messages

@@ -105,27 +105,27 @@ For testing sending a message a `TestInbox` is created that provides an `ActorRe
Scala -: @@snip [SyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #test-message } +: @@snip [SyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #test-message } Java -: @@snip [SyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #test-message } +: @@snip [SyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #test-message } Another use case is sending a message to a child actor you can do this by looking up the 'TestInbox' for a child actor from the 'BehaviorTestKit': Scala -: @@snip [SyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #test-child-message } +: @@snip [SyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #test-child-message } Java -: @@snip [SyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #test-child-message } +: @@snip [SyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #test-child-message } For anonymous children the actor names are generated in a deterministic way: Scala -: @@snip [SyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #test-child-message-anonymous } +: @@snip [SyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala) { #test-child-message-anonymous } Java -: @@snip [SyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #test-child-message-anonymous } +: @@snip [SyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/SyncTestingExampleTest.java) { #test-child-message-anonymous } ### Testing other effects @@ -153,10 +153,10 @@ the same in that a single procedure drives the test. Actor under test: Scala -: @@snip [AsyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala) { #under-test } +: @@snip [AsyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala) { #under-test } Java -: @@snip [AsyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/AsyncTestingExampleTest.java) { #under-test } +: @@snip [AsyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/AsyncTestingExampleTest.java) { #under-test } @scala[Tests extend `ActorTestKit`. This provides access to]@java[Tests create an instance of `ActorTestKit`. 
This provides access to]

@@ -165,18 +165,18 @@ Java

* A hook to shut down the ActorSystem from the test suite

Scala
-: @@snip [AsyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala) { #test-header }
+: @@snip [AsyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala) { #test-header }

Java
-: @@snip [AsyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/AsyncTestingExampleTest.java) { #test-header }
+: @@snip [AsyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/AsyncTestingExampleTest.java) { #test-header }

Your test is responsible for shutting down the `ActorSystem`, e.g. using `BeforeAndAfterAll` when using ScalaTest.

Scala
-: @@snip [AsyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala) { #test-shutdown }
+: @@snip [AsyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala) { #test-shutdown }

Java
-: @@snip [AsyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/AsyncTestingExampleTest.java) { #test-shutdown }
+: @@snip [AsyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/AsyncTestingExampleTest.java) { #test-shutdown }

The following demonstrates:

@@ -185,18 +185,18 @@ The following demonstrates:

* Verifying that the actor under test responds via the `TestProbe`

Scala
-: @@snip [AsyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala) { #test-spawn }
+: @@snip [AsyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala) { #test-spawn }

Java
-: @@snip [AsyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/AsyncTestingExampleTest.java) { #test-spawn }
+: @@snip [AsyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/AsyncTestingExampleTest.java) { #test-spawn }

Actors can also be spawned anonymously:

Scala
-: @@snip [AsyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala) { #test-spawn-anonymous }
+: @@snip [AsyncTestingExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala) { #test-spawn-anonymous }

Java
-: @@snip [AsyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/AsyncTestingExampleTest.java) { #test-spawn-anonymous }
+: @@snip [AsyncTestingExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/AsyncTestingExampleTest.java) { #test-spawn-anonymous }

### Test framework integration

@@ -218,10 +218,10 @@ hook it into a ScalaTest test suite.
@@@

Scala
-: @@snip [AsyncTestingExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AbstractActorSpec.scala) { #scalatest-glue }
+: @@snip [AbstractActorSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/AbstractActorSpec.scala) { #scalatest-glue }

Java
-: @@snip [AsyncTestingExampleTest.java]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/JunitIntegrationExampleTest.java) { #junit-integration }
+: @@snip [JunitIntegrationExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/JunitIntegrationExampleTest.java) { #junit-integration }

### Controlling the scheduler

@@ -232,7 +232,7 @@ Making such tests more reliable by using generous timeouts make the tests take a

For such situations, we provide a scheduler where you can manually, explicitly advance the clock.

Scala
-: @@snip [ManualTimerExampleSpec.scala]($akka$/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ManualTimerExampleSpec.scala) { #manual-scheduling-simple }
+: @@snip [ManualTimerExampleSpec.scala](/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ManualTimerExampleSpec.scala) { #manual-scheduling-simple }

Java
-: @@snip [ManualTimerExampleTest.scala]($akka$/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/ManualTimerExampleTest.java) { #manual-scheduling-simple }
+: @@snip [ManualTimerExampleTest.java](/akka-actor-testkit-typed/src/test/java/akka/actor/testkit/typed/javadsl/ManualTimerExampleTest.java) { #manual-scheduling-simple }

diff --git a/build.sbt b/build.sbt
index e359277e61..c6c96de85d 100644
--- a/build.sbt
+++ b/build.sbt
@@ -227,8 +227,6 @@ lazy val docs = akkaModule("akka-docs")
      "algolia.docsearch.index_name" -> "akka_io",
      "google.analytics.account" -> "UA-21117439-1",
      "google.analytics.domain.name" -> "akka.io",
-      "snip.code.base_dir" -> (sourceDirectory in Test).value.getAbsolutePath,
-      "snip.akka.base_dir" -> (baseDirectory in ThisBuild).value.getAbsolutePath,
      "signature.akka.base_dir" -> (baseDirectory in ThisBuild).value.getAbsolutePath,
      "fiddle.code.base_dir" -> (sourceDirectory in Test).value.getAbsolutePath
    ),

diff --git a/project/ParadoxSupport.scala b/project/ParadoxSupport.scala
index 702f563314..c5233a1f93 100644
--- a/project/ParadoxSupport.scala
+++ b/project/ParadoxSupport.scala
@@ -102,11 +102,10 @@ object ParadoxSupport {
      case _ => sys.error("Source references are not supported")
    }
    val file =
-      if (source startsWith "$") {
-        val baseKey = source.drop(1).takeWhile(_ != '$')
-        val base = new File(PropertyUrl(s"signature.$baseKey.base_dir", variables.get).base.trim)
-        val effectiveBase = if (base.isAbsolute) base else new File(page.file.getParentFile, base.toString)
-        new File(effectiveBase, source.drop(baseKey.length + 2))
+      if (source startsWith "/") {
+        // snip.build.base_dir defined by Paradox
+        val base = new File(PropertyUrl("snip.build.base_dir", variables.get).base.trim)
+        new File(base, source)
      } else new File(page.file.getParentFile, source)
    val Signature = """\s*((def|val|type) (\w+)(?=[:(\[]).*)(\s+\=.*)""".r // stupid approximation to match a signature