From 81df4ff4177223f1343cc6e1c9f4e257b9f00342 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Martynas=20Mickevi=C4=8Dius?=
Date: Thu, 11 May 2017 11:59:28 +0300
Subject: [PATCH] #22903 Fix code snippets

* move scala and java sources out of paradox sources
* replace relative paths with $code$ and $akka$
* fix broken includes
---
 akka-docs/build.sbt | 6 +- .../jdocs/AbstractJavaTest.scala | 0 .../jdocs/actor/ActorDocTest.java | 0 .../actor/ByteBufferSerializerDocTest.java | 0 .../actor/DependencyInjectionDocTest.java | 0 .../jdocs/actor/FaultHandlingDocSample.java | 0 .../jdocs/actor/FaultHandlingTest.java | 0 .../jdocs/actor/GraduallyBuiltActor.java | 0 .../jdocs/actor/ImmutableMessage.java | 0 .../jdocs/actor/InboxDocTest.java | 0 .../jdocs/actor/InitializationDocTest.java | 0 .../code => java}/jdocs/actor/Messages.java | 0 .../code => java}/jdocs/actor/MyActor.java | 0 .../jdocs/actor/MyBoundedActor.java | 0 .../jdocs/actor/MyStoppingActor.java | 0 .../jdocs/actor/SampleActor.java | 0 .../jdocs/actor/SampleActorTest.java | 0 .../jdocs/actor/SchedulerDocTest.java | 0 .../jdocs/actor/TypedActorDocTest.java | 0 .../jdocs/actor/fsm/Buncher.java | 0 .../jdocs/actor/fsm/BuncherTest.java | 0 .../code => java}/jdocs/actor/fsm/Events.java | 0 .../jdocs/actor/fsm/FSMDocTest.java | 0 .../jdocs/agent/AgentDocTest.java | 0 .../jdocs/camel/ActivationTestBase.java | 0 .../jdocs/camel/CamelExtensionTest.java | 0 .../code => java}/jdocs/camel/Consumer1.java | 0 .../code => java}/jdocs/camel/Consumer2.java | 0 .../code => java}/jdocs/camel/Consumer3.java | 0 .../code => java}/jdocs/camel/Consumer4.java | 0 .../jdocs/camel/CustomRouteBuilder.java | 0 .../jdocs/camel/CustomRouteTestBase.java | 0 .../jdocs/camel/ErrorThrowingConsumer.java | 0 .../jdocs/camel/FirstProducer.java | 0 .../code => java}/jdocs/camel/Forwarder.java | 0 .../code => java}/jdocs/camel/MyActor.java | 0 .../code => java}/jdocs/camel/MyEndpoint.java | 0 .../jdocs/camel/OnRouteResponseTestBase.java | 0 .../jdocs/camel/OnewaySender.java | 0 .../code => java}/jdocs/camel/Orders.java | 0 .../code => java}/jdocs/camel/Producer1.java | 0 .../jdocs/camel/ProducerTestBase.java | 0 .../jdocs/camel/RequestBodyActor.java | 0 .../code => java}/jdocs/camel/Responder.java | 0 .../jdocs/camel/ResponseReceiver.java | 0 .../jdocs/camel/Transformer.java | 0 .../circuitbreaker/DangerousJavaActor.java | 2 +- .../EvenNoFailureJavaExample.java | 2 +- .../circuitbreaker/TellPatternJavaActor.java | 2 +- .../jdocs/cluster/ClusterDocTest.java | 0 .../jdocs/cluster/FactorialBackend.java | 0 .../jdocs/cluster/FactorialFrontend.java | 0 .../jdocs/cluster/FactorialFrontendMain.java | 0 .../jdocs/cluster/FactorialResult.java | 0 .../jdocs/cluster/MetricsListener.java | 0 .../jdocs/cluster/SimpleClusterListener.java | 0 .../jdocs/cluster/SimpleClusterListener2.java | 0 .../jdocs/cluster/StatsAggregator.java | 0 .../jdocs/cluster/StatsMessages.java | 0 .../jdocs/cluster/StatsSampleClient.java | 0 .../StatsSampleOneMasterClientMain.java | 0 .../cluster/StatsSampleOneMasterMain.java | 0 .../jdocs/cluster/StatsService.java | 0 .../jdocs/cluster/StatsWorker.java | 0 .../jdocs/cluster/TransformationBackend.java | 0 .../jdocs/cluster/TransformationFrontend.java | 0 .../jdocs/cluster/TransformationMessages.java | 0 .../docs => java/jdocs}/config/ConfigDoc.java | 2 +- .../code => java}/jdocs/ddata/DataBot.java | 0 .../jdocs/ddata/DistributedDataDocTest.java | 0 .../jdocs/ddata/ShoppingCart.java | 0 .../jdocs/ddata/TwoPhaseSet.java | 0 .../ddata/protobuf/TwoPhaseSetSerializer.java | 0 
.../protobuf/TwoPhaseSetSerializer2.java | 0 .../TwoPhaseSetSerializerWithCompression.java | 0 .../jdocs/dispatcher/DispatcherDocTest.java | 0 .../jdocs/dispatcher/MyUnboundedMailbox.java | 0 .../MyUnboundedMessageQueueSemantics.java | 0 .../docs => java/jdocs}/duration/Java.java | 2 +- .../jdocs/event/EventBusDocTest.java | 0 .../jdocs/event/LoggingDocTest.java | 0 .../jdocs/extension/ExtensionDocTest.java | 0 .../extension/SettingsExtensionDocTest.java | 0 .../jdocs/future/FutureDocTest.java | 0 .../code => java}/jdocs/io/IODocTest.java | 0 .../jdocs/io/JavaReadBackPressure.java | 0 .../jdocs/io/JavaUdpMulticast.java | 0 .../jdocs/io/JavaUdpMulticastTest.java | 0 .../jdocs/io/UdpConnectedDocTest.java | 0 .../code => java}/jdocs/io/UdpDocTest.java | 0 .../jdocs/io/japi/EchoHandler.java | 0 .../jdocs/io/japi/EchoManager.java | 0 .../jdocs/io/japi/EchoServer.java | 0 .../jdocs/io/japi/IODocTest.java | 0 .../code => java}/jdocs/io/japi/Message.java | 0 .../jdocs/io/japi/SimpleEchoHandler.java | 0 .../code => java}/jdocs/io/japi/Watcher.java | 0 .../pattern/BackoffSupervisorDocTest.java | 0 .../jdocs/pattern/SchedulerPatternTest.java | 0 .../jdocs/pattern/SupervisedAsk.java | 0 .../jdocs/pattern/SupervisedAskSpec.java | 0 .../persistence/LambdaPersistenceDocTest.java | 0 .../LambdaPersistencePluginDocTest.java | 0 .../PersistenceEventAdapterDocTest.java | 0 .../persistence/PersistenceMultiDocTest.java | 0 .../persistence/PersistenceQueryDocTest.java | 0 .../PersistenceSchemaEvolutionDocTest.java | 0 .../persistence/PersistentActorExample.java | 0 .../query/LeveldbPersistenceQueryDocTest.java | 0 .../query/MyEventsByTagJavaPublisher.java | 0 .../remoting/RemoteDeploymentDocTest.java | 0 .../ConsistentHashingRouterDocTest.java | 0 .../jdocs/routing/CustomRouterDocTest.java | 0 .../jdocs/routing/RedundancyGroup.java | 0 .../jdocs/routing/RouterDocTest.java | 0 .../serialization/SerializationDocTest.java | 0 .../jdocs/stream/ActorPublisherDocTest.java | 0 .../jdocs/stream/ActorSubscriberDocTest.java | 0 .../jdocs/stream/BidiFlowDocTest.java | 0 .../jdocs/stream/CompositionDocTest.java | 0 .../jdocs/stream/FlowDocTest.java | 0 .../jdocs/stream/FlowErrorDocTest.java | 0 .../jdocs/stream/FlowParallelismDocTest.java | 0 .../jdocs/stream/GraphCyclesDocTest.java | 0 .../jdocs/stream/GraphDSLDocTest.java | 0 .../jdocs/stream/GraphStageDocTest.java | 0 .../stream/GraphStageLoggingDocTest.java | 0 .../jdocs/stream/HubDocTest.java | 0 .../jdocs/stream/IntegrationDocTest.java | 0 .../jdocs/stream/KillSwitchDocTest.java | 0 .../java/code => java}/jdocs/stream/Main.java | 2 + .../jdocs/stream/MigrationsJava.java | 0 .../jdocs/stream/QuickStartDocTest.java | 0 .../stream/RateTransformationDocTest.java | 0 .../jdocs/stream/ReactiveStreamsDocTest.java | 0 .../jdocs/stream/SilenceSystemOut.java | 0 .../stream/StreamBuffersRateDocTest.java | 0 .../stream/StreamPartialGraphDSLDocTest.java | 0 .../jdocs/stream/StreamTestKitDocTest.java | 0 .../TwitterStreamQuickstartDocTest.java | 0 .../jdocs/stream/io/StreamFileDocTest.java | 0 .../jdocs/stream/io/StreamTcpDocTest.java | 0 .../javadsl/cookbook/RecipeByteStrings.java | 0 .../javadsl/cookbook/RecipeDecompress.java | 0 .../stream/javadsl/cookbook/RecipeDigest.java | 0 .../cookbook/RecipeDroppyBroadcast.java | 0 .../javadsl/cookbook/RecipeFlattenList.java | 0 .../cookbook/RecipeGlobalRateLimit.java | 0 .../stream/javadsl/cookbook/RecipeHold.java | 0 .../javadsl/cookbook/RecipeKeepAlive.java | 0 .../cookbook/RecipeLoggingElements.java | 0 
.../javadsl/cookbook/RecipeManualTrigger.java | 0 .../javadsl/cookbook/RecipeMissedTicks.java | 0 .../cookbook/RecipeMultiGroupByTest.java | 0 .../javadsl/cookbook/RecipeParseLines.java | 0 .../cookbook/RecipeReduceByKeyTest.java | 0 .../stream/javadsl/cookbook/RecipeSeq.java | 0 .../javadsl/cookbook/RecipeSimpleDrop.java | 0 .../stream/javadsl/cookbook/RecipeTest.java | 0 .../javadsl/cookbook/RecipeWorkerPool.java | 0 .../jdocs/testkit/ParentChildTest.java | 0 .../jdocs/testkit/TestKitDocTest.java | 0 .../jdocs/testkit/TestKitSampleTest.java | 0 akka-docs/src/main/paradox/java/actors.md | 102 +++++------ akka-docs/src/main/paradox/java/agents.md | 14 +- akka-docs/src/main/paradox/java/camel.md | 50 +++--- .../src/main/paradox/java/cluster-client.md | 14 +- .../src/main/paradox/java/cluster-metrics.md | 12 +- .../src/main/paradox/java/cluster-sharding.md | 14 +- .../main/paradox/java/cluster-singleton.md | 8 +- .../src/main/paradox/java/cluster-usage.md | 32 ++-- .../src/main/paradox/java/dispatchers.md | 22 +-- .../src/main/paradox/java/distributed-data.md | 62 +++---- .../main/paradox/java/distributed-pub-sub.md | 18 +- akka-docs/src/main/paradox/java/event-bus.md | 30 ++-- .../src/main/paradox/java/extending-akka.md | 20 +-- .../paradox/java/fault-tolerance-sample.md | 2 +- .../src/main/paradox/java/fault-tolerance.md | 22 +-- akka-docs/src/main/paradox/java/fsm.md | 36 ++-- akka-docs/src/main/paradox/java/futures.md | 70 ++++---- akka-docs/src/main/paradox/java/howto.md | 8 +- akka-docs/src/main/paradox/java/io-tcp.md | 32 ++-- akka-docs/src/main/paradox/java/io-udp.md | 12 +- akka-docs/src/main/paradox/java/io.md | 2 +- akka-docs/src/main/paradox/java/logging.md | 14 +- akka-docs/src/main/paradox/java/mailboxes.md | 30 ++-- .../paradox/java/persistence-query-leveldb.md | 12 +- .../main/paradox/java/persistence-query.md | 32 ++-- .../java/persistence-schema-evolution.md | 28 +-- .../src/main/paradox/java/persistence.md | 116 ++++++------- .../src/main/paradox/java/remoting-artery.md | 20 +-- akka-docs/src/main/paradox/java/remoting.md | 16 +- akka-docs/src/main/paradox/java/routing.md | 160 +++++++++--------- akka-docs/src/main/paradox/java/scheduler.md | 16 +- .../src/main/paradox/java/serialization.md | 26 +-- .../paradox/java/stream/stream-composition.md | 30 ++-- .../paradox/java/stream/stream-cookbook.md | 62 +++---- .../paradox/java/stream/stream-customize.md | 26 +-- .../paradox/java/stream/stream-dynamic.md | 20 +-- .../main/paradox/java/stream/stream-error.md | 14 +- .../java/stream/stream-flows-and-basics.md | 16 +- .../main/paradox/java/stream/stream-graphs.md | 38 ++--- .../java/stream/stream-integrations.md | 58 +++---- .../src/main/paradox/java/stream/stream-io.md | 12 +- .../paradox/java/stream/stream-parallelism.md | 8 +- .../paradox/java/stream/stream-quickstart.md | 48 +++--- .../main/paradox/java/stream/stream-rate.md | 28 +-- .../paradox/java/stream/stream-testkit.md | 20 +-- akka-docs/src/main/paradox/java/testing.md | 99 +++++------ .../src/main/paradox/java/typed-actors.md | 36 ++-- akka-docs/src/main/paradox/scala/actordsl.md | 16 +- akka-docs/src/main/paradox/scala/actors.md | 90 +++++----- .../src/main/paradox/scala/additional/faq.md | 2 +- .../src/main/paradox/scala/additional/osgi.md | 2 +- akka-docs/src/main/paradox/scala/agents.md | 20 +-- akka-docs/src/main/paradox/scala/camel.md | 42 ++--- .../src/main/paradox/scala/cluster-client.md | 14 +- .../src/main/paradox/scala/cluster-metrics.md | 12 +- .../main/paradox/scala/cluster-sharding.md | 14 +- 
.../main/paradox/scala/cluster-singleton.md | 10 +- .../src/main/paradox/scala/cluster-usage.md | 40 ++--- .../paradox/scala/common/circuitbreaker.md | 16 +- .../src/main/paradox/scala/common/duration.md | 10 +- .../paradox/scala/dev/multi-jvm-testing.md | 2 +- .../paradox/scala/dev/multi-node-testing.md | 6 +- .../src/main/paradox/scala/dispatchers.md | 22 +-- .../main/paradox/scala/distributed-data.md | 62 +++---- .../main/paradox/scala/distributed-pub-sub.md | 18 +- akka-docs/src/main/paradox/scala/event-bus.md | 26 +-- .../src/main/paradox/scala/extending-akka.md | 18 +- .../paradox/scala/fault-tolerance-sample.md | 2 +- .../src/main/paradox/scala/fault-tolerance.md | 24 +-- akka-docs/src/main/paradox/scala/fsm.md | 40 ++--- akka-docs/src/main/paradox/scala/futures.md | 60 +++---- .../paradox/scala/general/configuration.md | 32 ++-- .../src/main/paradox/scala/general/jmm.md | 2 +- .../general/stream/stream-configuration.md | 2 +- .../main/paradox/scala/general/supervision.md | 16 +- akka-docs/src/main/paradox/scala/howto.md | 4 +- akka-docs/src/main/paradox/scala/io-tcp.md | 34 ++-- akka-docs/src/main/paradox/scala/io-udp.md | 12 +- akka-docs/src/main/paradox/scala/io.md | 2 +- akka-docs/src/main/paradox/scala/logging.md | 12 +- akka-docs/src/main/paradox/scala/mailboxes.md | 28 +-- .../scala/persistence-query-leveldb.md | 12 +- .../main/paradox/scala/persistence-query.md | 30 ++-- .../scala/persistence-schema-evolution.md | 28 +-- .../src/main/paradox/scala/persistence.md | 118 ++++++------- .../src/main/paradox/scala/remoting-artery.md | 20 +-- akka-docs/src/main/paradox/scala/remoting.md | 16 +- akka-docs/src/main/paradox/scala/routing.md | 160 +++++++++--------- akka-docs/src/main/paradox/scala/scheduler.md | 12 +- .../src/main/paradox/scala/serialization.md | 20 +-- .../scala/stream/stream-composition.md | 28 +-- .../paradox/scala/stream/stream-cookbook.md | 50 +++--- .../paradox/scala/stream/stream-customize.md | 32 ++-- .../paradox/scala/stream/stream-dynamic.md | 22 +-- .../main/paradox/scala/stream/stream-error.md | 14 +- .../scala/stream/stream-flows-and-basics.md | 16 +- .../paradox/scala/stream/stream-graphs.md | 46 ++--- .../scala/stream/stream-integrations.md | 58 +++---- .../main/paradox/scala/stream/stream-io.md | 12 +- .../scala/stream/stream-parallelism.md | 8 +- .../paradox/scala/stream/stream-quickstart.md | 48 +++--- .../main/paradox/scala/stream/stream-rate.md | 28 +-- .../paradox/scala/stream/stream-testkit.md | 20 +-- akka-docs/src/main/paradox/scala/testing.md | 60 +++---- .../src/main/paradox/scala/testkit-example.md | 2 +- .../src/main/paradox/scala/typed-actors.md | 40 ++--- akka-docs/src/main/paradox/scala/typed.md | 14 +- .../code => scala}/docs/CompileOnlySpec.scala | 0 .../docs/actor/ActorDocSpec.scala | 0 .../actor/ByteBufferSerializerDocSpec.scala | 0 .../docs/actor/FSMDocSpec.scala | 0 .../docs/actor/FaultHandlingDocSample.scala | 0 .../docs/actor/FaultHandlingDocSpec.scala | 0 .../docs/actor/InitializationDocSpec.scala | 0 .../docs/actor/PropsEdgeCaseSpec.scala | 0 .../docs/actor/SchedulerDocSpec.scala | 0 .../actor/SharedMutableStateDocSpec.scala | 0 .../docs/actor/TypedActorDocSpec.scala | 0 .../docs/actor/UnnestedReceives.scala | 0 .../docs/agent/AgentDocSpec.scala | 0 .../docs/akka/typed/IntroSpec.scala | 0 .../code => scala}/docs/camel/Consumers.scala | 0 .../docs/camel/CustomRoute.scala | 0 .../docs/camel/Introduction.scala | 0 .../code => scala}/docs/camel/Producers.scala | 0 .../docs/camel/PublishSubscribe.scala | 0 
.../CircuitBreakerDocSpec.scala | 0 .../docs/cluster/ClusterDocSpec.scala | 0 .../docs/cluster/FactorialBackend.scala | 0 .../docs/cluster/FactorialFrontend.scala | 0 .../docs/cluster/MetricsListener.scala | 0 .../docs/cluster/SimpleClusterListener.scala | 0 .../docs/cluster/SimpleClusterListener2.scala | 0 .../docs/cluster/TransformationBackend.scala | 0 .../docs/cluster/TransformationFrontend.scala | 0 .../docs/cluster/TransformationMessages.scala | 0 .../docs/config/ConfigDocSpec.scala | 0 .../docs/ddata/DistributedDataDocSpec.scala | 0 .../docs/ddata/ShoppingCart.scala | 0 .../docs/ddata/TwoPhaseSet.scala | 0 .../protobuf/TwoPhaseSetSerializer.scala | 0 .../protobuf/TwoPhaseSetSerializer2.scala | 0 .../docs/dispatcher/DispatcherDocSpec.scala | 0 .../docs/dispatcher/MyUnboundedMailbox.scala | 0 .../code => scala}/docs/duration/Sample.scala | 0 .../docs/event/EventBusDocSpec.scala | 0 .../docs/event/LoggingDocSpec.scala | 0 .../docs/extension/ExtensionDocSpec.scala | 0 .../extension/SettingsExtensionDocSpec.scala | 0 .../code => scala}/docs/faq/Faq.scala | 0 .../docs/future/FutureDocSpec.scala | 0 .../code => scala}/docs/io/EchoServer.scala | 0 .../code => scala}/docs/io/IODocSpec.scala | 0 .../docs/io/ReadBackPressure.scala | 0 .../docs/io/ScalaUdpMulticast.scala | 0 .../docs/io/ScalaUdpMulticastSpec.scala | 0 .../code => scala}/docs/io/UdpDocSpec.scala | 0 .../pattern/BackoffSupervisorDocSpec.scala | 0 .../docs/pattern/SchedulerPatternSpec.scala | 0 .../docs/persistence/PersistenceDocSpec.scala | 0 .../PersistenceEventAdapterDocSpec.scala | 0 .../persistence/PersistenceMultiDocSpec.scala | 0 .../PersistencePluginDocSpec.scala | 0 .../PersistenceSchemaEvolutionDocSpec.scala | 0 .../PersistenceSerializerDocSpec.scala | 0 .../persistence/PersistentActorExample.scala | 0 .../LeveldbPersistenceQueryDocSpec.scala | 0 .../query/MyEventsByTagPublisher.scala | 0 .../query/PersistenceQueryDocSpec.scala | 0 .../remoting/RemoteDeploymentDocSpec.scala | 0 .../ConsistentHashingRouterDocSpec.scala | 0 .../docs/routing/CustomRouterDocSpec.scala | 0 .../docs/routing/RouterDocSpec.scala | 0 .../serialization/SerializationDocSpec.scala | 0 .../docs/stream/ActorPublisherDocSpec.scala | 0 .../docs/stream/ActorSubscriberDocSpec.scala | 0 .../docs/stream/BidiFlowDocSpec.scala | 0 .../docs/stream/CompositionDocSpec.scala | 0 .../docs/stream/FlowDocSpec.scala | 0 .../docs/stream/FlowErrorDocSpec.scala | 0 .../docs/stream/FlowParallelismDocSpec.scala | 0 .../docs/stream/GraphCyclesSpec.scala | 0 .../docs/stream/GraphDSLDocSpec.scala | 0 .../docs/stream/GraphStageDocSpec.scala | 0 .../stream/GraphStageLoggingDocSpec.scala | 0 .../docs/stream/HubsDocSpec.scala | 0 .../docs/stream/IntegrationDocSpec.scala | 0 .../docs/stream/KillSwitchDocSpec.scala | 0 .../docs/stream/MigrationsScala.scala | 0 .../docs/stream/QuickStartDocSpec.scala | 0 .../stream/RateTransformationDocSpec.scala | 0 .../docs/stream/ReactiveStreamsDocSpec.scala | 0 .../docs/stream/StreamBuffersRateSpec.scala | 0 .../stream/StreamPartialGraphDSLDocSpec.scala | 0 .../docs/stream/StreamTestKitDocSpec.scala | 0 .../TwitterStreamQuickstartDocSpec.scala | 0 .../stream/cookbook/RecipeByteStrings.scala | 0 .../cookbook/RecipeCollectingMetrics.scala | 0 .../stream/cookbook/RecipeDecompress.scala | 0 .../docs/stream/cookbook/RecipeDigest.scala | 0 .../cookbook/RecipeDroppyBroadcast.scala | 0 .../stream/cookbook/RecipeFlattenSeq.scala | 0 .../cookbook/RecipeGlobalRateLimit.scala | 0 .../docs/stream/cookbook/RecipeHold.scala | 0 
.../stream/cookbook/RecipeKeepAlive.scala | 0 .../cookbook/RecipeLoggingElements.scala | 0 .../stream/cookbook/RecipeManualTrigger.scala | 0 .../stream/cookbook/RecipeMissedTicks.scala | 0 .../stream/cookbook/RecipeMultiGroupBy.scala | 0 .../stream/cookbook/RecipeParseLines.scala | 0 .../stream/cookbook/RecipeReduceByKey.scala | 0 .../docs/stream/cookbook/RecipeSeq.scala | 0 .../stream/cookbook/RecipeSimpleDrop.scala | 0 .../docs/stream/cookbook/RecipeSpec.scala | 0 .../stream/cookbook/RecipeWorkerPool.scala | 0 .../docs/stream/io/StreamFileDocSpec.scala | 0 .../docs/stream/io/StreamTcpDocSpec.scala | 0 .../docs/testkit/ParentChildSpec.scala | 0 .../docs/testkit/PlainWordSpec.scala | 0 .../docs/testkit/TestKitUsageSpec.scala | 0 .../docs/testkit/TestkitDocSpec.scala | 0 384 files changed, 1609 insertions(+), 1602 deletions(-) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/AbstractJavaTest.scala (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/ActorDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/ByteBufferSerializerDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/DependencyInjectionDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/FaultHandlingDocSample.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/FaultHandlingTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/GraduallyBuiltActor.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/ImmutableMessage.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/InboxDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/InitializationDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/Messages.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/MyActor.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/MyBoundedActor.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/MyStoppingActor.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/SampleActor.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/SampleActorTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/SchedulerDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/TypedActorDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/fsm/Buncher.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/fsm/BuncherTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/fsm/Events.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/actor/fsm/FSMDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/agent/AgentDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/ActivationTestBase.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/CamelExtensionTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/Consumer1.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/Consumer2.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/Consumer3.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/Consumer4.java (100%) 
rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/CustomRouteBuilder.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/CustomRouteTestBase.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/ErrorThrowingConsumer.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/FirstProducer.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/Forwarder.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/MyActor.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/MyEndpoint.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/OnRouteResponseTestBase.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/OnewaySender.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/Orders.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/Producer1.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/ProducerTestBase.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/RequestBodyActor.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/Responder.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/ResponseReceiver.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/camel/Transformer.java (100%) rename akka-docs/src/main/{paradox/scala/common/code/docs => java/jdocs}/circuitbreaker/DangerousJavaActor.java (98%) rename akka-docs/src/main/{paradox/scala/common/code/docs => java/jdocs}/circuitbreaker/EvenNoFailureJavaExample.java (97%) rename akka-docs/src/main/{paradox/scala/common/code/docs => java/jdocs}/circuitbreaker/TellPatternJavaActor.java (98%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/ClusterDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/FactorialBackend.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/FactorialFrontend.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/FactorialFrontendMain.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/FactorialResult.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/MetricsListener.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/SimpleClusterListener.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/SimpleClusterListener2.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/StatsAggregator.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/StatsMessages.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/StatsSampleClient.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/StatsSampleOneMasterClientMain.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/StatsSampleOneMasterMain.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/StatsService.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/StatsWorker.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/TransformationBackend.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/TransformationFrontend.java (100%) rename 
akka-docs/src/main/{paradox/java/code => java}/jdocs/cluster/TransformationMessages.java (100%) rename akka-docs/src/main/{paradox/scala/general/code/docs => java/jdocs}/config/ConfigDoc.java (97%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/ddata/DataBot.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/ddata/DistributedDataDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/ddata/ShoppingCart.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/ddata/TwoPhaseSet.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/ddata/protobuf/TwoPhaseSetSerializer.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/ddata/protobuf/TwoPhaseSetSerializer2.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/ddata/protobuf/TwoPhaseSetSerializerWithCompression.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/dispatcher/DispatcherDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/dispatcher/MyUnboundedMailbox.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java (100%) rename akka-docs/src/main/{paradox/scala/common/code/docs => java/jdocs}/duration/Java.java (96%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/event/EventBusDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/event/LoggingDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/extension/ExtensionDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/extension/SettingsExtensionDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/future/FutureDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/IODocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/JavaReadBackPressure.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/JavaUdpMulticast.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/JavaUdpMulticastTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/UdpConnectedDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/UdpDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/japi/EchoHandler.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/japi/EchoManager.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/japi/EchoServer.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/japi/IODocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/japi/Message.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/japi/SimpleEchoHandler.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/io/japi/Watcher.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/pattern/BackoffSupervisorDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/pattern/SchedulerPatternTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/pattern/SupervisedAsk.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/pattern/SupervisedAskSpec.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/persistence/LambdaPersistenceDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code 
=> java}/jdocs/persistence/LambdaPersistencePluginDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/persistence/PersistenceEventAdapterDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/persistence/PersistenceMultiDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/persistence/PersistenceQueryDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/persistence/PersistentActorExample.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/persistence/query/MyEventsByTagJavaPublisher.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/remoting/RemoteDeploymentDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/routing/ConsistentHashingRouterDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/routing/CustomRouterDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/routing/RedundancyGroup.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/routing/RouterDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/serialization/SerializationDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/ActorPublisherDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/ActorSubscriberDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/BidiFlowDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/CompositionDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/FlowDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/FlowErrorDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/FlowParallelismDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/GraphCyclesDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/GraphDSLDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/GraphStageDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/GraphStageLoggingDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/HubDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/IntegrationDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/KillSwitchDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/Main.java (82%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/MigrationsJava.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/QuickStartDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/RateTransformationDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/ReactiveStreamsDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/SilenceSystemOut.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/StreamBuffersRateDocTest.java (100%) rename 
akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/StreamPartialGraphDSLDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/StreamTestKitDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/TwitterStreamQuickstartDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/io/StreamFileDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/io/StreamTcpDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeDecompress.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeDigest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeFlattenList.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeHold.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeKeepAlive.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeMissedTicks.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeMultiGroupByTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeParseLines.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeSeq.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeSimpleDrop.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/testkit/ParentChildTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/testkit/TestKitDocTest.java (100%) rename akka-docs/src/main/{paradox/java/code => java}/jdocs/testkit/TestKitSampleTest.java (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/CompileOnlySpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/actor/ActorDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/actor/ByteBufferSerializerDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/actor/FSMDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/actor/FaultHandlingDocSample.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/actor/FaultHandlingDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/actor/InitializationDocSpec.scala (100%) rename 
akka-docs/src/main/{paradox/scala/code => scala}/docs/actor/PropsEdgeCaseSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/actor/SchedulerDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/actor/SharedMutableStateDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/actor/TypedActorDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/actor/UnnestedReceives.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/agent/AgentDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/akka/typed/IntroSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/camel/Consumers.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/camel/CustomRoute.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/camel/Introduction.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/camel/Producers.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/camel/PublishSubscribe.scala (100%) rename akka-docs/src/main/{paradox/scala/common/code => scala}/docs/circuitbreaker/CircuitBreakerDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/cluster/ClusterDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/cluster/FactorialBackend.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/cluster/FactorialFrontend.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/cluster/MetricsListener.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/cluster/SimpleClusterListener.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/cluster/SimpleClusterListener2.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/cluster/TransformationBackend.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/cluster/TransformationFrontend.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/cluster/TransformationMessages.scala (100%) rename akka-docs/src/main/{paradox/scala/general/code => scala}/docs/config/ConfigDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/ddata/DistributedDataDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/ddata/ShoppingCart.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/ddata/TwoPhaseSet.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/ddata/protobuf/TwoPhaseSetSerializer.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/dispatcher/DispatcherDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/dispatcher/MyUnboundedMailbox.scala (100%) rename akka-docs/src/main/{paradox/scala/common/code => scala}/docs/duration/Sample.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/event/EventBusDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/event/LoggingDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/extension/ExtensionDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/extension/SettingsExtensionDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/additional/code => 
scala}/docs/faq/Faq.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/future/FutureDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/io/EchoServer.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/io/IODocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/io/ReadBackPressure.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/io/ScalaUdpMulticast.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/io/ScalaUdpMulticastSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/io/UdpDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/pattern/BackoffSupervisorDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/pattern/SchedulerPatternSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/persistence/PersistenceDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/persistence/PersistenceEventAdapterDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/persistence/PersistenceMultiDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/persistence/PersistencePluginDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/persistence/PersistenceSerializerDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/persistence/PersistentActorExample.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/persistence/query/MyEventsByTagPublisher.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/persistence/query/PersistenceQueryDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/remoting/RemoteDeploymentDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/routing/ConsistentHashingRouterDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/routing/CustomRouterDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/routing/RouterDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/serialization/SerializationDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/ActorPublisherDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/ActorSubscriberDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/BidiFlowDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/CompositionDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/FlowDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/FlowErrorDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/FlowParallelismDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/GraphCyclesSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/GraphDSLDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/GraphStageDocSpec.scala (100%) 
rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/GraphStageLoggingDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/HubsDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/IntegrationDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/KillSwitchDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/MigrationsScala.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/QuickStartDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/RateTransformationDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/ReactiveStreamsDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/StreamBuffersRateSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/StreamPartialGraphDSLDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/StreamTestKitDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/TwitterStreamQuickstartDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeByteStrings.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeCollectingMetrics.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeDecompress.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeDigest.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeDroppyBroadcast.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeFlattenSeq.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeGlobalRateLimit.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeHold.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeKeepAlive.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeLoggingElements.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeManualTrigger.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeMissedTicks.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeMultiGroupBy.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeParseLines.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeReduceByKey.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeSeq.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeSimpleDrop.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/cookbook/RecipeWorkerPool.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/io/StreamFileDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/stream/io/StreamTcpDocSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => 
scala}/docs/testkit/ParentChildSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/testkit/PlainWordSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/testkit/TestKitUsageSpec.scala (100%) rename akka-docs/src/main/{paradox/scala/code => scala}/docs/testkit/TestkitDocSpec.scala (100%) diff --git a/akka-docs/build.sbt b/akka-docs/build.sbt index e0b1b1a411..da9af94abe 100644 --- a/akka-docs/build.sbt +++ b/akka-docs/build.sbt @@ -8,7 +8,7 @@ Formatting.docFormatSettings Dependencies.docs unmanagedSourceDirectories in ScalariformKeys.format in Test <<= unmanagedSourceDirectories in Test -//TODO: additionalTasks in ValidatePR += paradox in Paradox +additionalTasks in ValidatePR += paradox enablePlugins(ScaladocNoVerificationOfDiagrams) disablePlugins(MimaPlugin) @@ -17,7 +17,9 @@ enablePlugins(ParadoxPlugin) paradoxProperties ++= Map( "extref.wikipedia.base_url" -> "https://en.wikipedia.org/wiki/%s", "scala.version" -> scalaVersion.value, - "akka.version" -> version.value + "akka.version" -> version.value, + "snip.code.base_dir" -> (sourceDirectory in Compile).value.getAbsolutePath, + "snip.akka.base_dir" -> ((baseDirectory in Compile).value / "..").getAbsolutePath ) paradoxTheme := Some("com.lightbend.akka" % "paradox-theme-akka" % "0.1.0-SNAPSHOT") paradoxNavigationDepth := 1 diff --git a/akka-docs/src/main/paradox/java/code/jdocs/AbstractJavaTest.scala b/akka-docs/src/main/java/jdocs/AbstractJavaTest.scala similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/AbstractJavaTest.scala rename to akka-docs/src/main/java/jdocs/AbstractJavaTest.scala diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/ActorDocTest.java b/akka-docs/src/main/java/jdocs/actor/ActorDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/ActorDocTest.java rename to akka-docs/src/main/java/jdocs/actor/ActorDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/ByteBufferSerializerDocTest.java b/akka-docs/src/main/java/jdocs/actor/ByteBufferSerializerDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/ByteBufferSerializerDocTest.java rename to akka-docs/src/main/java/jdocs/actor/ByteBufferSerializerDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/DependencyInjectionDocTest.java b/akka-docs/src/main/java/jdocs/actor/DependencyInjectionDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/DependencyInjectionDocTest.java rename to akka-docs/src/main/java/jdocs/actor/DependencyInjectionDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/FaultHandlingDocSample.java b/akka-docs/src/main/java/jdocs/actor/FaultHandlingDocSample.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/FaultHandlingDocSample.java rename to akka-docs/src/main/java/jdocs/actor/FaultHandlingDocSample.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/FaultHandlingTest.java b/akka-docs/src/main/java/jdocs/actor/FaultHandlingTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/FaultHandlingTest.java rename to akka-docs/src/main/java/jdocs/actor/FaultHandlingTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/GraduallyBuiltActor.java b/akka-docs/src/main/java/jdocs/actor/GraduallyBuiltActor.java similarity index 100% rename from 
akka-docs/src/main/paradox/java/code/jdocs/actor/GraduallyBuiltActor.java rename to akka-docs/src/main/java/jdocs/actor/GraduallyBuiltActor.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/ImmutableMessage.java b/akka-docs/src/main/java/jdocs/actor/ImmutableMessage.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/ImmutableMessage.java rename to akka-docs/src/main/java/jdocs/actor/ImmutableMessage.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/InboxDocTest.java b/akka-docs/src/main/java/jdocs/actor/InboxDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/InboxDocTest.java rename to akka-docs/src/main/java/jdocs/actor/InboxDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/InitializationDocTest.java b/akka-docs/src/main/java/jdocs/actor/InitializationDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/InitializationDocTest.java rename to akka-docs/src/main/java/jdocs/actor/InitializationDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/Messages.java b/akka-docs/src/main/java/jdocs/actor/Messages.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/Messages.java rename to akka-docs/src/main/java/jdocs/actor/Messages.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/MyActor.java b/akka-docs/src/main/java/jdocs/actor/MyActor.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/MyActor.java rename to akka-docs/src/main/java/jdocs/actor/MyActor.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/MyBoundedActor.java b/akka-docs/src/main/java/jdocs/actor/MyBoundedActor.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/MyBoundedActor.java rename to akka-docs/src/main/java/jdocs/actor/MyBoundedActor.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/MyStoppingActor.java b/akka-docs/src/main/java/jdocs/actor/MyStoppingActor.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/MyStoppingActor.java rename to akka-docs/src/main/java/jdocs/actor/MyStoppingActor.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/SampleActor.java b/akka-docs/src/main/java/jdocs/actor/SampleActor.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/SampleActor.java rename to akka-docs/src/main/java/jdocs/actor/SampleActor.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/SampleActorTest.java b/akka-docs/src/main/java/jdocs/actor/SampleActorTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/SampleActorTest.java rename to akka-docs/src/main/java/jdocs/actor/SampleActorTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/SchedulerDocTest.java b/akka-docs/src/main/java/jdocs/actor/SchedulerDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/SchedulerDocTest.java rename to akka-docs/src/main/java/jdocs/actor/SchedulerDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/TypedActorDocTest.java b/akka-docs/src/main/java/jdocs/actor/TypedActorDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/TypedActorDocTest.java rename to akka-docs/src/main/java/jdocs/actor/TypedActorDocTest.java diff --git 
a/akka-docs/src/main/paradox/java/code/jdocs/actor/fsm/Buncher.java b/akka-docs/src/main/java/jdocs/actor/fsm/Buncher.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/fsm/Buncher.java rename to akka-docs/src/main/java/jdocs/actor/fsm/Buncher.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/fsm/BuncherTest.java b/akka-docs/src/main/java/jdocs/actor/fsm/BuncherTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/fsm/BuncherTest.java rename to akka-docs/src/main/java/jdocs/actor/fsm/BuncherTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/fsm/Events.java b/akka-docs/src/main/java/jdocs/actor/fsm/Events.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/fsm/Events.java rename to akka-docs/src/main/java/jdocs/actor/fsm/Events.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/actor/fsm/FSMDocTest.java b/akka-docs/src/main/java/jdocs/actor/fsm/FSMDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/actor/fsm/FSMDocTest.java rename to akka-docs/src/main/java/jdocs/actor/fsm/FSMDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/agent/AgentDocTest.java b/akka-docs/src/main/java/jdocs/agent/AgentDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/agent/AgentDocTest.java rename to akka-docs/src/main/java/jdocs/agent/AgentDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/ActivationTestBase.java b/akka-docs/src/main/java/jdocs/camel/ActivationTestBase.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/ActivationTestBase.java rename to akka-docs/src/main/java/jdocs/camel/ActivationTestBase.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/CamelExtensionTest.java b/akka-docs/src/main/java/jdocs/camel/CamelExtensionTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/CamelExtensionTest.java rename to akka-docs/src/main/java/jdocs/camel/CamelExtensionTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/Consumer1.java b/akka-docs/src/main/java/jdocs/camel/Consumer1.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/Consumer1.java rename to akka-docs/src/main/java/jdocs/camel/Consumer1.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/Consumer2.java b/akka-docs/src/main/java/jdocs/camel/Consumer2.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/Consumer2.java rename to akka-docs/src/main/java/jdocs/camel/Consumer2.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/Consumer3.java b/akka-docs/src/main/java/jdocs/camel/Consumer3.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/Consumer3.java rename to akka-docs/src/main/java/jdocs/camel/Consumer3.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/Consumer4.java b/akka-docs/src/main/java/jdocs/camel/Consumer4.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/Consumer4.java rename to akka-docs/src/main/java/jdocs/camel/Consumer4.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/CustomRouteBuilder.java b/akka-docs/src/main/java/jdocs/camel/CustomRouteBuilder.java similarity index 100% rename from 
akka-docs/src/main/paradox/java/code/jdocs/camel/CustomRouteBuilder.java rename to akka-docs/src/main/java/jdocs/camel/CustomRouteBuilder.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/CustomRouteTestBase.java b/akka-docs/src/main/java/jdocs/camel/CustomRouteTestBase.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/CustomRouteTestBase.java rename to akka-docs/src/main/java/jdocs/camel/CustomRouteTestBase.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/ErrorThrowingConsumer.java b/akka-docs/src/main/java/jdocs/camel/ErrorThrowingConsumer.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/ErrorThrowingConsumer.java rename to akka-docs/src/main/java/jdocs/camel/ErrorThrowingConsumer.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/FirstProducer.java b/akka-docs/src/main/java/jdocs/camel/FirstProducer.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/FirstProducer.java rename to akka-docs/src/main/java/jdocs/camel/FirstProducer.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/Forwarder.java b/akka-docs/src/main/java/jdocs/camel/Forwarder.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/Forwarder.java rename to akka-docs/src/main/java/jdocs/camel/Forwarder.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/MyActor.java b/akka-docs/src/main/java/jdocs/camel/MyActor.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/MyActor.java rename to akka-docs/src/main/java/jdocs/camel/MyActor.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/MyEndpoint.java b/akka-docs/src/main/java/jdocs/camel/MyEndpoint.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/MyEndpoint.java rename to akka-docs/src/main/java/jdocs/camel/MyEndpoint.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/OnRouteResponseTestBase.java b/akka-docs/src/main/java/jdocs/camel/OnRouteResponseTestBase.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/OnRouteResponseTestBase.java rename to akka-docs/src/main/java/jdocs/camel/OnRouteResponseTestBase.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/OnewaySender.java b/akka-docs/src/main/java/jdocs/camel/OnewaySender.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/OnewaySender.java rename to akka-docs/src/main/java/jdocs/camel/OnewaySender.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/Orders.java b/akka-docs/src/main/java/jdocs/camel/Orders.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/Orders.java rename to akka-docs/src/main/java/jdocs/camel/Orders.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/Producer1.java b/akka-docs/src/main/java/jdocs/camel/Producer1.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/Producer1.java rename to akka-docs/src/main/java/jdocs/camel/Producer1.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/ProducerTestBase.java b/akka-docs/src/main/java/jdocs/camel/ProducerTestBase.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/ProducerTestBase.java rename to akka-docs/src/main/java/jdocs/camel/ProducerTestBase.java diff --git 
a/akka-docs/src/main/paradox/java/code/jdocs/camel/RequestBodyActor.java b/akka-docs/src/main/java/jdocs/camel/RequestBodyActor.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/RequestBodyActor.java rename to akka-docs/src/main/java/jdocs/camel/RequestBodyActor.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/Responder.java b/akka-docs/src/main/java/jdocs/camel/Responder.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/Responder.java rename to akka-docs/src/main/java/jdocs/camel/Responder.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/ResponseReceiver.java b/akka-docs/src/main/java/jdocs/camel/ResponseReceiver.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/ResponseReceiver.java rename to akka-docs/src/main/java/jdocs/camel/ResponseReceiver.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/camel/Transformer.java b/akka-docs/src/main/java/jdocs/camel/Transformer.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/camel/Transformer.java rename to akka-docs/src/main/java/jdocs/camel/Transformer.java diff --git a/akka-docs/src/main/paradox/scala/common/code/docs/circuitbreaker/DangerousJavaActor.java b/akka-docs/src/main/java/jdocs/circuitbreaker/DangerousJavaActor.java similarity index 98% rename from akka-docs/src/main/paradox/scala/common/code/docs/circuitbreaker/DangerousJavaActor.java rename to akka-docs/src/main/java/jdocs/circuitbreaker/DangerousJavaActor.java index af9a7de764..004916d0d7 100644 --- a/akka-docs/src/main/paradox/scala/common/code/docs/circuitbreaker/DangerousJavaActor.java +++ b/akka-docs/src/main/java/jdocs/circuitbreaker/DangerousJavaActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package docs.circuitbreaker; +package jdocs.circuitbreaker; //#imports1 diff --git a/akka-docs/src/main/paradox/scala/common/code/docs/circuitbreaker/EvenNoFailureJavaExample.java b/akka-docs/src/main/java/jdocs/circuitbreaker/EvenNoFailureJavaExample.java similarity index 97% rename from akka-docs/src/main/paradox/scala/common/code/docs/circuitbreaker/EvenNoFailureJavaExample.java rename to akka-docs/src/main/java/jdocs/circuitbreaker/EvenNoFailureJavaExample.java index e1536e8581..02873b8ff0 100644 --- a/akka-docs/src/main/paradox/scala/common/code/docs/circuitbreaker/EvenNoFailureJavaExample.java +++ b/akka-docs/src/main/java/jdocs/circuitbreaker/EvenNoFailureJavaExample.java @@ -1,4 +1,4 @@ -package docs.circuitbreaker; +package jdocs.circuitbreaker; import akka.actor.AbstractActor; import akka.pattern.CircuitBreaker; diff --git a/akka-docs/src/main/paradox/scala/common/code/docs/circuitbreaker/TellPatternJavaActor.java b/akka-docs/src/main/java/jdocs/circuitbreaker/TellPatternJavaActor.java similarity index 98% rename from akka-docs/src/main/paradox/scala/common/code/docs/circuitbreaker/TellPatternJavaActor.java rename to akka-docs/src/main/java/jdocs/circuitbreaker/TellPatternJavaActor.java index 3e937702f5..ca734af602 100644 --- a/akka-docs/src/main/paradox/scala/common/code/docs/circuitbreaker/TellPatternJavaActor.java +++ b/akka-docs/src/main/java/jdocs/circuitbreaker/TellPatternJavaActor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. 
*/ -package docs.circuitbreaker; +package jdocs.circuitbreaker; import akka.actor.ActorRef; import akka.actor.ReceiveTimeout; diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/ClusterDocTest.java b/akka-docs/src/main/java/jdocs/cluster/ClusterDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/ClusterDocTest.java rename to akka-docs/src/main/java/jdocs/cluster/ClusterDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/FactorialBackend.java b/akka-docs/src/main/java/jdocs/cluster/FactorialBackend.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/FactorialBackend.java rename to akka-docs/src/main/java/jdocs/cluster/FactorialBackend.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/FactorialFrontend.java b/akka-docs/src/main/java/jdocs/cluster/FactorialFrontend.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/FactorialFrontend.java rename to akka-docs/src/main/java/jdocs/cluster/FactorialFrontend.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/FactorialFrontendMain.java b/akka-docs/src/main/java/jdocs/cluster/FactorialFrontendMain.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/FactorialFrontendMain.java rename to akka-docs/src/main/java/jdocs/cluster/FactorialFrontendMain.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/FactorialResult.java b/akka-docs/src/main/java/jdocs/cluster/FactorialResult.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/FactorialResult.java rename to akka-docs/src/main/java/jdocs/cluster/FactorialResult.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/MetricsListener.java b/akka-docs/src/main/java/jdocs/cluster/MetricsListener.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/MetricsListener.java rename to akka-docs/src/main/java/jdocs/cluster/MetricsListener.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/SimpleClusterListener.java b/akka-docs/src/main/java/jdocs/cluster/SimpleClusterListener.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/SimpleClusterListener.java rename to akka-docs/src/main/java/jdocs/cluster/SimpleClusterListener.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/SimpleClusterListener2.java b/akka-docs/src/main/java/jdocs/cluster/SimpleClusterListener2.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/SimpleClusterListener2.java rename to akka-docs/src/main/java/jdocs/cluster/SimpleClusterListener2.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsAggregator.java b/akka-docs/src/main/java/jdocs/cluster/StatsAggregator.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsAggregator.java rename to akka-docs/src/main/java/jdocs/cluster/StatsAggregator.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsMessages.java b/akka-docs/src/main/java/jdocs/cluster/StatsMessages.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsMessages.java rename to akka-docs/src/main/java/jdocs/cluster/StatsMessages.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsSampleClient.java 
b/akka-docs/src/main/java/jdocs/cluster/StatsSampleClient.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsSampleClient.java rename to akka-docs/src/main/java/jdocs/cluster/StatsSampleClient.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsSampleOneMasterClientMain.java b/akka-docs/src/main/java/jdocs/cluster/StatsSampleOneMasterClientMain.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsSampleOneMasterClientMain.java rename to akka-docs/src/main/java/jdocs/cluster/StatsSampleOneMasterClientMain.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsSampleOneMasterMain.java b/akka-docs/src/main/java/jdocs/cluster/StatsSampleOneMasterMain.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsSampleOneMasterMain.java rename to akka-docs/src/main/java/jdocs/cluster/StatsSampleOneMasterMain.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsService.java b/akka-docs/src/main/java/jdocs/cluster/StatsService.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsService.java rename to akka-docs/src/main/java/jdocs/cluster/StatsService.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsWorker.java b/akka-docs/src/main/java/jdocs/cluster/StatsWorker.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/StatsWorker.java rename to akka-docs/src/main/java/jdocs/cluster/StatsWorker.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/TransformationBackend.java b/akka-docs/src/main/java/jdocs/cluster/TransformationBackend.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/TransformationBackend.java rename to akka-docs/src/main/java/jdocs/cluster/TransformationBackend.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/TransformationFrontend.java b/akka-docs/src/main/java/jdocs/cluster/TransformationFrontend.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/TransformationFrontend.java rename to akka-docs/src/main/java/jdocs/cluster/TransformationFrontend.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/cluster/TransformationMessages.java b/akka-docs/src/main/java/jdocs/cluster/TransformationMessages.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/cluster/TransformationMessages.java rename to akka-docs/src/main/java/jdocs/cluster/TransformationMessages.java diff --git a/akka-docs/src/main/paradox/scala/general/code/docs/config/ConfigDoc.java b/akka-docs/src/main/java/jdocs/config/ConfigDoc.java similarity index 97% rename from akka-docs/src/main/paradox/scala/general/code/docs/config/ConfigDoc.java rename to akka-docs/src/main/java/jdocs/config/ConfigDoc.java index e1a4e64516..c71e0b55f8 100644 --- a/akka-docs/src/main/paradox/scala/general/code/docs/config/ConfigDoc.java +++ b/akka-docs/src/main/java/jdocs/config/ConfigDoc.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. 
*/ -package docs.config; +package jdocs.config; import akka.actor.ActorSystem; import com.typesafe.config.*; diff --git a/akka-docs/src/main/paradox/java/code/jdocs/ddata/DataBot.java b/akka-docs/src/main/java/jdocs/ddata/DataBot.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/ddata/DataBot.java rename to akka-docs/src/main/java/jdocs/ddata/DataBot.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/ddata/DistributedDataDocTest.java b/akka-docs/src/main/java/jdocs/ddata/DistributedDataDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/ddata/DistributedDataDocTest.java rename to akka-docs/src/main/java/jdocs/ddata/DistributedDataDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/ddata/ShoppingCart.java b/akka-docs/src/main/java/jdocs/ddata/ShoppingCart.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/ddata/ShoppingCart.java rename to akka-docs/src/main/java/jdocs/ddata/ShoppingCart.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/ddata/TwoPhaseSet.java b/akka-docs/src/main/java/jdocs/ddata/TwoPhaseSet.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/ddata/TwoPhaseSet.java rename to akka-docs/src/main/java/jdocs/ddata/TwoPhaseSet.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/ddata/protobuf/TwoPhaseSetSerializer.java b/akka-docs/src/main/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/ddata/protobuf/TwoPhaseSetSerializer.java rename to akka-docs/src/main/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/ddata/protobuf/TwoPhaseSetSerializer2.java b/akka-docs/src/main/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer2.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/ddata/protobuf/TwoPhaseSetSerializer2.java rename to akka-docs/src/main/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer2.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/ddata/protobuf/TwoPhaseSetSerializerWithCompression.java b/akka-docs/src/main/java/jdocs/ddata/protobuf/TwoPhaseSetSerializerWithCompression.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/ddata/protobuf/TwoPhaseSetSerializerWithCompression.java rename to akka-docs/src/main/java/jdocs/ddata/protobuf/TwoPhaseSetSerializerWithCompression.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/dispatcher/DispatcherDocTest.java b/akka-docs/src/main/java/jdocs/dispatcher/DispatcherDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/dispatcher/DispatcherDocTest.java rename to akka-docs/src/main/java/jdocs/dispatcher/DispatcherDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/dispatcher/MyUnboundedMailbox.java b/akka-docs/src/main/java/jdocs/dispatcher/MyUnboundedMailbox.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/dispatcher/MyUnboundedMailbox.java rename to akka-docs/src/main/java/jdocs/dispatcher/MyUnboundedMailbox.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java b/akka-docs/src/main/java/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java rename to 
akka-docs/src/main/java/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java diff --git a/akka-docs/src/main/paradox/scala/common/code/docs/duration/Java.java b/akka-docs/src/main/java/jdocs/duration/Java.java similarity index 96% rename from akka-docs/src/main/paradox/scala/common/code/docs/duration/Java.java rename to akka-docs/src/main/java/jdocs/duration/Java.java index 4f29d35dc7..20f328019e 100644 --- a/akka-docs/src/main/paradox/scala/common/code/docs/duration/Java.java +++ b/akka-docs/src/main/java/jdocs/duration/Java.java @@ -2,7 +2,7 @@ * Copyright (C) 2013-2017 Lightbend Inc. */ -package docs.duration; +package jdocs.duration; //#import import scala.concurrent.duration.Duration; diff --git a/akka-docs/src/main/paradox/java/code/jdocs/event/EventBusDocTest.java b/akka-docs/src/main/java/jdocs/event/EventBusDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/event/EventBusDocTest.java rename to akka-docs/src/main/java/jdocs/event/EventBusDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/event/LoggingDocTest.java b/akka-docs/src/main/java/jdocs/event/LoggingDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/event/LoggingDocTest.java rename to akka-docs/src/main/java/jdocs/event/LoggingDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/extension/ExtensionDocTest.java b/akka-docs/src/main/java/jdocs/extension/ExtensionDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/extension/ExtensionDocTest.java rename to akka-docs/src/main/java/jdocs/extension/ExtensionDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/extension/SettingsExtensionDocTest.java b/akka-docs/src/main/java/jdocs/extension/SettingsExtensionDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/extension/SettingsExtensionDocTest.java rename to akka-docs/src/main/java/jdocs/extension/SettingsExtensionDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/future/FutureDocTest.java b/akka-docs/src/main/java/jdocs/future/FutureDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/future/FutureDocTest.java rename to akka-docs/src/main/java/jdocs/future/FutureDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/IODocTest.java b/akka-docs/src/main/java/jdocs/io/IODocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/IODocTest.java rename to akka-docs/src/main/java/jdocs/io/IODocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/JavaReadBackPressure.java b/akka-docs/src/main/java/jdocs/io/JavaReadBackPressure.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/JavaReadBackPressure.java rename to akka-docs/src/main/java/jdocs/io/JavaReadBackPressure.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/JavaUdpMulticast.java b/akka-docs/src/main/java/jdocs/io/JavaUdpMulticast.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/JavaUdpMulticast.java rename to akka-docs/src/main/java/jdocs/io/JavaUdpMulticast.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/JavaUdpMulticastTest.java b/akka-docs/src/main/java/jdocs/io/JavaUdpMulticastTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/JavaUdpMulticastTest.java rename to akka-docs/src/main/java/jdocs/io/JavaUdpMulticastTest.java 
diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/UdpConnectedDocTest.java b/akka-docs/src/main/java/jdocs/io/UdpConnectedDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/UdpConnectedDocTest.java rename to akka-docs/src/main/java/jdocs/io/UdpConnectedDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/UdpDocTest.java b/akka-docs/src/main/java/jdocs/io/UdpDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/UdpDocTest.java rename to akka-docs/src/main/java/jdocs/io/UdpDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/japi/EchoHandler.java b/akka-docs/src/main/java/jdocs/io/japi/EchoHandler.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/japi/EchoHandler.java rename to akka-docs/src/main/java/jdocs/io/japi/EchoHandler.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/japi/EchoManager.java b/akka-docs/src/main/java/jdocs/io/japi/EchoManager.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/japi/EchoManager.java rename to akka-docs/src/main/java/jdocs/io/japi/EchoManager.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/japi/EchoServer.java b/akka-docs/src/main/java/jdocs/io/japi/EchoServer.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/japi/EchoServer.java rename to akka-docs/src/main/java/jdocs/io/japi/EchoServer.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/japi/IODocTest.java b/akka-docs/src/main/java/jdocs/io/japi/IODocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/japi/IODocTest.java rename to akka-docs/src/main/java/jdocs/io/japi/IODocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/japi/Message.java b/akka-docs/src/main/java/jdocs/io/japi/Message.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/japi/Message.java rename to akka-docs/src/main/java/jdocs/io/japi/Message.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/japi/SimpleEchoHandler.java b/akka-docs/src/main/java/jdocs/io/japi/SimpleEchoHandler.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/japi/SimpleEchoHandler.java rename to akka-docs/src/main/java/jdocs/io/japi/SimpleEchoHandler.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/io/japi/Watcher.java b/akka-docs/src/main/java/jdocs/io/japi/Watcher.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/io/japi/Watcher.java rename to akka-docs/src/main/java/jdocs/io/japi/Watcher.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/pattern/BackoffSupervisorDocTest.java b/akka-docs/src/main/java/jdocs/pattern/BackoffSupervisorDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/pattern/BackoffSupervisorDocTest.java rename to akka-docs/src/main/java/jdocs/pattern/BackoffSupervisorDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/pattern/SchedulerPatternTest.java b/akka-docs/src/main/java/jdocs/pattern/SchedulerPatternTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/pattern/SchedulerPatternTest.java rename to akka-docs/src/main/java/jdocs/pattern/SchedulerPatternTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/pattern/SupervisedAsk.java b/akka-docs/src/main/java/jdocs/pattern/SupervisedAsk.java 
similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/pattern/SupervisedAsk.java rename to akka-docs/src/main/java/jdocs/pattern/SupervisedAsk.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/pattern/SupervisedAskSpec.java b/akka-docs/src/main/java/jdocs/pattern/SupervisedAskSpec.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/pattern/SupervisedAskSpec.java rename to akka-docs/src/main/java/jdocs/pattern/SupervisedAskSpec.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/persistence/LambdaPersistenceDocTest.java b/akka-docs/src/main/java/jdocs/persistence/LambdaPersistenceDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/persistence/LambdaPersistenceDocTest.java rename to akka-docs/src/main/java/jdocs/persistence/LambdaPersistenceDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/persistence/LambdaPersistencePluginDocTest.java b/akka-docs/src/main/java/jdocs/persistence/LambdaPersistencePluginDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/persistence/LambdaPersistencePluginDocTest.java rename to akka-docs/src/main/java/jdocs/persistence/LambdaPersistencePluginDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/persistence/PersistenceEventAdapterDocTest.java b/akka-docs/src/main/java/jdocs/persistence/PersistenceEventAdapterDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/persistence/PersistenceEventAdapterDocTest.java rename to akka-docs/src/main/java/jdocs/persistence/PersistenceEventAdapterDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/persistence/PersistenceMultiDocTest.java b/akka-docs/src/main/java/jdocs/persistence/PersistenceMultiDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/persistence/PersistenceMultiDocTest.java rename to akka-docs/src/main/java/jdocs/persistence/PersistenceMultiDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/persistence/PersistenceQueryDocTest.java b/akka-docs/src/main/java/jdocs/persistence/PersistenceQueryDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/persistence/PersistenceQueryDocTest.java rename to akka-docs/src/main/java/jdocs/persistence/PersistenceQueryDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java b/akka-docs/src/main/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java rename to akka-docs/src/main/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/persistence/PersistentActorExample.java b/akka-docs/src/main/java/jdocs/persistence/PersistentActorExample.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/persistence/PersistentActorExample.java rename to akka-docs/src/main/java/jdocs/persistence/PersistentActorExample.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java b/akka-docs/src/main/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java rename to 
akka-docs/src/main/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/persistence/query/MyEventsByTagJavaPublisher.java b/akka-docs/src/main/java/jdocs/persistence/query/MyEventsByTagJavaPublisher.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/persistence/query/MyEventsByTagJavaPublisher.java rename to akka-docs/src/main/java/jdocs/persistence/query/MyEventsByTagJavaPublisher.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/remoting/RemoteDeploymentDocTest.java b/akka-docs/src/main/java/jdocs/remoting/RemoteDeploymentDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/remoting/RemoteDeploymentDocTest.java rename to akka-docs/src/main/java/jdocs/remoting/RemoteDeploymentDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/routing/ConsistentHashingRouterDocTest.java b/akka-docs/src/main/java/jdocs/routing/ConsistentHashingRouterDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/routing/ConsistentHashingRouterDocTest.java rename to akka-docs/src/main/java/jdocs/routing/ConsistentHashingRouterDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/routing/CustomRouterDocTest.java b/akka-docs/src/main/java/jdocs/routing/CustomRouterDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/routing/CustomRouterDocTest.java rename to akka-docs/src/main/java/jdocs/routing/CustomRouterDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/routing/RedundancyGroup.java b/akka-docs/src/main/java/jdocs/routing/RedundancyGroup.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/routing/RedundancyGroup.java rename to akka-docs/src/main/java/jdocs/routing/RedundancyGroup.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/routing/RouterDocTest.java b/akka-docs/src/main/java/jdocs/routing/RouterDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/routing/RouterDocTest.java rename to akka-docs/src/main/java/jdocs/routing/RouterDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/serialization/SerializationDocTest.java b/akka-docs/src/main/java/jdocs/serialization/SerializationDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/serialization/SerializationDocTest.java rename to akka-docs/src/main/java/jdocs/serialization/SerializationDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/ActorPublisherDocTest.java b/akka-docs/src/main/java/jdocs/stream/ActorPublisherDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/ActorPublisherDocTest.java rename to akka-docs/src/main/java/jdocs/stream/ActorPublisherDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/ActorSubscriberDocTest.java b/akka-docs/src/main/java/jdocs/stream/ActorSubscriberDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/ActorSubscriberDocTest.java rename to akka-docs/src/main/java/jdocs/stream/ActorSubscriberDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/BidiFlowDocTest.java b/akka-docs/src/main/java/jdocs/stream/BidiFlowDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/BidiFlowDocTest.java rename to 
akka-docs/src/main/java/jdocs/stream/BidiFlowDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/CompositionDocTest.java b/akka-docs/src/main/java/jdocs/stream/CompositionDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/CompositionDocTest.java rename to akka-docs/src/main/java/jdocs/stream/CompositionDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/FlowDocTest.java b/akka-docs/src/main/java/jdocs/stream/FlowDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/FlowDocTest.java rename to akka-docs/src/main/java/jdocs/stream/FlowDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/FlowErrorDocTest.java b/akka-docs/src/main/java/jdocs/stream/FlowErrorDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/FlowErrorDocTest.java rename to akka-docs/src/main/java/jdocs/stream/FlowErrorDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/FlowParallelismDocTest.java b/akka-docs/src/main/java/jdocs/stream/FlowParallelismDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/FlowParallelismDocTest.java rename to akka-docs/src/main/java/jdocs/stream/FlowParallelismDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/GraphCyclesDocTest.java b/akka-docs/src/main/java/jdocs/stream/GraphCyclesDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/GraphCyclesDocTest.java rename to akka-docs/src/main/java/jdocs/stream/GraphCyclesDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/GraphDSLDocTest.java b/akka-docs/src/main/java/jdocs/stream/GraphDSLDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/GraphDSLDocTest.java rename to akka-docs/src/main/java/jdocs/stream/GraphDSLDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/GraphStageDocTest.java b/akka-docs/src/main/java/jdocs/stream/GraphStageDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/GraphStageDocTest.java rename to akka-docs/src/main/java/jdocs/stream/GraphStageDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/GraphStageLoggingDocTest.java b/akka-docs/src/main/java/jdocs/stream/GraphStageLoggingDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/GraphStageLoggingDocTest.java rename to akka-docs/src/main/java/jdocs/stream/GraphStageLoggingDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/HubDocTest.java b/akka-docs/src/main/java/jdocs/stream/HubDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/HubDocTest.java rename to akka-docs/src/main/java/jdocs/stream/HubDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/IntegrationDocTest.java b/akka-docs/src/main/java/jdocs/stream/IntegrationDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/IntegrationDocTest.java rename to akka-docs/src/main/java/jdocs/stream/IntegrationDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/KillSwitchDocTest.java b/akka-docs/src/main/java/jdocs/stream/KillSwitchDocTest.java similarity index 100% rename from 
akka-docs/src/main/paradox/java/code/jdocs/stream/KillSwitchDocTest.java rename to akka-docs/src/main/java/jdocs/stream/KillSwitchDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/Main.java b/akka-docs/src/main/java/jdocs/stream/Main.java similarity index 82% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/Main.java rename to akka-docs/src/main/java/jdocs/stream/Main.java index b2616e0c76..c1c94e0fdd 100644 --- a/akka-docs/src/main/paradox/java/code/jdocs/stream/Main.java +++ b/akka-docs/src/main/java/jdocs/stream/Main.java @@ -1,3 +1,5 @@ +package jdocs.stream; + //#main-app public class Main { public static void main(String[] argv) { diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/MigrationsJava.java b/akka-docs/src/main/java/jdocs/stream/MigrationsJava.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/MigrationsJava.java rename to akka-docs/src/main/java/jdocs/stream/MigrationsJava.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/QuickStartDocTest.java b/akka-docs/src/main/java/jdocs/stream/QuickStartDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/QuickStartDocTest.java rename to akka-docs/src/main/java/jdocs/stream/QuickStartDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/RateTransformationDocTest.java b/akka-docs/src/main/java/jdocs/stream/RateTransformationDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/RateTransformationDocTest.java rename to akka-docs/src/main/java/jdocs/stream/RateTransformationDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/ReactiveStreamsDocTest.java b/akka-docs/src/main/java/jdocs/stream/ReactiveStreamsDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/ReactiveStreamsDocTest.java rename to akka-docs/src/main/java/jdocs/stream/ReactiveStreamsDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/SilenceSystemOut.java b/akka-docs/src/main/java/jdocs/stream/SilenceSystemOut.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/SilenceSystemOut.java rename to akka-docs/src/main/java/jdocs/stream/SilenceSystemOut.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/StreamBuffersRateDocTest.java b/akka-docs/src/main/java/jdocs/stream/StreamBuffersRateDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/StreamBuffersRateDocTest.java rename to akka-docs/src/main/java/jdocs/stream/StreamBuffersRateDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/StreamPartialGraphDSLDocTest.java b/akka-docs/src/main/java/jdocs/stream/StreamPartialGraphDSLDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/StreamPartialGraphDSLDocTest.java rename to akka-docs/src/main/java/jdocs/stream/StreamPartialGraphDSLDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/StreamTestKitDocTest.java b/akka-docs/src/main/java/jdocs/stream/StreamTestKitDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/StreamTestKitDocTest.java rename to akka-docs/src/main/java/jdocs/stream/StreamTestKitDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/TwitterStreamQuickstartDocTest.java 
b/akka-docs/src/main/java/jdocs/stream/TwitterStreamQuickstartDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/TwitterStreamQuickstartDocTest.java rename to akka-docs/src/main/java/jdocs/stream/TwitterStreamQuickstartDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/io/StreamFileDocTest.java b/akka-docs/src/main/java/jdocs/stream/io/StreamFileDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/io/StreamFileDocTest.java rename to akka-docs/src/main/java/jdocs/stream/io/StreamFileDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/io/StreamTcpDocTest.java b/akka-docs/src/main/java/jdocs/stream/io/StreamTcpDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/io/StreamTcpDocTest.java rename to akka-docs/src/main/java/jdocs/stream/io/StreamTcpDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeDecompress.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeDecompress.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeDecompress.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeDecompress.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeDigest.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeDigest.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeFlattenList.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeFlattenList.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeFlattenList.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeFlattenList.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeHold.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeHold.java similarity index 100% rename from 
akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeHold.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeHold.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeKeepAlive.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeKeepAlive.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeKeepAlive.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeKeepAlive.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeMissedTicks.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeMissedTicks.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeMissedTicks.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeMissedTicks.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeMultiGroupByTest.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeMultiGroupByTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeMultiGroupByTest.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeMultiGroupByTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeParseLines.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeParseLines.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeParseLines.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeParseLines.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeSeq.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeSeq.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeSimpleDrop.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeSimpleDrop.java similarity index 100% rename from 
akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeSimpleDrop.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeSimpleDrop.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeTest.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeTest.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java b/akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java rename to akka-docs/src/main/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/testkit/ParentChildTest.java b/akka-docs/src/main/java/jdocs/testkit/ParentChildTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/testkit/ParentChildTest.java rename to akka-docs/src/main/java/jdocs/testkit/ParentChildTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/testkit/TestKitDocTest.java b/akka-docs/src/main/java/jdocs/testkit/TestKitDocTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/testkit/TestKitDocTest.java rename to akka-docs/src/main/java/jdocs/testkit/TestKitDocTest.java diff --git a/akka-docs/src/main/paradox/java/code/jdocs/testkit/TestKitSampleTest.java b/akka-docs/src/main/java/jdocs/testkit/TestKitSampleTest.java similarity index 100% rename from akka-docs/src/main/paradox/java/code/jdocs/testkit/TestKitSampleTest.java rename to akka-docs/src/main/java/jdocs/testkit/TestKitSampleTest.java diff --git a/akka-docs/src/main/paradox/java/actors.md b/akka-docs/src/main/paradox/java/actors.md index 6236c98649..54ccf65523 100644 --- a/akka-docs/src/main/paradox/java/actors.md +++ b/akka-docs/src/main/paradox/java/actors.md @@ -37,7 +37,7 @@ function there is a builder named `ReceiveBuilder` that you can use. Here is an example: -@@snip [MyActor.java](code/jdocs/actor/MyActor.java) { #imports #my-actor } +@@snip [MyActor.java]($code$/java/jdocs/actor/MyActor.java) { #imports #my-actor } Please note that the Akka Actor `receive` message loop is exhaustive, which is different compared to Erlang and the late Scala Actors. This means that you @@ -64,9 +64,9 @@ creating an actor including associated deployment information (e.g. which dispatcher to use, see more below). Here are some examples of how to create a `Props` instance. -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #import-props } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-props } -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #creating-props } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #creating-props } The second variant shows how to pass constructor arguments to the `Actor` being created, but it should only be used outside of actors as @@ -80,7 +80,7 @@ found. 
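For orientation, a minimal, self-contained sketch of the pattern recommended above: an actor that takes a constructor argument and exposes a static `props` factory. The `GreetingActor` name and its messages are made up for illustration and are not part of the documented sources.

```java
import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;

// Hypothetical actor, used only to illustrate Props creation.
public class GreetingActor extends AbstractActor {
  private final String greeting;

  public GreetingActor(String greeting) {
    this.greeting = greeting;
  }

  // Static factory: no enclosing scope is captured and the
  // constructor argument travels inside the Props instance.
  public static Props props(String greeting) {
    return Props.create(GreetingActor.class, () -> new GreetingActor(greeting));
  }

  @Override
  public Receive createReceive() {
    return receiveBuilder()
      .match(String.class, name ->
        getSender().tell(greeting + ", " + name, getSelf()))
      .build();
  }

  public static void main(String[] args) {
    ActorSystem system = ActorSystem.create("props-demo");
    ActorRef greeter = system.actorOf(GreetingActor.props("Hello"), "greeter");
    greeter.tell("Akka", ActorRef.noSender());
    system.terminate();
  }
}
```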
#### Dangerous Variants -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #creating-props-deprecated } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #creating-props-deprecated } This method is not recommended to be used within another actor because it encourages to close over the enclosing scope, resulting in non-serializable @@ -113,14 +113,14 @@ associated with using the `Props.create(...)` method which takes a by-name argument, since within a companion object the given code block will not retain a reference to its enclosing scope: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #props-factory } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #props-factory } Another good practice is to declare what messages an Actor can receive as close to the actor definition as possible (e.g. as static classes inside the Actor or using other suitable class), which makes it easier to know what it can receive. -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #messages-in-companion } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #messages-in-companion } ### Creating Actors with Props @@ -128,15 +128,15 @@ Actors are created by passing a `Props` instance into the `actorOf` factory method which is available on `ActorSystem` and `ActorContext`. -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #import-actorRef } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-actorRef } -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #system-actorOf } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #system-actorOf } Using the `ActorSystem` will create top-level actors, supervised by the actor system’s provided guardian actor, while using an actor’s context will create a child actor. -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #context-actorOf } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #context-actorOf } It is recommended to create a hierarchy of children, grand-children and so on such that it fits the logical failure-handling structure of the application, @@ -166,9 +166,9 @@ be part of the `Props` as well, as described [above](Props_). But there are cases when a factory method must be used, for example when the actual constructor arguments are determined by a dependency injection framework. -@@snip [DependencyInjectionDocTest.java](code/jdocs/actor/DependencyInjectionDocTest.java) { #import } +@@snip [DependencyInjectionDocTest.java]($code$/java/jdocs/actor/DependencyInjectionDocTest.java) { #import } -@@snip [DependencyInjectionDocTest.java](code/jdocs/actor/DependencyInjectionDocTest.java) { #creating-indirectly } +@@snip [DependencyInjectionDocTest.java]($code$/java/jdocs/actor/DependencyInjectionDocTest.java) { #creating-indirectly } @@@ warning @@ -195,13 +195,13 @@ cannot do: receiving multiple replies (e.g. by subscribing an `ActorRef` to a notification service) and watching other actors’ lifecycle. For these purposes there is the `Inbox` class: -@@snip [InboxDocTest.java](code/jdocs/actor/InboxDocTest.java) { #inbox } +@@snip [InboxDocTest.java]($code$/java/jdocs/actor/InboxDocTest.java) { #inbox } The `send` method wraps a normal `tell` and supplies the internal actor’s reference as the sender. This allows the reply to be received on the last line. 
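The following is a small, illustrative sketch of the `Inbox` usage described above; the `Echo` actor is a hypothetical stand-in for any actor that replies to its sender and is not one of the documented snippets.

```java
import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Inbox;
import akka.actor.Props;
import scala.concurrent.duration.Duration;

public class InboxExample {
  // Hypothetical helper actor: replies to its sender with whatever it received.
  public static class Echo extends AbstractActor {
    @Override
    public Receive createReceive() {
      return receiveBuilder()
        .matchAny(msg -> getSender().tell(msg, getSelf()))
        .build();
    }
  }

  public static void main(String[] args) throws Exception {
    ActorSystem system = ActorSystem.create("inbox-demo");
    ActorRef echo = system.actorOf(Props.create(Echo.class), "echo");

    Inbox inbox = Inbox.create(system);
    inbox.send(echo, "hello");                                   // the inbox's internal ref is the sender
    Object reply = inbox.receive(Duration.create(3, "seconds")); // blocks; throws TimeoutException on timeout
    System.out.println(reply);                                   // prints "hello"

    system.terminate();
  }
}
```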
Watching an actor is quite simple as well: -@@snip [InboxDocTest.java](code/jdocs/actor/InboxDocTest.java) { #watch } +@@snip [InboxDocTest.java]($code$/java/jdocs/actor/InboxDocTest.java) { #watch } ## Actor API @@ -241,7 +241,7 @@ time). The remaining visible methods are user-overridable life-cycle hooks which are described in the following: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #lifecycle-callbacks } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #lifecycle-callbacks } The implementations shown above are the defaults provided by the `AbstractActor` class. @@ -300,9 +300,9 @@ termination (see [Stopping Actors](#stopping-actors)). This service is provided Registering a monitor is easy: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #import-terminated } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-terminated } -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #watch } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #watch } It should be noted that the `Terminated` message is generated independent of the order in which registration and termination occur. @@ -327,7 +327,7 @@ no `Terminated` message for that actor will be processed anymore. Right after starting the actor, its `preStart` method is invoked. -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #preStart } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #preStart } This method is called when the actor is first created. During restarts it is called by the default implementation of `postRestart`, which means that @@ -400,7 +400,7 @@ actors may look up other actors by specifying absolute or relative paths—logical or physical—and receive back an `ActorSelection` with the result: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #selection-local } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #selection-local } @@@ note @@ -429,7 +429,7 @@ structure, i.e. the supervisor. The path elements of an actor selection may contain wildcard patterns allowing for broadcasting of messages to that section: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #selection-wildcard } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #selection-wildcard } Messages can be sent via the `ActorSelection` and the path of the `ActorSelection` is looked up when delivering each message. If the selection @@ -445,9 +445,9 @@ actors which are traversed in the sense that if a concrete name lookup fails negative result is generated. Please note that this does not mean that delivery of that reply is guaranteed, it still is a normal message. -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #import-identify } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-identify } -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #identify } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #identify } You can also acquire an `ActorRef` for an `ActorSelection` with the `resolveOne` method of the `ActorSelection`. It returns a @@ -458,7 +458,7 @@ didn't complete within the supplied *timeout*. 
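As a rough sketch of the selection and `resolveOne` steps just described, assuming an actor has been started under the hypothetical path `/user/worker` (the path and names are placeholders, not part of the documented tests):

```java
import java.util.concurrent.TimeUnit;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;

public class SelectionLookup {
  public static void lookUp(ActorSystem system) {
    // Select by absolute path; the path is looked up anew for every message sent.
    ActorSelection selection = system.actorSelection("/user/worker");
    selection.tell("job", ActorRef.noSender());

    // Resolve the selection to a concrete ActorRef; the Future is failed
    // if no matching actor answers within the supplied timeout.
    Future<ActorRef> resolved =
      selection.resolveOne(Duration.create(3, TimeUnit.SECONDS));
  }
}
```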
Remote actor addresses may also be looked up, if @ref:[remoting](remoting.md) is enabled: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #selection-remote } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #selection-remote } An example demonstrating actor look-up is given in @ref:[Remoting Sample](remoting.md#remote-sample-java). @@ -470,7 +470,7 @@ convention. Here is an example of an immutable message: -@@snip [ImmutableMessage.java](code/jdocs/actor/ImmutableMessage.java) { #immutable-message } +@@snip [ImmutableMessage.java]($code$/java/jdocs/actor/ImmutableMessage.java) { #immutable-message } ## Send messages @@ -502,7 +502,7 @@ to your message, since the sender reference is sent along with the message. This is the preferred way of sending messages. No blocking waiting for a message. This gives the best concurrency and scalability characteristics. -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #tell } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #tell } The sender reference is passed along with the message and available within the receiving actor via its `getSender()` method while processing this @@ -519,9 +519,9 @@ the ask-pattern described next.. The `ask` pattern involves actors as well as futures, hence it is offered as a use pattern rather than a method on `ActorRef`: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #import-ask } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-ask } -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #ask-pipe } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #ask-pipe } This example demonstrates `ask` together with the `pipe` pattern on futures, because this is likely to be a common combination. Please note that @@ -552,7 +552,7 @@ This is *not done automatically* when an actor throws an exception while process @@@ -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #reply-exception } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #reply-exception } If the actor does not complete the future, it will expire after the timeout period, specified as parameter to the `ask` method; this will complete the @@ -584,7 +584,7 @@ original sender address/reference is maintained even though the message is going through a 'mediator'. This can be useful when writing actors that work as routers, load-balancers, replicators etc. -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #forward } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #forward } ## Receive messages @@ -592,7 +592,7 @@ routers, load-balancers, replicators etc. An actor has to define its initial receive behavior by implementing the `createReceive` method in the `AbstractActor`: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #createReceive } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #createReceive } The return type is `AbstractActor.Receive` that defines which messages your Actor can handle, along with the implementation of how the messages should be processed. @@ -600,18 +600,18 @@ You can build such behavior with a builder named `ReceiveBuilder`. 
Here is an example: -@@snip [MyActor.java](code/jdocs/actor/MyActor.java) { #imports #my-actor } +@@snip [MyActor.java]($code$/java/jdocs/actor/MyActor.java) { #imports #my-actor } In case you want to provide many `match` cases but want to avoid creating a long call trail, you can split the creation of the builder into multiple statements as in the example: -@@snip [GraduallyBuiltActor.java](code/jdocs/actor/GraduallyBuiltActor.java) { #imports #actor } +@@snip [GraduallyBuiltActor.java]($code$/java/jdocs/actor/GraduallyBuiltActor.java) { #imports #actor } Using small methods is a good practice, also in actors. It's recommended to delegate the actual work of the message processing to methods instead of defining a huge `ReceiveBuilder` with lots of code in each lambda. A well structured actor can look like this: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #well-structured } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #well-structured } That has benefits such as: @@ -633,7 +633,7 @@ that the JVM can have problems optimizing and the resulting code might not be as untyped version. When extending `UntypedAbstractActor` each message is received as an untyped `Object` and you have to inspect and cast it to the actual message type in other ways, like this: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #optimized } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #optimized } ## Reply to messages @@ -645,7 +645,7 @@ for replying later, or passing on to other actors. If there is no sender (a message was sent without an actor or future context) then the sender defaults to a 'dead-letter' actor ref. -@@snip [MyActor.java](code/jdocs/actor/MyActor.java) { #reply } +@@snip [MyActor.java]($code$/java/jdocs/actor/MyActor.java) { #reply } ## Receive timeout @@ -661,7 +661,7 @@ timeout there must have been an idle period beforehand as configured via this me Once set, the receive timeout stays in effect (i.e. continues firing repeatedly after inactivity periods). Pass in *Duration.Undefined* to switch off this feature. -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #receive-timeout } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #receive-timeout } Messages marked with `NotInfluenceReceiveTimeout` will not reset the timer. This can be useful when `ReceiveTimeout` should be fired by external inactivity but not influenced by internal activity, @@ -676,7 +676,7 @@ child actors and the system for stopping top level actors. The actual terminatio the actor is performed asynchronously, i.e. `stop` may return before the actor is stopped. -@@snip [MyStoppingActor.java](code/jdocs/actor/MyStoppingActor.java) { #my-stopping-actor } +@@snip [MyStoppingActor.java]($code$/java/jdocs/actor/MyStoppingActor.java) { #my-stopping-actor } Processing of the current message, if any, will continue before the actor is stopped, but additional messages in the mailbox will not be processed. By default these @@ -702,7 +702,7 @@ whole system. The `postStop()` hook is invoked after an actor is fully stopped. This enables cleaning up of resources: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #postStop } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #postStop } @@@ note @@ -722,18 +722,18 @@ stop the actor when the message is processed. 
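As a minimal illustration of the two ways of stopping an actor from the outside mentioned above (the `system` and `worker` names are placeholders, not from the documented tests):

```java
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;

public class StopExamples {
  public static void stopIt(ActorSystem system, ActorRef worker) {
    // Asynchronous stop: the message currently being processed (if any)
    // finishes, but remaining messages in the mailbox are not processed.
    system.stop(worker);

    // Alternatively, enqueue a PoisonPill; the actor stops once it processes it.
    worker.tell(PoisonPill.getInstance(), ActorRef.noSender());
  }
}
```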
`PoisonPill` is enqueued as ordinary messages and will be handled after messages that were already queued in the mailbox. -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #poison-pill } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #poison-pill } ### Graceful Stop `gracefulStop` is useful if you need to wait for termination or compose ordered termination of several actors: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #import-gracefulStop } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-gracefulStop } -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #gracefulStop } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #gracefulStop } -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #gracefulStop-actor } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #gracefulStop-actor } When `gracefulStop()` returns successfully, the actor’s `postStop()` hook will have been executed: there exists a happens-before edge between the end of @@ -764,7 +764,7 @@ services in a specific order and perform registered tasks during the shutdown pr The order of the shutdown phases is defined in configuration `akka.coordinated-shutdown.phases`. The default phases are defined as: -@@snip [reference.conf]../../../../../akka-actor/src/main/resources/reference.conf) { #coordinated-shutdown-phases } +@@snip [reference.conf]($akka$/akka-actor/src/main/resources/reference.conf) { #coordinated-shutdown-phases } More phases can be be added in the application's configuration if needed by overriding a phase with an additional `depends-on`. Especially the phases `before-service-unbind`, `before-cluster-shutdown` and @@ -776,7 +776,7 @@ The phases are ordered with [topological](https://en.wikipedia.org/wiki/Topologi Tasks can be added to a phase with: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-addTask } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-addTask } The returned `CompletionStage` should be completed when the task is completed. The task name parameter is only used for debugging/logging. @@ -795,7 +795,7 @@ added too late will not be run. To start the coordinated shutdown process you can invoke `runAll` on the `CoordinatedShutdown` extension: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-run } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-run } It's safe to call the `runAll` method multiple times. It will only run once. @@ -824,7 +824,7 @@ If you have application specific JVM shutdown hooks it's recommended that you re `CoordinatedShutdown` so that they are running before Akka internal shutdown hooks, e.g. those shutting down Akka Remoting (Artery). -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-jvm-hook } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #coordinated-shutdown-jvm-hook } For some tests it might be undesired to terminate the `ActorSystem` via `CoordinatedShutdown`. 
You can disable that by adding the following to the configuration of the `ActorSystem` that is @@ -854,7 +854,7 @@ Please note that the actor will revert to its original behavior when restarted b To hotswap the Actor behavior using `become`: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #hot-swap-actor } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #hot-swap-actor } This variant of the `become` method is useful for many different things, such as to implement a Finite State Machine (FSM, for an example see [Dining @@ -868,7 +868,7 @@ of “pop” operations (i.e. `unbecome`) matches the number of “push” ones in the long run, otherwise this amounts to a memory leak (which is why this behavior is not the default). -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #swapper } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #swapper } ## Stash @@ -894,7 +894,7 @@ control over the mailbox, see the documentation on mailboxes: @ref:[Mailboxes](m Here is an example of the `AbstractActorWithStash` class in action: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #stash } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #stash } Invoking `stash()` adds the current message (the message that the actor received last) to the actor's stash. It is typically invoked @@ -942,7 +942,7 @@ See @ref:[What Supervision Means](../scala/general/supervision.md#supervision-di Use `Kill` like this: -@@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #kill } +@@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #kill } ## Actors and exceptions @@ -1002,7 +1002,7 @@ this behavior, and ensure that there is only one call to `preStart()`. One useful usage of this pattern is to disable creation of new `ActorRefs` for children during restarts. This can be achieved by overriding `preRestart()`: -@@snip [InitializationDocTest.java](code/jdocs/actor/InitializationDocTest.java) { #preStartInit } +@@snip [InitializationDocTest.java]($code$/java/jdocs/actor/InitializationDocTest.java) { #preStartInit } Please note, that the child actors are *still restarted*, but no new `ActorRef` is created. One can recursively apply the same principles for the children, ensuring that their `preStart()` method is called only at the creation of their @@ -1017,7 +1017,7 @@ for example in the presence of circular dependencies. In this case the actor sho and use `become()` or a finite state-machine state transition to encode the initialized and uninitialized states of the actor. -@@snip [InitializationDocTest.java](code/jdocs/actor/InitializationDocTest.java) { #messageInit } +@@snip [InitializationDocTest.java]($code$/java/jdocs/actor/InitializationDocTest.java) { #messageInit } If the actor may receive messages before it has been initialized, a useful tool can be the `Stash` to save messages until the initialization finishes, and replaying them after the actor became initialized. diff --git a/akka-docs/src/main/paradox/java/agents.md b/akka-docs/src/main/paradox/java/agents.md index ba84c482a2..4788506efe 100644 --- a/akka-docs/src/main/paradox/java/agents.md +++ b/akka-docs/src/main/paradox/java/agents.md @@ -31,14 +31,14 @@ dispatched to the same agent from other threads. 
Agents are created by invoking `new Agent(value, executionContext)` – passing in the Agent's initial value and providing an `ExecutionContext` to be used for it: -@@snip [AgentDocTest.java](code/jdocs/agent/AgentDocTest.java) { #import-agent #create type=java } +@@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #import-agent #create type=java } ## Reading an Agent's value Agents can be dereferenced (you can get an Agent's value) by invoking the Agent with `get()` like this: -@@snip [AgentDocTest.java](code/jdocs/agent/AgentDocTest.java) { #read-get type=java } +@@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #read-get type=java } Reading an Agent's current value does not involve any message passing and happens immediately. So while updates to an Agent are asynchronous, reading the @@ -47,7 +47,7 @@ state of an Agent is synchronous. You can also get a `Future` to the Agents value, that will be completed after the currently queued updates have completed: -@@snip [AgentDocTest.java](code/jdocs/agent/AgentDocTest.java) { #import-future #read-future type=java } +@@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #import-future #read-future type=java } See @ref:[Futures](futures.md) for more information on `Futures`. @@ -61,7 +61,7 @@ the update will be applied but dispatches to an Agent from a single thread will occur in order. You apply a value or a function by invoking the `send` function. -@@snip [AgentDocTest.java](code/jdocs/agent/AgentDocTest.java) { #import-function #send type=java } +@@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #import-function #send type=java } You can also dispatch a function to update the internal state but on its own thread. This does not use the reactive thread pool and can be used for @@ -69,14 +69,14 @@ long-running or blocking operations. You do this with the `sendOff` method. Dispatches using either `sendOff` or `send` will still be executed in order. -@@snip [AgentDocTest.java](code/jdocs/agent/AgentDocTest.java) { #import-function #send-off type=java } +@@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #import-function #send-off type=java } All `send` methods also have a corresponding `alter` method that returns a `Future`. See @ref:[Futures](futures.md) for more information on `Futures`. -@@snip [AgentDocTest.java](code/jdocs/agent/AgentDocTest.java) { #import-future #import-function #alter type=java } +@@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #import-future #import-function #alter type=java } -@@snip [AgentDocTest.java](code/jdocs/agent/AgentDocTest.java) { #import-future #import-function #alter-off type=java } +@@snip [AgentDocTest.java]($code$/java/jdocs/agent/AgentDocTest.java) { #import-future #import-function #alter-off type=java } ## Configuration diff --git a/akka-docs/src/main/paradox/java/camel.md b/akka-docs/src/main/paradox/java/camel.md index 98856f5418..3fd10282ea 100644 --- a/akka-docs/src/main/paradox/java/camel.md +++ b/akka-docs/src/main/paradox/java/camel.md @@ -26,7 +26,7 @@ APIs. The [camel-extra](http://code.google.com/p/camel-extra/) project provides Here's an example of using Camel's integration components in Akka. 
-@@snip [MyEndpoint.java](code/jdocs/camel/MyEndpoint.java) { #Consumer-mina } +@@snip [MyEndpoint.java]($code$/java/jdocs/camel/MyEndpoint.java) { #Consumer-mina } The above example exposes an actor over a TCP endpoint via Apache Camel's [Mina component](http://camel.apache.org/mina2.html). The actor implements the *getEndpointUri* method to define @@ -42,14 +42,14 @@ the *getEndpointUri* returning the URI that was set using this constructor. Actors can also trigger message exchanges with external systems i.e. produce to Camel endpoints. -@@snip [Orders.java](code/jdocs/camel/Orders.java) { #Producer } +@@snip [Orders.java]($code$/java/jdocs/camel/Orders.java) { #Producer } In the above example, any message sent to this actor will be sent to the JMS queue `Orders`. Producer actors may choose from the same set of Camel components as Consumer actors do. Below an example of how to send a message to the Orders producer. -@@snip [ProducerTestBase.java](code/jdocs/camel/ProducerTestBase.java) { #TellProducer } +@@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #TellProducer } ### CamelMessage @@ -75,7 +75,7 @@ The `CamelExtension` object provides access to the [Camel](@github@/akka-camel/s The [Camel](@github@/akka-camel/src/main/scala/akka/camel/Camel.scala) interface in turn provides access to two important Apache Camel objects, the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) and the `ProducerTemplate`_. Below you can see how you can get access to these Apache Camel objects. -@@snip [CamelExtensionTest.java](code/jdocs/camel/CamelExtensionTest.java) { #CamelExtension } +@@snip [CamelExtensionTest.java]($code$/java/jdocs/camel/CamelExtensionTest.java) { #CamelExtension } One `CamelExtension` is only loaded once for every one `ActorSystem`, which makes it safe to call the `CamelExtension` at any point in your code to get to the Apache Camel objects associated with it. There is one [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) and one `ProducerTemplate`_ for every one `ActorSystem` that uses a `CamelExtension`. @@ -85,7 +85,7 @@ This interface define a single method `getContext()` used to load the [CamelCont Below an example on how to add the ActiveMQ component to the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java), which is required when you would like to use the ActiveMQ component. -@@snip [CamelExtensionTest.java](code/jdocs/camel/CamelExtensionTest.java) { #CamelExtensionAddComponent } +@@snip [CamelExtensionTest.java]($code$/java/jdocs/camel/CamelExtensionTest.java) { #CamelExtensionAddComponent } The [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) joins the lifecycle of the `ActorSystem` and `CamelExtension` it is associated with; the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) is started when the `CamelExtension` is created, and it is shut down when the associated `ActorSystem` is shut down. The same is true for the `ProducerTemplate`_. @@ -100,12 +100,12 @@ Publication is done asynchronously; setting up an endpoint may still be in progr requested the actor to be created. 
Some Camel components can take a while to startup, and in some cases you might want to know when the endpoints are activated and ready to be used. The [Camel](@github@/akka-camel/src/main/scala/akka/camel/Camel.scala) interface allows you to find out when the endpoint is activated or deactivated. -@@snip [ActivationTestBase.java](code/jdocs/camel/ActivationTestBase.java) { #CamelActivation } +@@snip [ActivationTestBase.java]($code$/java/jdocs/camel/ActivationTestBase.java) { #CamelActivation } The above code shows that you can get a `Future` to the activation of the route from the endpoint to the actor, or you can wait in a blocking fashion on the activation of the route. An `ActivationTimeoutException` is thrown if the endpoint could not be activated within the specified timeout. Deactivation works in a similar fashion: -@@snip [ActivationTestBase.java](code/jdocs/camel/ActivationTestBase.java) { #CamelDeactivation } +@@snip [ActivationTestBase.java]($code$/java/jdocs/camel/ActivationTestBase.java) { #CamelDeactivation } Deactivation of a Consumer or a Producer actor happens when the actor is terminated. For a Consumer, the route to the actor is stopped. For a Producer, the [SendProcessor](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java) is stopped. A `DeActivationTimeoutException` is thrown if the associated camel objects could not be deactivated within the specified timeout. @@ -117,7 +117,7 @@ class. For example, the following actor class (Consumer1) implements the *getEndpointUri* method, which is declared in the [UntypedConsumerActor](@github@/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala) class, in order to receive messages from the `file:data/input/actor` Camel endpoint. -@@snip [Consumer1.java](code/jdocs/camel/Consumer1.java) { #Consumer1 } +@@snip [Consumer1.java]($code$/java/jdocs/camel/Consumer1.java) { #Consumer1 } Whenever a file is put into the data/input/actor directory, its content is picked up by the Camel [file component](http://camel.apache.org/file2.html) and sent as message to the @@ -129,7 +129,7 @@ Here's another example that sets the endpointUri to component`_ to start an embedded [Jetty](http://www.eclipse.org/jetty/) server, accepting HTTP connections from localhost on port 8877. -@@snip [Consumer2.java](code/jdocs/camel/Consumer2.java) { #Consumer2 } +@@snip [Consumer2.java]($code$/java/jdocs/camel/Consumer2.java) { #Consumer2 } After starting the actor, clients can send messages to that actor by POSTing to `http://localhost:8877/camel/default`. The actor sends a response by using the @@ -156,7 +156,7 @@ In this case, consumer actors must reply either with a special akka.camel.Ack message (positive acknowledgement) or a akka.actor.Status.Failure (negative acknowledgement). -@@snip [Consumer3.java](code/jdocs/camel/Consumer3.java) { #Consumer3 } +@@snip [Consumer3.java]($code$/java/jdocs/camel/Consumer3.java) { #Consumer3 } ### Consumer timeout @@ -173,13 +173,13 @@ and the actor replies to the endpoint when the response is ready. The ask reques result in the [Exchange](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Exchange.java) failing with a TimeoutException set on the failure of the [Exchange](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Exchange.java). The timeout on the consumer actor can be overridden with the `replyTimeout`, as shown below. 
-@@snip [Consumer4.java](code/jdocs/camel/Consumer4.java) { #Consumer4 } +@@snip [Consumer4.java]($code$/java/jdocs/camel/Consumer4.java) { #Consumer4 } ## Producer Actors For sending messages to Camel endpoints, actors need to inherit from the [UntypedProducerActor](@github@/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala) class and implement the getEndpointUri method. -@@snip [Producer1.java](code/jdocs/camel/Producer1.java) { #Producer1 } +@@snip [Producer1.java]($code$/java/jdocs/camel/Producer1.java) { #Producer1 } Producer1 inherits a default implementation of the onReceive method from the [UntypedProducerActor](@github@/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala) class. To customize a producer actor's default behavior you must override the [UntypedProducerActor](@github@/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala).onTransformResponse and @@ -193,7 +193,7 @@ configured endpoint) will, by default, be returned to the original sender. The following example uses the ask pattern to send a message to a Producer actor and waits for a response. -@@snip [ProducerTestBase.java](code/jdocs/camel/ProducerTestBase.java) { #AskProducer } +@@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #AskProducer } The future contains the response CamelMessage, or an `AkkaCamelException` when an error occurred, which contains the headers of the response. @@ -205,16 +205,16 @@ response processing by overriding the onRouteResponse method. In the following e message is forwarded to a target actor instead of being replied to the original sender. -@@snip [ResponseReceiver.java](code/jdocs/camel/ResponseReceiver.java) { #RouteResponse } +@@snip [ResponseReceiver.java]($code$/java/jdocs/camel/ResponseReceiver.java) { #RouteResponse } -@@snip [Forwarder.java](code/jdocs/camel/Forwarder.java) { #RouteResponse } +@@snip [Forwarder.java]($code$/java/jdocs/camel/Forwarder.java) { #RouteResponse } -@@snip [OnRouteResponseTestBase.java](code/jdocs/camel/OnRouteResponseTestBase.java) { #RouteResponse } +@@snip [OnRouteResponseTestBase.java]($code$/java/jdocs/camel/OnRouteResponseTestBase.java) { #RouteResponse } Before producing messages to endpoints, producer actors can pre-process them by overriding the [UntypedProducerActor](@github@/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala).onTransformOutgoingMessage method. -@@snip [Transformer.java](code/jdocs/camel/Transformer.java) { #TransformOutgoingMessage } +@@snip [Transformer.java]($code$/java/jdocs/camel/Transformer.java) { #TransformOutgoingMessage } ### Producer configuration options @@ -223,26 +223,26 @@ one-way or two-way (by initiating in-only or in-out message exchanges, respectively). By default, the producer initiates an in-out message exchange with the endpoint. For initiating an in-only exchange, producer actors have to override the isOneway method to return true. -@@snip [OnewaySender.java](code/jdocs/camel/OnewaySender.java) { #Oneway } +@@snip [OnewaySender.java]($code$/java/jdocs/camel/OnewaySender.java) { #Oneway } ### Message correlation To correlate request with response messages, applications can set the *Message.MessageExchangeId* message header. 
-@@snip [ProducerTestBase.java](code/jdocs/camel/ProducerTestBase.java) { #Correlate } +@@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #Correlate } ### ProducerTemplate The [UntypedProducerActor](@github@/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala) class is a very convenient way for actors to produce messages to Camel endpoints. Actors may also use a Camel `ProducerTemplate`_ for producing messages to endpoints. -@@snip [MyActor.java](code/jdocs/camel/MyActor.java) { #ProducerTemplate } +@@snip [MyActor.java]($code$/java/jdocs/camel/MyActor.java) { #ProducerTemplate } For initiating a two-way message exchange, one of the `ProducerTemplate.request*` methods must be used. -@@snip [RequestBodyActor.java](code/jdocs/camel/RequestBodyActor.java) { #RequestProducerTemplate } +@@snip [RequestBodyActor.java]($code$/java/jdocs/camel/RequestBodyActor.java) { #RequestProducerTemplate } ## Asynchronous routing @@ -354,11 +354,11 @@ akka://some-system/user/myconsumer?autoAck=false&replyTimeout=100+millis In the following example, a custom route to an actor is created, using the actor's path. -@@snip [Responder.java](code/jdocs/camel/Responder.java) { #CustomRoute } +@@snip [Responder.java]($code$/java/jdocs/camel/Responder.java) { #CustomRoute } -@@snip [CustomRouteBuilder.java](code/jdocs/camel/CustomRouteBuilder.java) { #CustomRoute } +@@snip [CustomRouteBuilder.java]($code$/java/jdocs/camel/CustomRouteBuilder.java) { #CustomRoute } -@@snip [CustomRouteTestBase.java](code/jdocs/camel/CustomRouteTestBase.java) { #CustomRoute } +@@snip [CustomRouteTestBase.java]($code$/java/jdocs/camel/CustomRouteTestBase.java) { #CustomRoute } The *CamelPath.toCamelUri* converts the *ActorRef* to the Camel actor component URI format which points to the actor endpoint as described above. When a message is received on the jetty endpoint, it is routed to the Responder actor, which in return replies back to the client of @@ -377,7 +377,7 @@ Extensions can be defined with Camel's [Java DSL](http://camel.apache.org/dsl.ht The following examples demonstrate how to extend a route to a consumer actor for handling exceptions thrown by that actor. -@@snip [ErrorThrowingConsumer.java](code/jdocs/camel/ErrorThrowingConsumer.java) { #ErrorThrowingConsumer } +@@snip [ErrorThrowingConsumer.java]($code$/java/jdocs/camel/ErrorThrowingConsumer.java) { #ErrorThrowingConsumer } The above ErrorThrowingConsumer sends the Failure back to the sender in preRestart because the Exception that is thrown in the actor would diff --git a/akka-docs/src/main/paradox/java/cluster-client.md b/akka-docs/src/main/paradox/java/cluster-client.md index 29b3e44f8c..859c232774 100644 --- a/akka-docs/src/main/paradox/java/cluster-client.md +++ b/akka-docs/src/main/paradox/java/cluster-client.md @@ -88,17 +88,17 @@ akka.extensions = ["akka.cluster.client.ClusterClientReceptionist"] Next, register the actors that should be available for the client. -@@snip [ClusterClientTest.java]../../../../../akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #server } +@@snip [ClusterClientTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #server } On the client you create the `ClusterClient` actor and use it as a gateway for sending messages to the actors identified by their path (without address information) somewhere in the cluster. 
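As a rough sketch (assuming an `ActorSystem` named `system`; host names and the service path below are invented, and the actual snippet is referenced next):

```java
import akka.actor.ActorPath;
import akka.actor.ActorPaths;
import akka.actor.ActorRef;
import akka.cluster.client.ClusterClient;
import akka.cluster.client.ClusterClientSettings;

import java.util.HashSet;
import java.util.Set;

// Initial contact points are the receptionists of the cluster (addresses invented).
final Set<ActorPath> initialContacts = new HashSet<>();
initialContacts.add(ActorPaths.fromString("akka.tcp://OtherSys@host1:2552/system/receptionist"));
initialContacts.add(ActorPaths.fromString("akka.tcp://OtherSys@host2:2552/system/receptionist"));

final ActorRef client = system.actorOf(
    ClusterClient.props(
        ClusterClientSettings.create(system).withInitialContacts(initialContacts)),
    "client");

// Send to a service registered at the receptionists, identified only by its path.
client.tell(new ClusterClient.Send("/user/serviceA/serviceB", "hello", true), ActorRef.noSender());
```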
-@@snip [ClusterClientTest.java]../../../../../akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #client } +@@snip [ClusterClientTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #client } The `initialContacts` parameter is a `Set`, which can be created like this: -@@snip [ClusterClientTest.java]../../../../../akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #initialContacts } +@@snip [ClusterClientTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #initialContacts } You will probably define the address information of the initial contact points in configuration or system property. See also [Configuration](#cluster-client-config-java). @@ -129,11 +129,11 @@ The following code snippet declares an actor that will receive notifications on receptionists), as they become available. The code illustrates subscribing to the events and receiving the `ClusterClient` initial state. -@@snip [ClusterClientTest.java]../../../../../akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #clientEventsListener } +@@snip [ClusterClientTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #clientEventsListener } Similarly we can have an actor that behaves in a similar fashion for learning what cluster clients contact a `ClusterClientReceptionist`: -@@snip [ClusterClientTest.java]../../../../../akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #receptionistEventsListener } +@@snip [ClusterClientTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/client/ClusterClientTest.java) { #receptionistEventsListener } ## Dependencies @@ -161,7 +161,7 @@ maven: The `ClusterClientReceptionist` extension (or `ClusterReceptionistSettings`) can be configured with the following properties: -@@snip [reference.conf]../../../../../akka-cluster-tools/src/main/resources/reference.conf) { #receptionist-ext-config } +@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #receptionist-ext-config } The following configuration properties are read by the `ClusterClientSettings` when created with a `ActorSystem` parameter. It is also possible to amend the `ClusterClientSettings` @@ -169,7 +169,7 @@ or create it from another config section with the same layout as below. `Cluster a parameter to the `ClusterClient.props` factory method, i.e. each client can be configured with different settings if needed. -@@snip [reference.conf]../../../../../akka-cluster-tools/src/main/resources/reference.conf) { #cluster-client-config } +@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #cluster-client-config } ## Failure handling diff --git a/akka-docs/src/main/paradox/java/cluster-metrics.md b/akka-docs/src/main/paradox/java/cluster-metrics.md index a7f87e8bb3..d89535f1c5 100644 --- a/akka-docs/src/main/paradox/java/cluster-metrics.md +++ b/akka-docs/src/main/paradox/java/cluster-metrics.md @@ -126,11 +126,11 @@ Let's take a look at this router in action. 
What can be more demanding than calc The backend worker that performs the factorial calculation: -@@snip [FactorialBackend.java](code/jdocs/cluster/FactorialBackend.java) { #backend } +@@snip [FactorialBackend.java]($code$/java/jdocs/cluster/FactorialBackend.java) { #backend } The frontend that receives user jobs and delegates to the backends via the router: -@@snip [FactorialFrontend.java](code/jdocs/cluster/FactorialFrontend.java) { #frontend } +@@snip [FactorialFrontend.java]($code$/java/jdocs/cluster/FactorialFrontend.java) { #frontend } As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows: @@ -160,9 +160,9 @@ other things work in the same way as other routers. The same type of router could also have been defined in code: -@@snip [FactorialFrontend.java](code/jdocs/cluster/FactorialFrontend.java) { #router-lookup-in-code } +@@snip [FactorialFrontend.java]($code$/java/jdocs/cluster/FactorialFrontend.java) { #router-lookup-in-code } -@@snip [FactorialFrontend.java](code/jdocs/cluster/FactorialFrontend.java) { #router-deploy-in-code } +@@snip [FactorialFrontend.java]($code$/java/jdocs/cluster/FactorialFrontend.java) { #router-deploy-in-code } The easiest way to run **Adaptive Load Balancing** example yourself is to download the ready to run [Akka Cluster Sample with Scala](@exampleCodeService@/akka-samples-cluster-java) @@ -173,7 +173,7 @@ The source code of this sample can be found in the [Akka Samples Repository](@sa It is possible to subscribe to the metrics events directly to implement other functionality. -@@snip [MetricsListener.java](code/jdocs/cluster/MetricsListener.java) { #metrics-listener } +@@snip [MetricsListener.java]($code$/java/jdocs/cluster/MetricsListener.java) { #metrics-listener } ## Custom Metrics Collector @@ -191,4 +191,4 @@ Custom metrics collector implementation class must be specified in the The Cluster metrics extension can be configured with the following properties: -@@snip [reference.conf]../../../../../akka-cluster-metrics/src/main/resources/reference.conf) { # } \ No newline at end of file +@@snip [reference.conf]($akka$/akka-cluster-metrics/src/main/resources/reference.conf) \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/cluster-sharding.md b/akka-docs/src/main/paradox/java/cluster-sharding.md index 86eef8b719..6bcfae24f8 100644 --- a/akka-docs/src/main/paradox/java/cluster-sharding.md +++ b/akka-docs/src/main/paradox/java/cluster-sharding.md @@ -35,7 +35,7 @@ See @ref:[Downing](cluster-usage.md#automatic-vs-manual-downing-java). This is how an entity actor may look like: -@@snip [ClusterShardingTest.java]../../../../../akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-actor } +@@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-actor } The above actor uses event sourcing and the support provided in `AbstractPersistentActor` to store its state. It does not have to be a persistent actor, but in case of failure or migration of entities between nodes it must be able to recover @@ -48,12 +48,12 @@ When using the sharding extension you are first, typically at system startup on in the cluster, supposed to register the supported entity types with the `ClusterSharding.start` method. `ClusterSharding.start` gives you the reference which you can pass along. 
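As a sketch (assuming an `ActorSystem` named `system`, an entity actor class `Counter`, and a `messageExtractor` as described further down), the registration looks roughly like:

```java
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.cluster.sharding.ClusterSharding;
import akka.cluster.sharding.ClusterShardingSettings;

ActorRef counterRegion = ClusterSharding.get(system).start(
    "Counter",                               // type name of the entity
    Props.create(Counter.class),             // how to create an entity actor
    ClusterShardingSettings.create(system),  // settings, typically from configuration
    messageExtractor);                       // maps messages to entity id and shard id
```

The returned `ActorRef` is the local `ShardRegion` for that entity type.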
-@@snip [ClusterShardingTest.java]../../../../../akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-start } +@@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-start } The `messageExtractor` defines application specific methods to extract the entity identifier and the shard identifier from incoming messages. -@@snip [ClusterShardingTest.java]../../../../../akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-extractor } +@@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-extractor } This example illustrates two different ways to define the entity identifier in the messages: @@ -88,7 +88,7 @@ The `ShardRegion` will lookup the location of the shard for the entity if it doe delegate the message to the right node and it will create the entity actor on demand, i.e. when the first message for a specific entity is delivered. -@@snip [ClusterShardingTest.java]../../../../../akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-usage } +@@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-usage } ## How it works @@ -300,11 +300,11 @@ If you need to use another `supervisorStrategy` for the entity actors than the d you need to create an intermediate parent actor that defines the `supervisorStrategy` to the child entity actor. -@@snip [ClusterShardingTest.java]../../../../../akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #supervisor } +@@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #supervisor } You start such a supervisor in the same way as if it was the entity actor. -@@snip [ClusterShardingTest.java]../../../../../akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-supervisor-start } +@@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-supervisor-start } Note that stopped entities will be started again when a new message is targeted to the entity. @@ -398,7 +398,7 @@ with the same layout as below. `ClusterShardingSettings` is a parameter to the ` the `ClusterSharding` extension, i.e. each each entity type can be configured with different settings if needed. -@@snip [reference.conf]../../../../../akka-cluster-sharding/src/main/resources/reference.conf) { #sharding-ext-config } +@@snip [reference.conf]($akka$/akka-cluster-sharding/src/main/resources/reference.conf) { #sharding-ext-config } Custom shard allocation strategy can be defined in an optional parameter to `ClusterSharding.start`. See the API documentation of `AbstractShardAllocationStrategy` for details diff --git a/akka-docs/src/main/paradox/java/cluster-singleton.md b/akka-docs/src/main/paradox/java/cluster-singleton.md index 87426cf447..7c08a8ccc8 100644 --- a/akka-docs/src/main/paradox/java/cluster-singleton.md +++ b/akka-docs/src/main/paradox/java/cluster-singleton.md @@ -86,7 +86,7 @@ scenario when integrating with external systems. On each node in the cluster you need to start the `ClusterSingletonManager` and supply the `Props` of the singleton actor, in this case the JMS queue consumer. 
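In outline (a sketch assuming an `ActorSystem` named `system` and a singleton `Consumer` actor class), this can look like:

```java
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.cluster.singleton.ClusterSingletonManager;
import akka.cluster.singleton.ClusterSingletonManagerSettings;

system.actorOf(
    ClusterSingletonManager.props(
        Props.create(Consumer.class),        // Props of the singleton actor
        PoisonPill.getInstance(),            // terminationMessage used during hand-over
        ClusterSingletonManagerSettings.create(system).withRole("worker")),
    "consumer");
```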
-@@snip [ClusterSingletonManagerTest.java]../../../../../akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-manager } +@@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-manager } Here we limit the singleton to nodes tagged with the `"worker"` role, but all nodes, independent of role, can be used by not specifying `withRole`. @@ -98,7 +98,7 @@ perfectly fine `terminationMessage` if you only need to stop the actor. With the names given above, access to the singleton can be obtained from any cluster node using a properly configured proxy. -@@snip [ClusterSingletonManagerTest.java]../../../../../akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-proxy } +@@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-proxy } A more comprehensive sample is available in the tutorial named [Distributed workers with Akka and Java!](https://github.com/typesafehub/activator-akka-distributed-workers-java). @@ -130,7 +130,7 @@ or create it from another config section with the same layout as below. `Cluster a parameter to the `ClusterSingletonManager.props` factory method, i.e. each singleton can be configured with different settings if needed. -@@snip [reference.conf]../../../../../akka-cluster-tools/src/main/resources/reference.conf) { #singleton-config } +@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #singleton-config } The following configuration properties are read by the `ClusterSingletonProxySettings` when created with a `ActorSystem` parameter. It is also possible to amend the `ClusterSingletonProxySettings` @@ -138,4 +138,4 @@ or create it from another config section with the same layout as below. `Cluster a parameter to the `ClusterSingletonProxy.props` factory method, i.e. each singleton proxy can be configured with different settings if needed. -@@snip [reference.conf]../../../../../akka-cluster-tools/src/main/resources/reference.conf) { #singleton-proxy-config } \ No newline at end of file +@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #singleton-proxy-config } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/cluster-usage.md b/akka-docs/src/main/paradox/java/cluster-usage.md index 81db6b7753..b79244c9b7 100644 --- a/akka-docs/src/main/paradox/java/cluster-usage.md +++ b/akka-docs/src/main/paradox/java/cluster-usage.md @@ -76,7 +76,7 @@ ip-addresses or host names of the machines in `application.conf` instead of `127 An actor that uses the cluster extension may look like this: -@@snip [SimpleClusterListener.java](code/jdocs/cluster/SimpleClusterListener.java) { type=java } +@@snip [SimpleClusterListener.java]($code$/java/jdocs/cluster/SimpleClusterListener.java) { type=java } The actor registers itself as subscriber of certain cluster events. It receives events corresponding to the current state of the cluster when the subscription starts and then it receives events for changes that happen in the cluster. @@ -219,7 +219,7 @@ A more graceful exit can be performed if you tell the cluster that a node shall This can be performed using [cluster_jmx_java](#cluster-jmx-java) or [cluster_http_java](#cluster-http-java). 
It can also be performed programmatically with: -@@snip [ClusterDocTest.java](code/jdocs/cluster/ClusterDocTest.java) { #leave } +@@snip [ClusterDocTest.java]($code$/java/jdocs/cluster/ClusterDocTest.java) { #leave } Note that this command can be issued to any member in the cluster, not necessarily the one that is leaving. @@ -261,7 +261,7 @@ have no knowledge about the existence of the new members. You should for example You can subscribe to change notifications of the cluster membership by using `Cluster.get(system).subscribe`. -@@snip [SimpleClusterListener2.java](code/jdocs/cluster/SimpleClusterListener2.java) { #subscribe } +@@snip [SimpleClusterListener2.java]($code$/java/jdocs/cluster/SimpleClusterListener2.java) { #subscribe } A snapshot of the full state, `akka.cluster.ClusterEvent.CurrentClusterState`, is sent to the subscriber as the first message, followed by events for incremental updates. @@ -278,7 +278,7 @@ the events corresponding to the current state to mimic what you would have seen listening to the events when they occurred in the past. Note that those initial events only correspond to the current state and it is not the full history of all changes that actually has occurred in the cluster. -@@snip [SimpleClusterListener.java](code/jdocs/cluster/SimpleClusterListener.java) { #subscribe } +@@snip [SimpleClusterListener.java]($code$/java/jdocs/cluster/SimpleClusterListener.java) { #subscribe } The events to track the life-cycle of members are: @@ -313,11 +313,11 @@ added or removed to the cluster dynamically. Messages: -@@snip [TransformationMessages.java](code/jdocs/cluster/TransformationMessages.java) { #messages } +@@snip [TransformationMessages.java]($code$/java/jdocs/cluster/TransformationMessages.java) { #messages } The backend worker that performs the transformation job: -@@snip [TransformationBackend.java](code/jdocs/cluster/TransformationBackend.java) { #backend } +@@snip [TransformationBackend.java]($code$/java/jdocs/cluster/TransformationBackend.java) { #backend } Note that the `TransformationBackend` actor subscribes to cluster events to detect new, potential, frontend nodes, and send them a registration message so that they know @@ -325,7 +325,7 @@ that they can use the backend worker. The frontend that receives user jobs and delegates to one of the registered backend workers: -@@snip [TransformationFrontend.java](code/jdocs/cluster/TransformationFrontend.java) { #frontend } +@@snip [TransformationFrontend.java]($code$/java/jdocs/cluster/TransformationFrontend.java) { #frontend } Note that the `TransformationFrontend` actor watch the registered backend to be able to remove it from its list of available backend workers. @@ -376,7 +376,7 @@ You can start the actors in a `registerOnMemberUp` callback, which will be invoked when the current member status is changed to 'Up', i.e. the cluster has at least the defined number of members. -@@snip [FactorialFrontendMain.java](code/jdocs/cluster/FactorialFrontendMain.java) { #registerOnUp } +@@snip [FactorialFrontendMain.java]($code$/java/jdocs/cluster/FactorialFrontendMain.java) { #registerOnUp } This callback can be used for other things than starting actors. @@ -574,7 +574,7 @@ Set it to a lower value if you want to limit total number of routees. 
The same type of router could also have been defined in code: -@@snip [StatsService.java](code/jdocs/cluster/StatsService.java) { #router-lookup-in-code } +@@snip [StatsService.java]($code$/java/jdocs/cluster/StatsService.java) { #router-lookup-in-code } See [cluster_configuration_java](#cluster-configuration-java) section for further descriptions of the settings. @@ -591,17 +591,17 @@ the average number of characters per word when all results have been collected. Messages: -@@snip [StatsMessages.java](code/jdocs/cluster/StatsMessages.java) { #messages } +@@snip [StatsMessages.java]($code$/java/jdocs/cluster/StatsMessages.java) { #messages } The worker that counts number of characters in each word: -@@snip [StatsWorker.java](code/jdocs/cluster/StatsWorker.java) { #worker } +@@snip [StatsWorker.java]($code$/java/jdocs/cluster/StatsWorker.java) { #worker } The service that receives text from users and splits it up into words, delegates to workers and aggregates: -@@snip [StatsService.java](code/jdocs/cluster/StatsService.java) { #service } +@@snip [StatsService.java]($code$/java/jdocs/cluster/StatsService.java) { #service } -@@snip [StatsAggregator.java](code/jdocs/cluster/StatsAggregator.java) { #aggregator } +@@snip [StatsAggregator.java]($code$/java/jdocs/cluster/StatsAggregator.java) { #aggregator } Note, nothing cluster specific so far, just plain actors. @@ -658,7 +658,7 @@ Set it to a lower value if you want to limit total number of routees. The same type of router could also have been defined in code: -@@snip [StatsService.java](code/jdocs/cluster/StatsService.java) { #router-deploy-in-code } +@@snip [StatsService.java]($code$/java/jdocs/cluster/StatsService.java) { #router-deploy-in-code } See [cluster_configuration_java](#cluster-configuration-java) section for further descriptions of the settings. @@ -668,12 +668,12 @@ Let's take a look at how to use a cluster aware router on single master node tha and deploys workers. To keep track of a single master we use the @ref:[Cluster Singleton](cluster-singleton.md) in the cluster-tools module. The `ClusterSingletonManager` is started on each node. -@@snip [StatsSampleOneMasterMain.java](code/jdocs/cluster/StatsSampleOneMasterMain.java) { #create-singleton-manager } +@@snip [StatsSampleOneMasterMain.java]($code$/java/jdocs/cluster/StatsSampleOneMasterMain.java) { #create-singleton-manager } We also need an actor on each node that keeps track of where current single master exists and delegates jobs to the `StatsService`. That is provided by the `ClusterSingletonProxy`. -@@snip [StatsSampleOneMasterMain.java](code/jdocs/cluster/StatsSampleOneMasterMain.java) { #singleton-proxy } +@@snip [StatsSampleOneMasterMain.java]($code$/java/jdocs/cluster/StatsSampleOneMasterMain.java) { #singleton-proxy } The `ClusterSingletonProxy` receives text from users and delegates to the current `StatsService`, the single master. It listens to cluster events to lookup the `StatsService` on the oldest node. diff --git a/akka-docs/src/main/paradox/java/dispatchers.md b/akka-docs/src/main/paradox/java/dispatchers.md index 9cf11f280d..d3701fd66b 100644 --- a/akka-docs/src/main/paradox/java/dispatchers.md +++ b/akka-docs/src/main/paradox/java/dispatchers.md @@ -18,14 +18,14 @@ gives excellent performance in most cases. Dispatchers implement the `ExecutionContext` interface and can thus be used to run `Future` invocations etc. 
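For example (a sketch assuming an `ActorSystem` named `system` and a dispatcher configured under the key `"my-dispatcher"`):

```java
import akka.dispatch.Futures;
import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;

// A dispatcher is an ExecutionContext, so it can run Future work directly.
final ExecutionContext ec = system.dispatchers().lookup("my-dispatcher");
final Future<String> f = Futures.future(() -> "computed on my-dispatcher", ec);
```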
-@@snip [DispatcherDocTest.java](code/jdocs/dispatcher/DispatcherDocTest.java) { #lookup } +@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #lookup } ## Setting the dispatcher for an Actor So in case you want to give your `Actor` a different dispatcher than the default, you need to do two things, of which the first is is to configure the dispatcher: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #my-dispatcher-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #my-dispatcher-config } @@@ note @@ -38,7 +38,7 @@ You can read more about parallelism in the JDK's [ForkJoinPool documentation](ht Another example that uses the "thread-pool-executor": -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #fixed-pool-size-dispatcher-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #fixed-pool-size-dispatcher-config } @@@ note @@ -51,15 +51,15 @@ For more options, see the default-dispatcher section of the supervision. @@ -105,42 +105,42 @@ strategy. The following section shows the effects of the different directives in practice, where a test setup is needed. First off, we need a suitable supervisor: -@@snip [FaultHandlingTest.java](code/jdocs/actor/FaultHandlingTest.java) { #supervisor } +@@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #supervisor } This supervisor will be used to create a child, with which we can experiment: -@@snip [FaultHandlingTest.java](code/jdocs/actor/FaultHandlingTest.java) { #child } +@@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #child } The test is easier by using the utilities described in akka-testkit, where `TestProbe` provides an actor ref useful for receiving and inspecting replies. -@@snip [FaultHandlingTest.java](code/jdocs/actor/FaultHandlingTest.java) { #testkit } +@@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #testkit } Let us create actors: -@@snip [FaultHandlingTest.java](code/jdocs/actor/FaultHandlingTest.java) { #create } +@@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #create } The first test shall demonstrate the `Resume` directive, so we try it out by setting some non-initial state in the actor and have it fail: -@@snip [FaultHandlingTest.java](code/jdocs/actor/FaultHandlingTest.java) { #resume } +@@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #resume } As you can see the value 42 survives the fault handling directive. Now, if we change the failure to a more serious `NullPointerException`, that will no longer be the case: -@@snip [FaultHandlingTest.java](code/jdocs/actor/FaultHandlingTest.java) { #restart } +@@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #restart } And finally in case of the fatal `IllegalArgumentException` the child will be terminated by the supervisor: -@@snip [FaultHandlingTest.java](code/jdocs/actor/FaultHandlingTest.java) { #stop } +@@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #stop } Up to now the supervisor was completely unaffected by the child’s failure, because the directives set did handle it. In case of an `Exception`, this is not true anymore and the supervisor escalates the failure. 
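For reference, a supervisor strategy exercising the directives discussed above could be declared along these lines inside the supervising actor (a sketch: the exception-to-directive mapping is illustrative, and the actual supervisor is defined in the FaultHandlingTest.java snippets referenced earlier):

```java
import akka.actor.OneForOneStrategy;
import akka.actor.SupervisorStrategy;
import akka.japi.pf.DeciderBuilder;
import scala.concurrent.duration.Duration;

private static final SupervisorStrategy strategy =
    new OneForOneStrategy(
        10,                           // maxNrOfRetries
        Duration.create("1 minute"),  // withinTimeRange
        DeciderBuilder
            .match(ArithmeticException.class, e -> SupervisorStrategy.resume())
            .match(NullPointerException.class, e -> SupervisorStrategy.restart())
            .match(IllegalArgumentException.class, e -> SupervisorStrategy.stop())
            .matchAny(o -> SupervisorStrategy.escalate())
            .build());

@Override
public SupervisorStrategy supervisorStrategy() {
  return strategy;
}
```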
-@@snip [FaultHandlingTest.java](code/jdocs/actor/FaultHandlingTest.java) { #escalate-kill } +@@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #escalate-kill } The supervisor itself is supervised by the top-level actor provided by the `ActorSystem`, which has the default policy to restart in case of all @@ -152,9 +152,9 @@ child not to survive this failure. In case this is not desired (which depends on the use case), we need to use a different supervisor which overrides this behavior. -@@snip [FaultHandlingTest.java](code/jdocs/actor/FaultHandlingTest.java) { #supervisor2 } +@@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #supervisor2 } With this parent, the child survives the escalated restart, as demonstrated in the last test: -@@snip [FaultHandlingTest.java](code/jdocs/actor/FaultHandlingTest.java) { #escalate-restart } \ No newline at end of file +@@snip [FaultHandlingTest.java]($code$/java/jdocs/actor/FaultHandlingTest.java) { #escalate-restart } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/fsm.md b/akka-docs/src/main/paradox/java/fsm.md index 99934aa570..62990b034d 100644 --- a/akka-docs/src/main/paradox/java/fsm.md +++ b/akka-docs/src/main/paradox/java/fsm.md @@ -24,11 +24,11 @@ send them on after the burst ended or a flush request is received. First, consider all of the below to use these import statements: -@@snip [Buncher.java](code/jdocs/actor/fsm/Buncher.java) { #simple-imports } +@@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #simple-imports } The contract of our “Buncher” actor is that it accepts or produces the following messages: -@@snip [Events.java](code/jdocs/actor/fsm/Events.java) { #simple-events } +@@snip [Events.java]($code$/java/jdocs/actor/fsm/Events.java) { #simple-events } `SetTarget` is needed for starting it up, setting the destination for the `Batches` to be passed on; `Queue` will add to the internal queue while @@ -37,7 +37,7 @@ The contract of our “Buncher” actor is that it accepts or produces the follo The actor can be in two states: no message queued (aka `Idle`) or some message queued (aka `Active`). The states and the state data is defined like this: -@@snip [Buncher.java](code/jdocs/actor/fsm/Buncher.java) { #simple-state } +@@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #simple-state } The actor starts out in the idle state. Once a message arrives it will go to the active state and stay there as long as messages keep arriving and no flush is @@ -46,7 +46,7 @@ reference to send the batches to and the actual queue of messages. Now let’s take a look at the skeleton for our FSM actor: -@@snip [Buncher.java](code/jdocs/actor/fsm/Buncher.java) { #simple-fsm } +@@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #simple-fsm } The basic strategy is to declare the actor, by inheriting the `AbstractFSM` class and specifying the possible states and data values as type parameters. 
Within @@ -74,7 +74,7 @@ shall work identically in both states, we make use of the fact that any event which is not handled by the `when()` block is passed to the `whenUnhandled()` block: -@@snip [Buncher.java](code/jdocs/actor/fsm/Buncher.java) { #unhandled-elided } +@@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #unhandled-elided } The first case handled here is adding `Queue()` requests to the internal queue and going to the `Active` state (this does the obvious thing of staying @@ -88,7 +88,7 @@ target, for which we use the `onTransition` mechanism: you can declare multiple such blocks and all of them will be tried for matching behavior in case a state transition occurs (i.e. only when the state actually changes). -@@snip [Buncher.java](code/jdocs/actor/fsm/Buncher.java) { #transition-elided } +@@snip [Buncher.java]($code$/java/jdocs/actor/fsm/Buncher.java) { #transition-elided } The transition callback is a partial function which takes as input a pair of states—the current and the next state. During the state change, the old state @@ -98,7 +98,7 @@ available as `nextStateData`. To verify that this buncher actually works, it is quite easy to write a test using the akka-testkit, here using JUnit as an example: -@@snip [BuncherTest.java](code/jdocs/actor/fsm/BuncherTest.java) { #test-code } +@@snip [BuncherTest.java]($code$/java/jdocs/actor/fsm/BuncherTest.java) { #test-code } ## Reference @@ -107,7 +107,7 @@ using the configuration enables logging of an event trace by `LoggingFSM` instances: -@@snip [FSMDocTest.java](code/jdocs/actor/fsm/FSMDocTest.java) { #logging-fsm } +@@snip [FSMDocTest.java]($code$/java/jdocs/actor/fsm/FSMDocTest.java) { #logging-fsm } This FSM will log at DEBUG level: @@ -411,7 +411,7 @@ The `AbstractLoggingFSM` class adds one more feature to the FSM: a rolling event log which may be used during debugging (for tracing how the FSM entered a certain failure state) or for other creative uses: -@@snip [FSMDocTest.java](code/jdocs/actor/fsm/FSMDocTest.java) { #logging-fsm } +@@snip [FSMDocTest.java]($code$/java/jdocs/actor/fsm/FSMDocTest.java) { #logging-fsm } The `logDepth` defaults to zero, which turns off the event log. diff --git a/akka-docs/src/main/paradox/java/futures.md b/akka-docs/src/main/paradox/java/futures.md index ba51fbc118..564c50b953 100644 --- a/akka-docs/src/main/paradox/java/futures.md +++ b/akka-docs/src/main/paradox/java/futures.md @@ -16,9 +16,9 @@ which is very similar to a `java.util.concurrent.Executor`. if you have an `Acto it will use its default dispatcher as the `ExecutionContext`, or you can use the factory methods provided by the `ExecutionContexts` class to wrap `Executors` and `ExecutorServices`, or even create your own. -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #imports1 } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports1 } -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #diy-execution-context } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #diy-execution-context } ## Use with Actors @@ -28,9 +28,9 @@ which only works if the original sender was an `AbstractActor`) and the second i Using the `ActorRef`'s `ask` method to send a message will return a `Future`. 
To wait for and retrieve the actual result the simplest method is: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #imports1 } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports1 } -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #ask-blocking } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #ask-blocking } This will cause the current thread to block and wait for the `AbstractActor` to 'complete' the `Future` with it's reply. Blocking is discouraged though as it can cause performance problem. @@ -49,7 +49,7 @@ asynchronous composition as described below. To send the result of a `Future` to an `Actor`, you can use the `pipe` construct: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #pipe-to } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #pipe-to } ## Use Directly @@ -57,9 +57,9 @@ A common use case within Akka is to have some computation performed concurrently the extra utility of an `AbstractActor`. If you find yourself creating a pool of `AbstractActor`s for the sole reason of performing a calculation in parallel, there is an easier (and faster) way: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #imports2 } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports2 } -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #future-eval } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #future-eval } In the above code the block passed to `future` will be executed by the default `Dispatcher`, with the return value of the block used to complete the `Future` (in this case, the result would be the string: "HelloWorld"). @@ -68,19 +68,19 @@ and we also avoid the overhead of managing an `AbstractActor`. You can also create already completed Futures using the `Futures` class, which can be either successes: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #successful } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #successful } Or failures: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #failed } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #failed } It is also possible to create an empty `Promise`, to be filled later, and obtain the corresponding `Future`: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #promise } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #promise } For these examples `PrintResult` is defined as follows: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #print-result } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #print-result } ## Functional Futures @@ -93,9 +93,9 @@ The first method for working with `Future` functionally is `map`. This method ta some operation on the result of the `Future`, and returning a new result. The return value of the `map` method is another `Future` that will contain the new result: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #imports2 } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports2 } -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #map } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #map } In this example we are joining two strings together within a `Future`. 
Instead of waiting for f1 to complete, we apply our function that calculates the length of the string using the `map` method. @@ -112,9 +112,9 @@ the `Future` has already been completed, when one of these methods is called. It is very often desirable to be able to combine different Futures with each other, below are some examples on how that can be done in a non-blocking fashion. -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #imports3 } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports3 } -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #sequence } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #sequence } To better explain what happened in the example, `Future.sequence` is taking the `Iterable>` and turning it into a `Future>`. We can then use `map` to work with the `Iterable` directly, @@ -123,9 +123,9 @@ and we aggregate the sum of the `Iterable`. The `traverse` method is similar to `sequence`, but it takes a sequence of `A` and applies a function from `A` to `Future` and returns a `Future>`, enabling parallel `map` over the sequence, if you use `Futures.future` to create the `Future`. -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #imports4 } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports4 } -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #traverse } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #traverse } It's as simple as that! @@ -135,9 +135,9 @@ and the type of the futures and returns something with the same type as the star and then applies the function to all elements in the sequence of futures, non-blockingly, the execution will be started when the last of the Futures is completed. -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #imports5 } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports5 } -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #fold } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #fold } That's all it takes! @@ -145,9 +145,9 @@ If the sequence passed to `fold` is empty, it will return the start-value, in th In some cases you don't have a start-value and you're able to use the value of the first completing `Future` in the sequence as the start-value, you can use `reduce`, it works like this: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #imports6 } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports6 } -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #reduce } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #reduce } Same as with `fold`, the execution will be started when the last of the Futures is completed, you can also parallelize it by chunking your futures into sub-sequences and reduce them, and then reduce the reduced results again. @@ -159,11 +159,11 @@ This is just a sample of what can be done. Sometimes you just want to listen to a `Future` being completed, and react to that not by creating a new Future, but by side-effecting. For this Scala supports `onComplete`, `onSuccess` and `onFailure`, of which the last two are specializations of the first. 
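Since the referenced snippets are not expanded in this patch, here is a rough orientation of what registering such a callback from Java can look like; the class and value names are illustrative, and the `#onComplete` snippet below remains the authoritative example:

```java
import akka.actor.ActorSystem;
import akka.dispatch.Futures;
import akka.dispatch.OnComplete;
import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;

public class OnCompleteExample {
  public static void main(String[] args) {
    ActorSystem system = ActorSystem.create("callbacks");
    ExecutionContext ec = system.dispatcher();

    Future<String> future = Futures.future(() -> "HelloWorld", ec);

    // side-effect when the Future finishes, without creating a new Future
    future.onComplete(new OnComplete<String>() {
      @Override
      public void onComplete(Throwable failure, String result) {
        if (failure != null) {
          System.out.println("failed with: " + failure.getMessage());
        } else {
          System.out.println("completed with: " + result);
        }
      }
    }, ec);
  }
}
```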
-@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #onSuccess } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onSuccess } -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #onFailure } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onFailure } -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #onComplete } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onComplete } ## Ordering @@ -173,19 +173,19 @@ But there's a solution! And it's name is `andThen`, and it creates a new `Future the specified callback, a `Future` that will have the same result as the `Future` it's called on, which allows for ordering like in the following sample: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #and-then } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #and-then } ## Auxiliary methods `Future` `fallbackTo` combines 2 Futures into a new `Future`, and will hold the successful value of the second `Future` if the first `Future` fails. -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #fallback-to } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #fallback-to } You can also combine two Futures into a new `Future` that will hold a tuple of the two Futures successful results, using the `zip` operation. -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #zip } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #zip } ## Exceptions @@ -197,7 +197,7 @@ calling `Await.result` will cause it to be thrown again so it can be handled pro It is also possible to handle an `Exception` by returning a different result. This is done with the `recover` method. For example: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #recover } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #recover } In this example, if the actor replied with a `akka.actor.Status.Failure` containing the `ArithmeticException`, our `Future` would have a result of 0. The `recover` method works very similarly to the standard try/catch blocks, @@ -207,15 +207,15 @@ it will behave as if we hadn't used the `recover` method. You can also use the `recoverWith` method, which has the same relationship to `recover` as `flatMap` has to `map`, and is use like this: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #try-recover } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #try-recover } ## After `akka.pattern.Patterns.after` makes it easy to complete a `Future` with a value or exception after a timeout. 
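As a rough illustration (the `#after` snippet referenced below is the canonical example), racing a slow computation against a timeout could be sketched like this; the names and durations are made up:

```java
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import akka.actor.ActorSystem;
import akka.dispatch.Futures;
import akka.pattern.Patterns;
import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;

public class AfterExample {
  public static void main(String[] args) {
    ActorSystem system = ActorSystem.create("after");
    ExecutionContext ec = system.dispatcher();

    // a Future that is completed with a failure after 200 ms
    Future<String> timeout = Patterns.after(
      Duration.create(200, TimeUnit.MILLISECONDS),
      system.scheduler(), ec,
      Futures.<String>failed(new IllegalStateException("timed out")));

    // some (slow) computation we want to guard with the timeout
    Future<String> computation = Futures.future(() -> {
      Thread.sleep(1000);
      return "done";
    }, ec);

    // whichever of the two Futures completes first determines the result
    Future<String> guarded =
      Futures.firstCompletedOf(Arrays.<Future<String>>asList(computation, timeout), ec);
  }
}
```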
-@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #imports7 } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports7 } -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #after } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #after } ## Java 8, CompletionStage and CompletableFuture @@ -247,7 +247,7 @@ All *async* methods without an explicit Executor are performed using the `ForkJo When non-async methods are applied on a not yet completed `CompletionStage`, they are completed by the thread which completes initial `CompletionStage`: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #apply-completion-thread } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-completion-thread } In this example Scala `Future` is converted to `CompletionStage` just like Akka does. The completion is delayed: we are calling `thenApply` multiple times on a not yet complete `CompletionStage`, then @@ -262,7 +262,7 @@ default `thenApply` breaks the chain and executes on `ForkJoinPool.commonPool()` In the next example `thenApply` methods are executed on an already completed `Future`/`CompletionStage`: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #apply-main-thread } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-main-thread } First `thenApply` is still executed on `ForkJoinPool.commonPool()` (because it is actually `thenApplyAsync` which is always executed on global Java pool). @@ -274,11 +274,11 @@ and stages are executed on the current thread - the thread which called second a As mentioned above, default *async* methods are always executed on `ForkJoinPool.commonPool()`: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #apply-async-default } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-async-default } `CompletionStage` also has *async* methods which take `Executor` as a second parameter, just like `Future`: -@@snip [FutureDocTest.java](code/jdocs/future/FutureDocTest.java) { #apply-async-executor } +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-async-executor } This example is behaving like `Future`: every stage is executed on an explicitly specified `Executor`. diff --git a/akka-docs/src/main/paradox/java/howto.md b/akka-docs/src/main/paradox/java/howto.md index d1105b43e4..51d9e17cf8 100644 --- a/akka-docs/src/main/paradox/java/howto.md +++ b/akka-docs/src/main/paradox/java/howto.md @@ -29,7 +29,7 @@ sent, and how long the initial delay is. 
Worst case scenario is `interval` plus @@@ -@@snip [SchedulerPatternTest.java](code/jdocs/pattern/SchedulerPatternTest.java) { #schedule-constructor } +@@snip [SchedulerPatternTest.java]($code$/java/jdocs/pattern/SchedulerPatternTest.java) { #schedule-constructor } The second variant sets up an initial one shot message send in the `preStart` method of the actor, and the then the actor when it receives this message sets up a new one shot @@ -43,7 +43,7 @@ under pressure, but only schedule a new tick message when we have seen the previ @@@ -@@snip [SchedulerPatternTest.java](code/jdocs/pattern/SchedulerPatternTest.java) { #schedule-receive } +@@snip [SchedulerPatternTest.java]($code$/java/jdocs/pattern/SchedulerPatternTest.java) { #schedule-receive } ## Single-Use Actor Trees with High-Level Error Reporting @@ -67,7 +67,7 @@ Finally the promise returned by Patterns.ask() is fulfilled as a failure, includ Let's have a look at the example code: -@@snip [SupervisedAsk.java](code/jdocs/pattern/SupervisedAsk.java) { # } +@@snip [SupervisedAsk.java]($code$/java/jdocs/pattern/SupervisedAsk.java) In the askOf method the SupervisorCreator is sent the user message. The SupervisorCreator creates a SupervisorActor and forwards the message. @@ -80,4 +80,4 @@ Afterwards the actor hierarchy is stopped. Finally we are able to execute an actor and receive the results or exceptions. -@@snip [SupervisedAskSpec.java](code/jdocs/pattern/SupervisedAskSpec.java) { # } \ No newline at end of file +@@snip [SupervisedAskSpec.java]($code$/java/jdocs/pattern/SupervisedAskSpec.java) \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/io-tcp.md b/akka-docs/src/main/paradox/java/io-tcp.md index 87047e3e34..48e6a78b38 100644 --- a/akka-docs/src/main/paradox/java/io-tcp.md +++ b/akka-docs/src/main/paradox/java/io-tcp.md @@ -2,19 +2,19 @@ The code snippets through-out this section assume the following imports: -@@snip [IODocTest.java](code/jdocs/io/japi/IODocTest.java) { #imports } +@@snip [IODocTest.java]($code$/java/jdocs/io/japi/IODocTest.java) { #imports } All of the Akka I/O APIs are accessed through manager objects. When using an I/O API, the first step is to acquire a reference to the appropriate manager. The code below shows how to acquire a reference to the `Tcp` manager. -@@snip [EchoManager.java](code/jdocs/io/japi/EchoManager.java) { #manager } +@@snip [EchoManager.java]($code$/java/jdocs/io/japi/EchoManager.java) { #manager } The manager is an actor that handles the underlying low level I/O resources (selectors, channels) and instantiates workers for specific tasks, such as listening to incoming connections. ## Connecting -@@snip [IODocTest.java](code/jdocs/io/japi/IODocTest.java) { #client } +@@snip [IODocTest.java]($code$/java/jdocs/io/japi/IODocTest.java) { #client } The first step of connecting to a remote address is sending a `Connect` message to the TCP manager; in addition to the simplest form shown above there @@ -56,7 +56,7 @@ fine-grained connection close events, see [Closing Connections](#closing-connect ## Accepting connections -@@snip [IODocTest.java](code/jdocs/io/japi/IODocTest.java) { #server } +@@snip [IODocTest.java]($code$/java/jdocs/io/japi/IODocTest.java) { #server } To create a TCP server and listen for inbound connections, a `Bind` command has to be sent to the TCP manager. This will instruct the TCP manager @@ -75,7 +75,7 @@ handler when sending the `Register` message. Writes can be sent from any actor in the system to the connection actor (i.e. 
the actor which sent the `Connected` message). The simplistic handler is defined as: -@@snip [IODocTest.java](code/jdocs/io/japi/IODocTest.java) { #simplistic-handler } +@@snip [IODocTest.java]($code$/java/jdocs/io/japi/IODocTest.java) { #simplistic-handler } For a more complete sample which also takes into account the possibility of failures when sending please see [Throttling Reads and Writes](#throttling-reads-and-writes) below. @@ -211,18 +211,18 @@ this allows the example `EchoHandler` to write all outstanding data back to the client before fully closing the connection. This is enabled using a flag upon connection activation (observe the `Register` message): -@@snip [EchoManager.java](code/jdocs/io/japi/EchoManager.java) { #echo-manager } +@@snip [EchoManager.java]($code$/java/jdocs/io/japi/EchoManager.java) { #echo-manager } With this preparation let us dive into the handler itself: -@@snip [SimpleEchoHandler.java](code/jdocs/io/japi/SimpleEchoHandler.java) { #simple-echo-handler } +@@snip [SimpleEchoHandler.java]($code$/java/jdocs/io/japi/SimpleEchoHandler.java) { #simple-echo-handler } The principle is simple: when having written a chunk always wait for the `Ack` to come back before sending the next chunk. While waiting we switch behavior such that new incoming data are buffered. The helper functions used are a bit lengthy but not complicated: -@@snip [SimpleEchoHandler.java](code/jdocs/io/japi/SimpleEchoHandler.java) { #simple-helpers } +@@snip [SimpleEchoHandler.java]($code$/java/jdocs/io/japi/SimpleEchoHandler.java) { #simple-helpers } The most interesting part is probably the last: an `Ack` removes the oldest data chunk from the buffer, and if that was the last chunk then we either close @@ -243,14 +243,14 @@ how end-to-end back-pressure is realized across a TCP connection. ## NACK-Based Write Back-Pressure with Suspending -@@snip [EchoHandler.java](code/jdocs/io/japi/EchoHandler.java) { #echo-handler } +@@snip [EchoHandler.java]($code$/java/jdocs/io/japi/EchoHandler.java) { #echo-handler } The principle here is to keep writing until a `CommandFailed` is received, using acknowledgements only to prune the resend buffer. When a such a failure was received, transition into a different state for handling and handle resending of all queued data: -@@snip [EchoHandler.java](code/jdocs/io/japi/EchoHandler.java) { #buffering } +@@snip [EchoHandler.java]($code$/java/jdocs/io/japi/EchoHandler.java) { #buffering } It should be noted that all writes which are currently buffered have also been sent to the connection actor upon entering this state, which means that the @@ -263,7 +263,7 @@ is exploited by the `EchoHandler` to switch to an ACK-based approach for the first ten writes after a failure before resuming the optimistic write-through behavior. -@@snip [EchoHandler.java](code/jdocs/io/japi/EchoHandler.java) { #closing } +@@snip [EchoHandler.java]($code$/java/jdocs/io/japi/EchoHandler.java) { #closing } Closing the connection while still sending all data is a bit more involved than in the ACK-based approach: the idea is to always send all outstanding messages @@ -272,7 +272,7 @@ behavior to await the `WritingResumed` event and start over. 
The helper functions are very similar to the ACK-based case: -@@snip [EchoHandler.java](code/jdocs/io/japi/EchoHandler.java) { #helpers } +@@snip [EchoHandler.java]($code$/java/jdocs/io/japi/EchoHandler.java) { #helpers } ## Read Back-Pressure with Pull Mode @@ -284,7 +284,7 @@ since the rate of writing might be slower than the rate of the arrival of new da With the Pull mode this buffer can be completely eliminated as the following snippet demonstrates: -@@snip [JavaReadBackPressure.java](code/jdocs/io/JavaReadBackPressure.java) { #pull-reading-echo } +@@snip [JavaReadBackPressure.java]($code$/java/jdocs/io/JavaReadBackPressure.java) { #pull-reading-echo } The idea here is that reading is not resumed until the previous write has been completely acknowledged by the connection actor. Every pull mode connection @@ -297,7 +297,7 @@ a buffer. To enable pull reading on an outbound connection the `pullMode` parameter of the `Connect` should be set to `true`: -@@snip [JavaReadBackPressure.java](code/jdocs/io/JavaReadBackPressure.java) { #pull-mode-connect } +@@snip [JavaReadBackPressure.java]($code$/java/jdocs/io/JavaReadBackPressure.java) { #pull-mode-connect } ### Pull Mode Reading for Inbound Connections @@ -305,7 +305,7 @@ The previous section demonstrated how to enable pull reading mode for outbound connections but it is possible to create a listener actor with this mode of reading by setting the `pullMode` parameter of the `Bind` command to `true`: -@@snip [JavaReadBackPressure.java](code/jdocs/io/JavaReadBackPressure.java) { #pull-mode-bind } +@@snip [JavaReadBackPressure.java]($code$/java/jdocs/io/JavaReadBackPressure.java) { #pull-mode-bind } One of the effects of this setting is that all connections accepted by this listener actor will use pull mode reading. @@ -318,7 +318,7 @@ it a `ResumeAccepting` message. Listener actors with pull mode start suspended so to start accepting connections a `ResumeAccepting` command has to be sent to the listener actor after binding was successful: -@@snip [JavaReadBackPressure.java](code/jdocs/io/JavaReadBackPressure.java) { #pull-accepting } +@@snip [JavaReadBackPressure.java]($code$/java/jdocs/io/JavaReadBackPressure.java) { #pull-accepting } As shown in the example after handling an incoming connection we need to resume accepting again. The `ResumeAccepting` message accepts a `batchSize` parameter that specifies how diff --git a/akka-docs/src/main/paradox/java/io-udp.md b/akka-docs/src/main/paradox/java/io-udp.md index 781252607f..494e6a1069 100644 --- a/akka-docs/src/main/paradox/java/io-udp.md +++ b/akka-docs/src/main/paradox/java/io-udp.md @@ -18,7 +18,7 @@ offered using distinct IO extensions described below. ### Simple Send -@@snip [UdpDocTest.java](code/jdocs/io/UdpDocTest.java) { #sender } +@@snip [UdpDocTest.java]($code$/java/jdocs/io/UdpDocTest.java) { #sender } The simplest form of UDP usage is to just send datagrams without the need of getting a reply. To this end a “simple sender” facility is provided as @@ -39,7 +39,7 @@ want to close the ephemeral port the sender is bound to. ### Bind (and Send) -@@snip [UdpDocTest.java](code/jdocs/io/UdpDocTest.java) { #listener } +@@snip [UdpDocTest.java]($code$/java/jdocs/io/UdpDocTest.java) { #listener } If you want to implement a UDP server which listens on a socket for incoming datagrams then you need to use the `bind` command as shown above. 
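Because the `#listener` snippet itself is not reproduced in this patch, a minimal sketch of such a bind-based listener actor might look as follows; the class name and echo behaviour are illustrative, not the snippet's actual code:

```java
import java.net.InetSocketAddress;
import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.io.Udp;
import akka.io.UdpMessage;

public class Listener extends AbstractActor {
  @Override
  public void preStart() {
    // ask the UDP manager to bind to a local address; it replies with Udp.Bound
    final ActorRef manager = Udp.get(getContext().getSystem()).getManager();
    manager.tell(UdpMessage.bind(getSelf(), new InetSocketAddress("localhost", 0)), getSelf());
  }

  @Override
  public Receive createReceive() {
    return receiveBuilder()
      .match(Udp.Bound.class, bound ->
        // the sender of Bound is the socket actor; use it for sending afterwards
        getContext().become(ready(getSender())))
      .build();
  }

  private Receive ready(ActorRef socket) {
    return receiveBuilder()
      .match(Udp.Received.class, r ->
        // echo each datagram back to where it came from
        socket.tell(UdpMessage.send(r.data(), r.sender()), getSelf()))
      .build();
  }
}
```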
The @@ -64,7 +64,7 @@ bind-and-send service we saw earlier, but the main difference is that a connection is only able to send to the `remoteAddress` it was connected to, and will receive datagrams only from that address. -@@snip [UdpDocTest.java](code/jdocs/io/UdpDocTest.java) { #connected } +@@snip [UdpDocTest.java]($code$/java/jdocs/io/UdpDocTest.java) { #connected } Consequently the example shown here looks quite similar to the previous one, the biggest difference is the absence of remote address information in @@ -90,12 +90,12 @@ To select a Protocol Family you must extend `akka.io.Inet.DatagramChannelCreator class which implements `akka.io.Inet.SocketOption`. Provide custom logic for opening a datagram channel by overriding `create` method. -@@snip [JavaUdpMulticast.java](code/jdocs/io/JavaUdpMulticast.java) { #inet6-protocol-family } +@@snip [JavaUdpMulticast.java]($code$/java/jdocs/io/JavaUdpMulticast.java) { #inet6-protocol-family } Another socket option will be needed to join a multicast group. -@@snip [JavaUdpMulticast.java](code/jdocs/io/JavaUdpMulticast.java) { #multicast-group } +@@snip [JavaUdpMulticast.java]($code$/java/jdocs/io/JavaUdpMulticast.java) { #multicast-group } Socket options must be provided to `UdpMessage.bind` command. -@@snip [JavaUdpMulticast.java](code/jdocs/io/JavaUdpMulticast.java) { #bind } \ No newline at end of file +@@snip [JavaUdpMulticast.java]($code$/java/jdocs/io/JavaUdpMulticast.java) { #bind } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/io.md b/akka-docs/src/main/paradox/java/io.md index 6791a9c7d9..0b55a3f803 100644 --- a/akka-docs/src/main/paradox/java/io.md +++ b/akka-docs/src/main/paradox/java/io.md @@ -22,7 +22,7 @@ as an entry point for the API. I/O is broken into several drivers. The manager f is accessible by querying an `ActorSystem`. For example the following code looks up the TCP manager and returns its `ActorRef`: -@@snip [EchoManager.java](code/jdocs/io/japi/EchoManager.java) { #manager } +@@snip [EchoManager.java]($code$/java/jdocs/io/japi/EchoManager.java) { #manager } The manager receives I/O command messages and instantiates worker actors in response. The worker actors present themselves to the API user in the reply to the command that was sent. For example after a `Connect` command sent to diff --git a/akka-docs/src/main/paradox/java/logging.md b/akka-docs/src/main/paradox/java/logging.md index 03c23b13b9..9e052d6671 100644 --- a/akka-docs/src/main/paradox/java/logging.md +++ b/akka-docs/src/main/paradox/java/logging.md @@ -12,9 +12,9 @@ synchronously. Create a `LoggingAdapter` and use the `error`, `warning`, `info`, or `debug` methods, as illustrated in this example: -@@snip [LoggingDocTest.java](code/jdocs/event/LoggingDocTest.java) { #imports } +@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports } -@@snip [LoggingDocTest.java](code/jdocs/event/LoggingDocTest.java) { #my-actor } +@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #my-actor } The first parameter to `Logging.getLogger` could also be any `LoggingBus`, specifically `system.eventStream()`; in the demonstrated @@ -36,7 +36,7 @@ placeholders results in a warning being appended to the log statement (i.e. on the same line with the same severity). 
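For orientation, a minimal actor using a `LoggingAdapter` with placeholder substitution might look like the sketch below; the referenced `#my-actor` snippet is the authoritative version and the class name here is illustrative:

```java
import akka.actor.AbstractActor;
import akka.event.Logging;
import akka.event.LoggingAdapter;

public class MyLoggingActor extends AbstractActor {
  private final LoggingAdapter log = Logging.getLogger(getContext().getSystem(), this);

  @Override
  public Receive createReceive() {
    return receiveBuilder()
      .matchAny(msg ->
        // {} placeholders are filled left to right with the given arguments
        log.info("received {} from {}", msg, getSender()))
      .build();
  }
}
```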
You may pass a Java array as the only substitution argument to have its elements be treated individually: -@@snip [LoggingDocTest.java](code/jdocs/event/LoggingDocTest.java) { #array } +@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #array } The Java `Class` of the log source is also included in the generated `LogEvent`. In case of a simple string this is replaced with a “marker” @@ -246,9 +246,9 @@ logger available in the 'akka-slf4j' module. Example of creating a listener: -@@snip [LoggingDocTest.java](code/jdocs/event/LoggingDocTest.java) { #imports #imports-listener } +@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports #imports-listener } -@@snip [LoggingDocTest.java](code/jdocs/event/LoggingDocTest.java) { #my-event-listener } +@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #my-event-listener } ## Logging to stdout during startup and shutdown @@ -413,9 +413,9 @@ if it is not set to a new map. Use `log.clearMDC()`. @@@ -@@snip [LoggingDocTest.java](code/jdocs/event/LoggingDocTest.java) { #imports-mdc } +@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports-mdc } -@@snip [LoggingDocTest.java](code/jdocs/event/LoggingDocTest.java) { #mdc-actor } +@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #mdc-actor } Now, the values will be available in the MDC, so you can use them in the layout pattern: diff --git a/akka-docs/src/main/paradox/java/mailboxes.md b/akka-docs/src/main/paradox/java/mailboxes.md index 431d93bb19..0bed48c648 100644 --- a/akka-docs/src/main/paradox/java/mailboxes.md +++ b/akka-docs/src/main/paradox/java/mailboxes.md @@ -12,12 +12,12 @@ It is possible to require a certain type of message queue for a certain type of by having that actor implement the parameterized interface `RequiresMessageQueue`. Here is an example: -@@snip [MyBoundedActor.java](code/jdocs/actor/MyBoundedActor.java) { #my-bounded-untyped-actor } +@@snip [MyBoundedActor.java]($code$/java/jdocs/actor/MyBoundedActor.java) { #my-bounded-untyped-actor } The type parameter to the `RequiresMessageQueue` interface needs to be mapped to a mailbox in configuration like this: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #bounded-mailbox-config #required-mailbox-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #bounded-mailbox-config #required-mailbox-config } Now every time you create an actor of type `MyBoundedActor` it will try to get a bounded mailbox. 
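Since the snippet body is not reproduced in this patch, the shape of such an actor is roughly the following sketch (the real `MyBoundedActor.java` may differ in detail):

```java
import akka.actor.AbstractActor;
import akka.dispatch.BoundedMessageQueueSemantics;
import akka.dispatch.RequiresMessageQueue;

// The marker interface carries no methods; it only declares the required queue type.
public class MyBoundedActor extends AbstractActor
    implements RequiresMessageQueue<BoundedMessageQueueSemantics> {

  @Override
  public Receive createReceive() {
    return receiveBuilder()
      .matchAny(msg -> System.out.println("received " + msg))
      .build();
  }
}
```

The requirement is then mapped to a concrete bounded mailbox via the `akka.actor.mailbox.requirements` section of the configuration, which is what the referenced `DispatcherDocSpec` settings provide.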
If the actor has a different mailbox configured in deployment, either directly or via @@ -181,27 +181,27 @@ The following mailboxes should only be used with zero `mailbox-push-timeout-time How to create a PriorityMailbox: -@@snip [DispatcherDocTest.java](../java/code/jdocs/dispatcher/DispatcherDocTest.java) { #prio-mailbox } +@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #prio-mailbox } And then add it to the configuration: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher-config } And then an example on how you would use it: -@@snip [DispatcherDocTest.java](../java/code/jdocs/dispatcher/DispatcherDocTest.java) { #prio-dispatcher } +@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #prio-dispatcher } It is also possible to configure a mailbox type directly like this: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config-java #mailbox-deployment-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config-java #mailbox-deployment-config } And then use it either from deployment like this: -@@snip [DispatcherDocTest.java](code/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-config } +@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-config } Or code like this: -@@snip [DispatcherDocTest.java](code/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-code } +@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-code } ### ControlAwareMailbox @@ -210,23 +210,23 @@ immediately no matter how many other messages are already in its mailbox. 
It can be configured like this: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-config } Control messages need to extend the `ControlMessage` trait: -@@snip [DispatcherDocTest.java](../java/code/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-mailbox-messages } +@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-mailbox-messages } And then an example on how you would use it: -@@snip [DispatcherDocTest.java](../java/code/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-dispatcher } +@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-dispatcher } ## Creating your own Mailbox type An example is worth a thousand quacks: -@@snip [MyUnboundedMailbox.java](code/jdocs/dispatcher/MyUnboundedMailbox.java) { #mailbox-implementation-example } +@@snip [MyUnboundedMailbox.java]($code$/java/jdocs/dispatcher/MyUnboundedMailbox.java) { #mailbox-implementation-example } -@@snip [MyUnboundedMessageQueueSemantics.java](code/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java) { #mailbox-implementation-example } +@@snip [MyUnboundedMessageQueueSemantics.java]($code$/java/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java) { #mailbox-implementation-example } And then you just specify the FQCN of your MailboxType as the value of the "mailbox-type" in the dispatcher configuration, or the mailbox configuration. @@ -245,11 +245,11 @@ dispatcher or mailbox setting using it. You can also use the mailbox as a requirement on the dispatcher like this: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #custom-mailbox-config-java } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #custom-mailbox-config-java } Or by defining the requirement on your actor class like this: -@@snip [DispatcherDocTest.java](code/jdocs/dispatcher/DispatcherDocTest.java) { #require-mailbox-on-actor } +@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #require-mailbox-on-actor } ## Special Semantics of `system.actorOf` diff --git a/akka-docs/src/main/paradox/java/persistence-query-leveldb.md b/akka-docs/src/main/paradox/java/persistence-query-leveldb.md index 6b23930f4a..c912265341 100644 --- a/akka-docs/src/main/paradox/java/persistence-query-leveldb.md +++ b/akka-docs/src/main/paradox/java/persistence-query-leveldb.md @@ -21,7 +21,7 @@ Make sure that you have the following dependency in your project: The `ReadJournal` is retrieved via the `akka.persistence.query.PersistenceQuery` extension: -@@snip [LeveldbPersistenceQueryDocTest.java](code/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #get-read-journal } +@@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #get-read-journal } ## Supported Queries @@ -30,7 +30,7 @@ extension: `eventsByPersistenceId` is used for retrieving events for a specific `PersistentActor` identified by `persistenceId`. 
-@@snip [LeveldbPersistenceQueryDocTest.java](code/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #EventsByPersistenceId } +@@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #EventsByPersistenceId } You can retrieve a subset of all events by specifying `fromSequenceNr` and `toSequenceNr` or use `0L` and `Long.MAX_VALUE` respectively to retrieve all events. Note that @@ -58,7 +58,7 @@ backend journal. `allPersistenceIds` is used for retrieving all `persistenceIds` of all persistent actors. -@@snip [LeveldbPersistenceQueryDocTest.java](code/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #AllPersistenceIds } +@@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #AllPersistenceIds } The returned event stream is unordered and you can expect different order for multiple executions of the query. @@ -79,12 +79,12 @@ backend journal. `eventsByTag` is used for retrieving events that were marked with a given tag, e.g. all domain events of an Aggregate Root type. -@@snip [LeveldbPersistenceQueryDocTest.java](code/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #EventsByTag } +@@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #EventsByTag } To tag events you create an @ref:[Event Adapters](persistence.md#event-adapters-java) that wraps the events in a `akka.persistence.journal.Tagged` with the given `tags`. -@@snip [LeveldbPersistenceQueryDocTest.java](code/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #tagger } +@@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #tagger } You can use `NoOffset` to retrieve all events with a given tag or retrieve a subset of all events by specifying a `Sequence` `offset`. The `offset` corresponds to an ordered sequence number for @@ -132,4 +132,4 @@ for the default `LeveldbReadJournal.Identifier`. It can be configured with the following properties: -@@snip [reference.conf]../../../../../akka-persistence-query/src/main/resources/reference.conf) { #query-leveldb } \ No newline at end of file +@@snip [reference.conf]($akka$/akka-persistence-query/src/main/resources/reference.conf) { #query-leveldb } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/persistence-query.md b/akka-docs/src/main/paradox/java/persistence-query.md index 34f5a5a8fe..1c6edbbc46 100644 --- a/akka-docs/src/main/paradox/java/persistence-query.md +++ b/akka-docs/src/main/paradox/java/persistence-query.md @@ -42,7 +42,7 @@ Read journals are implemented as [Community plugins](http://akka.io/community/#p databases). For example, given a library that provides a `akka.persistence.query.my-read-journal` obtaining the related journal is as simple as: -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #basic-usage } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #basic-usage } Journal implementers are encouraged to put this identifier in a variable known to the user, such that one can access it via `getJournalFor(NoopJournal.class, NoopJournal.identifier)`, however this is not enforced. 
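As a concrete illustration of that pattern, looking up the bundled LevelDB read journal by its identifier and running one of its queries might look roughly like this; the persistence and query plugins are assumed to be configured, and the persistence id is made up:

```java
import akka.NotUsed;
import akka.actor.ActorSystem;
import akka.persistence.query.EventEnvelope;
import akka.persistence.query.PersistenceQuery;
import akka.persistence.query.journal.leveldb.javadsl.LeveldbReadJournal;
import akka.stream.ActorMaterializer;
import akka.stream.Materializer;
import akka.stream.javadsl.Source;

public class ReadJournalExample {
  public static void main(String[] args) {
    final ActorSystem system = ActorSystem.create("queries");
    final Materializer mat = ActorMaterializer.create(system);

    // look the read journal up by its identifier, here the bundled LevelDB one
    final LeveldbReadJournal readJournal = PersistenceQuery.get(system)
      .getReadJournalFor(LeveldbReadJournal.class, LeveldbReadJournal.Identifier());

    // a live stream of all events persisted by one PersistentActor
    final Source<EventEnvelope, NotUsed> events =
      readJournal.eventsByPersistenceId("user-1", 0L, Long.MAX_VALUE);

    events.runForeach(env -> System.out.println(env.event()), mat);
  }
}
```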
@@ -71,11 +71,11 @@ The predefined queries are: By default this stream should be assumed to be a "live" stream, which means that the journal should keep emitting new persistence ids as they come into the system: -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #all-persistence-ids-live } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #all-persistence-ids-live } If your usage does not require a live stream, you can use the `currentPersistenceIds` query: -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #all-persistence-ids-snap } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #all-persistence-ids-snap } #### EventsByPersistenceIdQuery and CurrentEventsByPersistenceIdQuery @@ -83,7 +83,7 @@ If your usage does not require a live stream, you can use the `currentPersistenc however, since it is a stream it is possible to keep it alive and watch for additional incoming events persisted by the persistent actor identified by the given `persistenceId`. -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #events-by-persistent-id } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #events-by-persistent-id } Most journals will have to revert to polling in order to achieve this, which can typically be configured with a `refresh-interval` configuration property. @@ -102,7 +102,7 @@ Some journals may support tagging of events via an @ref:[Event Adapters](persist `akka.persistence.journal.Tagged` with the given `tags`. The journal may support other ways of doing tagging - again, how exactly this is implemented depends on the used journal. Here is an example of such a tagging event adapter: -@@snip [LeveldbPersistenceQueryDocTest.java](code/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #tagger } +@@snip [LeveldbPersistenceQueryDocTest.java]($code$/java/jdocs/persistence/query/LeveldbPersistenceQueryDocTest.java) { #tagger } @@@ note @@ -119,7 +119,7 @@ In the example below we query all events which have been tagged (we assume this @ref:[EventAdapter](persistence.md#event-adapters-java), or that the journal is smart enough that it can figure out what we mean by this tag - for example if the journal stored the events as json it may try to find those with the field `tag` set to this value etc.). -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #events-by-tag } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #events-by-tag } As you can see, we can use all the usual stream combinators available from @ref:[Streams](stream/index.md) on the resulting query stream, including for example taking the first 10 and cancelling the stream. 
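For instance, such a truncated query could be sketched as follows, assuming a read journal `readJournal` and a `Materializer` `mat` are already in scope (the tag name is made up):

```java
import akka.NotUsed;
import akka.persistence.query.EventEnvelope;
import akka.persistence.query.Offset;
import akka.stream.javadsl.Sink;
import akka.stream.javadsl.Source;

Source<EventEnvelope, NotUsed> tagged =
  readJournal.eventsByTag("blue", Offset.noOffset());

tagged
  .take(10)                      // demand only the first 10 events, then cancel upstream
  .map(EventEnvelope::event)
  .runWith(Sink.foreach(e -> System.out.println("tagged event: " + e)), mat);
```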
It is worth pointing out that the built-in `EventsByTag` @@ -139,11 +139,11 @@ stream, for example if it's finite or infinite, strictly ordered or not ordered is defined as the second type parameter of the returned `Source`, which allows journals to provide users with their specialised query object, as demonstrated in the sample below: -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-types } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-types } -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-definition } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-definition } -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-usage } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #advanced-journal-query-usage } ## Performance and denormalization @@ -174,7 +174,7 @@ it may be more efficient or interesting to query it (instead of the source event If the read datastore exposes a [Reactive Streams](http://reactive-streams.org) interface then implementing a simple projection is as simple as, using the read-journal and feeding it into the databases driver interface, for example like so: -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-rs } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-rs } ### Materialize view using mapAsync @@ -184,9 +184,9 @@ you may have to implement the write logic using plain functions or Actors instea In case your write logic is state-less and you just need to convert the events from one data type to another before writing into the alternative datastore, then the projection is as simple as: -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-simple-classes } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-simple-classes } -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-simple } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-simple } ### Resumable projections @@ -198,9 +198,9 @@ The example below additionally highlights how you would use Actors to implement you need to do some complex logic that would be best handled inside an Actor before persisting the event into the other datastore: -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-actor-run } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-actor-run } -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-actor } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #projection-into-different-store-actor } ## Query plugins @@ -232,11 +232,11 @@ As illustrated 
below one of the implementations can delegate to the other. Below is a simple journal implementation: -@@snip [PersistenceQueryDocTest.java](code/jdocs/persistence/PersistenceQueryDocTest.java) { #my-read-journal } +@@snip [PersistenceQueryDocTest.java]($code$/java/jdocs/persistence/PersistenceQueryDocTest.java) { #my-read-journal } And the `EventsByTag` could be backed by such an Actor for example: -@@snip [MyEventsByTagJavaPublisher.java](code/jdocs/persistence/query/MyEventsByTagJavaPublisher.java) { #events-by-tag-publisher } +@@snip [MyEventsByTagJavaPublisher.java]($code$/java/jdocs/persistence/query/MyEventsByTagJavaPublisher.java) { #events-by-tag-publisher } The `ReadJournalProvider` class must have a constructor with one of these signatures: diff --git a/akka-docs/src/main/paradox/java/persistence-schema-evolution.md b/akka-docs/src/main/paradox/java/persistence-schema-evolution.md index 8f6aa1ff39..1bfd686a87 100644 --- a/akka-docs/src/main/paradox/java/persistence-schema-evolution.md +++ b/akka-docs/src/main/paradox/java/persistence-schema-evolution.md @@ -155,15 +155,15 @@ For more in-depth explanations on how serialization picks the serializer to use First we start by defining our domain model class, here representing a person: -@@snip [PersistenceSchemaEvolutionDocTest.java](code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #simplest-custom-serializer-model } +@@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #simplest-custom-serializer-model } Next we implement a serializer (or extend an existing one to be able to handle the new `Person` class): -@@snip [PersistenceSchemaEvolutionDocTest.java](code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #simplest-custom-serializer } +@@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #simplest-custom-serializer } And finally we register the serializer and bind it to handle the `docs.persistence.Person` class: -@@snip [PersistenceSchemaEvolutionDocSpec.scala](../scala/code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer-config } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer-config } Deserialization will be performed by the same serializer which serialized the message initially because of the `identifier` being stored together with the message. @@ -198,20 +198,20 @@ While being able to read messages with missing fields is half of the solution, y values somehow. This is usually modeled as some kind of default value, or by representing the field as an `Optional` See below for an example how reading an optional field from a serialized protocol buffers message might look like. 
-@@snip [PersistenceSchemaEvolutionDocTest.java](code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #protobuf-read-optional-model } +@@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #protobuf-read-optional-model } Next we prepare an protocol definition using the protobuf Interface Description Language, which we'll use to generate the serializer code to be used on the Akka Serialization layer (notice that the schema aproach allows us to easily rename fields, as long as the numeric identifiers of the fields do not change): -@@snip [FlightAppModels.proto]../../protobuf/FlightAppModels.proto) { #protobuf-read-optional-proto } +@@snip [FlightAppModels.proto]($code$/protobuf/FlightAppModels.proto) { #protobuf-read-optional-proto } The serializer implementation uses the protobuf generated classes to marshall the payloads. Optional fields can be handled explicitly or missing values by calling the `has...` methods on the protobuf object, which we do for `seatType` in order to use a `Unknown` type in case the event was stored before we had introduced the field to this event type: -@@snip [PersistenceSchemaEvolutionDocTest.java](code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #protobuf-read-optional } +@@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #protobuf-read-optional } ### Rename fields @@ -237,7 +237,7 @@ add the overhead of having to maintain the schema. When using serializers like t > This is how such a rename would look in protobuf: -@@snip [PersistenceSchemaEvolutionDocSpec.scala](../scala/code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-rename-proto } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-rename-proto } It is important to learn about the strengths and limitations of your serializers, in order to be able to move swiftly and refactor your models fearlessly as you go on with the project. @@ -267,7 +267,7 @@ The following snippet showcases how one could apply renames if working with plai `JsObject` as an example JSON representation): -@@snip [PersistenceSchemaEvolutionDocTest.java](code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #rename-plain-json } +@@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #rename-plain-json } As you can see, manually handling renames induces some boilerplate onto the EventAdapter, however much of it you will find is common infrastructure code that can be either provided by an external library (for promotion management) @@ -333,12 +333,12 @@ Other events (**E**) can simply be passed through. 
The serializer detects that the string manifest points to a removed event type and skips attempting to deserialize it: -@@snip [PersistenceSchemaEvolutionDocTest.java](code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #string-serializer-skip-deleved-event-by-manifest } +@@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #string-serializer-skip-deleved-event-by-manifest } The EventAdapter we implemented is aware of `EventDeserializationSkipped` events (our "Tombstones"), and emits and empty `EventSeq` whenever such object is encoutered: -@@snip [PersistenceSchemaEvolutionDocTest.java](code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #string-serializer-skip-deleved-event-by-manifest-adapter } +@@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #string-serializer-skip-deleved-event-by-manifest-adapter } ### Detach domain model from data model @@ -367,13 +367,13 @@ include additional data for the event (e.g. tags), for ease of later querying. We will use the following domain and data models to showcase how the separation can be implemented by the adapter: -@@snip [PersistenceSchemaEvolutionDocTest.java](code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models } +@@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models } The `EventAdapter` takes care of converting from one model to the other one (in both directions), alowing the models to be completely detached from each other, such that they can be optimised independently as long as the mapping logic is able to convert between them: -@@snip [PersistenceSchemaEvolutionDocTest.java](code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models-adapter } +@@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models-adapter } The same technique could also be used directly in the Serializer if the end result of marshalling is bytes. Then the serializer can simply convert the bytes do the domain object by using the generated protobuf builders. @@ -395,7 +395,7 @@ In this aproach, the `EventAdapter` is used as the marshalling layer: it seriali The journal plugin notices that the incoming event type is JSON (for example by performing a `match` on the incoming event) and stores the incoming object directly. -@@snip [PersistenceSchemaEvolutionDocTest.java](code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models-adapter-json } +@@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models-adapter-json } @@@ note @@ -449,7 +449,7 @@ During recovery however, we now need to convert the old `V1` model into the `V2` Depending if the old event contains a name change, we either emit the `UserNameChanged` or we don't, and the address change is handled similarily: -@@snip [PersistenceSchemaEvolutionDocTest.java](code/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #split-events-during-recovery } +@@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #split-events-during-recovery } By returning an `EventSeq` from the event adapter, the recovered event can be converted to multiple events before being delivered to the persistent actor. 
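A rough sketch of such a splitting adapter is shown below; the event class names are hypothetical stand-ins for the V1 and V2 models discussed above, not the classes used in the referenced test:

```java
import akka.persistence.journal.EventAdapter;
import akka.persistence.journal.EventSeq;

public class UserEventsAdapter implements EventAdapter {

  // hypothetical event classes, simplified: V1 combined both changes in one event
  static class UserDetailsChanged { final String name; final String address;
    UserDetailsChanged(String name, String address) { this.name = name; this.address = address; } }
  static class UserNameChanged { final String name;
    UserNameChanged(String name) { this.name = name; } }
  static class UserAddressChanged { final String address;
    UserAddressChanged(String address) { this.address = address; } }

  @Override
  public String manifest(Object event) {
    return ""; // no manifest needed for this sketch
  }

  @Override
  public Object toJournal(Object event) {
    return event; // new events are written unchanged
  }

  @Override
  public EventSeq fromJournal(Object event, String manifest) {
    if (event instanceof UserDetailsChanged) {
      UserDetailsChanged old = (UserDetailsChanged) event;
      if (old.name == null)
        return EventSeq.single(new UserAddressChanged(old.address));
      else if (old.address == null)
        return EventSeq.single(new UserNameChanged(old.name));
      else
        return EventSeq.create(
          new UserNameChanged(old.name),
          new UserAddressChanged(old.address));
    }
    return EventSeq.single(event); // everything else passes through untouched
  }
}
```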
\ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/persistence.md b/akka-docs/src/main/paradox/java/persistence.md index 4130cfa3ac..a5857401d6 100644 --- a/akka-docs/src/main/paradox/java/persistence.md +++ b/akka-docs/src/main/paradox/java/persistence.md @@ -74,7 +74,7 @@ Akka persistence supports event sourcing with the `AbstractPersistentActor` abst class uses the `persist` method to persist and handle events. The behavior of an `AbstractPersistentActor` is defined by implementing `createReceiveRecover` and `createReceive`. This is demonstrated in the following example. -@@snip [PersistentActorExample.java]../../../../../akka-docs/rst/java/code/jdocs/persistence/PersistentActorExample.java) { #persistent-actor-example } +@@snip [PersistentActorExample.java]($code$/java/jdocs/persistence/PersistentActorExample.java) { #persistent-actor-example } The example defines two data types, `Cmd` and `Evt` to represent commands and events, respectively. The `state` of the `ExamplePersistentActor` is a list of persisted event data contained in `ExampleState`. @@ -124,7 +124,7 @@ behavior when replaying the events. When replay is completed it will use the new A persistent actor must have an identifier that doesn't change across different actor incarnations. The identifier must be defined with the `persistenceId` method. -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #persistence-id-override } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #persistence-id-override } @@@ note @@ -168,7 +168,7 @@ To skip loading snapshots and replay all events you can use `SnapshotSelectionCr This can be useful if snapshot serialization format has changed in an incompatible way. It should typically not be used when events have been deleted. -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-no-snap } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-no-snap } Another example, which can be fun for experiments but probably not in a real application, is setting an upper bound to the replay which allows the actor to be replayed to a certain point "in the past" @@ -176,24 +176,24 @@ instead to its most up to date state. Note that after that it is a bad idea to p events because a later recovery will probably be confused by the new events that follow the events that were previously skipped. 
-@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-custom } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-custom } Recovery can be disabled by returning `Recovery.none()` in the `recovery` method of a `PersistentActor`: -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-disabled } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-disabled } #### Recovery status A persistent actor can query its own recovery status via the methods -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-status } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-status } Sometimes there is a need for performing additional initialization when the recovery has completed before processing any other message sent to the persistent actor. The persistent actor will receive a special `RecoveryCompleted` message right after recovery and before any other received messages. -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-completed } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-completed } If there is a problem with recovering the state of the actor from the journal, `onRecoveryFailure` is called (logging the error by default), and the actor will be stopped. @@ -261,7 +261,7 @@ stash incoming Commands while the Journal is still working on persisting and/or In the below example, the event callbacks may be called "at any time", even after the next Command has been processed. The ordering between events is still guaranteed ("evt-b-1" will be sent after "evt-a-2", which will be sent after "evt-a-1" etc.). -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #persist-async } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #persist-async } @@@ note @@ -288,16 +288,16 @@ use it for *read* operations, and actions which do not have corresponding events Using this method is very similar to the persist family of methods, yet it does **not** persist the passed in event. It will be kept in memory and used when invoking the handler. -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer } Notice that the `getSender()` method is **safe** to call in the handler callback, and will be pointing to the original sender of the command for which this `deferAsync` handler was called. -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer-caller } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer-caller } You can also call `deferAsync` with `persist`. -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer-with-persist } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #defer-with-persist } @@@ warning @@ -317,11 +317,11 @@ however there are situations where it may be useful. 
It is important to understa those situations, as well as their implication on the stashing behaviour (that `persist()` enforces). In the following example two persist calls are issued, and each of them issues another persist inside its callback: -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persist-persist } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persist-persist } When sending two commands to this `PersistentActor`, the persist handlers will be executed in the following order: -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persist-persist-caller } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persist-persist-caller } First the "outer layer" of persist calls is issued and their callbacks are applied. After these have successfully completed, the inner callbacks will be invoked (once the events they are persisting have been confirmed to be persisted by the journal). @@ -331,11 +331,11 @@ is extended until all nested `persist` callbacks have been handled. It is also possible to nest `persistAsync` calls, using the same pattern: -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persistAsync-persistAsync } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persistAsync-persistAsync } In this case no stashing is happening, yet the events are still persisted and callbacks executed in the expected order: -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persistAsync-persistAsync-caller } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #nested-persistAsync-persistAsync-caller } While it is possible to nest mixed `persist` and `persistAsync` with keeping their respective semantics it is not a recommended practice, as it may lead to overly complex nesting. @@ -362,7 +362,7 @@ will most likely fail anyway, since the journal is probably unavailable. It is b actor and after a back-off timeout start it again. The `akka.pattern.BackoffSupervisor` actor is provided to support such restarts. -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #backoff } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #backoff } If persistence of an event is rejected before it is stored, e.g. due to serialization error, `onPersistRejected` will be invoked (logging a warning by default), and the actor continues with @@ -475,11 +475,11 @@ Consider using explicit shut-down messages instead of `PoisonPill` when working The example below highlights how messages arrive in the Actor's mailbox and how they interact with its internal stashing mechanism when `persist()` is used. 
Notice the early stop behaviour that occurs when `PoisonPill` is used: -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown } -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown-example-bad } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown-example-bad } -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown-example-good } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #safe-shutdown-example-good } ### Replay Filter @@ -521,12 +521,12 @@ in context of persistent actors but this is also applicable to persistent views. Persistent actor can save snapshots of internal state by calling the `saveSnapshot` method. If saving of a snapshot succeeds, the persistent actor receives a `SaveSnapshotSuccess` message, otherwise a `SaveSnapshotFailure` message -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #save-snapshot } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #save-snapshot } During recovery, the persistent actor is offered a previously saved snapshot via a `SnapshotOffer` message from which it can initialize internal state. -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #snapshot-offer } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #snapshot-offer } The replayed messages that follow the `SnapshotOffer` message, if any, are younger than the offered snapshot. They finally recover the persistent actor to its current (i.e. latest) state. @@ -534,7 +534,7 @@ They finally recover the persistent actor to its current (i.e. latest) state. In general, a persistent actor is only offered a snapshot if that persistent actor has previously saved one or more snapshots and at least one of these snapshots matches the `SnapshotSelectionCriteria` that can be specified for recovery. -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #snapshot-criteria } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #snapshot-criteria } If not specified, they default to `SnapshotSelectionCriteria.latest()` which selects the latest (= youngest) snapshot. To disable snapshot-based recovery, applications should use `SnapshotSelectionCriteria.none()`. A recovery where no @@ -625,7 +625,7 @@ between `deliver` and `confirmDelivery` is possible. The `deliveryId` must do th of the message, the destination actor will send the same``deliveryId`` wrapped in a confirmation message back to the sender. The sender will then use it to call `confirmDelivery` method to complete the delivery routine. -@@snip [LambdaPersistenceDocTest.java](code/jdocs/persistence/LambdaPersistenceDocTest.java) { #at-least-once-example } +@@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #at-least-once-example } The `deliveryId` generated by the persistence module is a strictly monotonically increasing sequence number without gaps. The same sequence is used for all destinations of the actor, i.e. 
when sending to multiple @@ -699,11 +699,11 @@ json instead of serializing the object to its binary representation. Implementing an EventAdapter is rather stright forward: -@@snip [PersistenceEventAdapterDocTest.java](code/jdocs/persistence/PersistenceEventAdapterDocTest.java) { #identity-event-adapter } +@@snip [PersistenceEventAdapterDocTest.java]($code$/java/jdocs/persistence/PersistenceEventAdapterDocTest.java) { #identity-event-adapter } Then in order for it to be used on events coming to and from the journal you must bind it using the below configuration syntax: -@@snip [PersistenceEventAdapterDocSpec.scala](../scala/code/docs/persistence/PersistenceEventAdapterDocSpec.scala) { #event-adapters-config } +@@snip [PersistenceEventAdapterDocSpec.scala]($code$/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala) { #event-adapters-config } It is possible to bind multiple adapters to one class *for recovery*, in which case the `fromJournal` methods of all bound adapters will be applied to a given matching event (in order of definition in the configuration). Since each adapter may @@ -729,7 +729,7 @@ Relationship between incoming messages, FSM's states and transitions, persistenc To demonstrate the features of the `AbstractPersistentFSM`, consider an actor which represents a Web store customer. The contract of our "WebStoreCustomerFSMActor" is that it accepts the following commands: -@@snip [AbstractPersistentFSMTest.java]../../../../../akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-commands } +@@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-commands } `AddItem` sent when the customer adds an item to a shopping cart `Buy` - when the customer finishes the purchase @@ -738,7 +738,7 @@ The contract of our "WebStoreCustomerFSMActor" is that it accepts the following The customer can be in one of the following states: -@@snip [AbstractPersistentFSMTest.java]../../../../../akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-states } +@@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-states } `LookingAround` customer is browsing the site, but hasn't added anything to the shopping cart `Shopping` customer has recently added items to the shopping cart @@ -756,15 +756,15 @@ String identifiers should be unique! Customer's actions are "recorded" as a sequence of "domain events" which are persisted. 
Those events are replayed on actor's start in order to restore the latest customer's state: -@@snip [AbstractPersistentFSMTest.java]../../../../../akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-domain-events } +@@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-domain-events } Customer state data represents the items in customer's shopping cart: -@@snip [AbstractPersistentFSMTest.java]../../../../../akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-states-data } +@@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-states-data } Here is how everything is wired together: -@@snip [AbstractPersistentFSMTest.java]../../../../../akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-fsm-body } +@@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-fsm-body } @@@ note @@ -773,16 +773,16 @@ Override the `applyEvent` method to define how state data is affected by domain @@@ -@@snip [AbstractPersistentFSMTest.java]../../../../../akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-apply-event } +@@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-apply-event } `andThen` can be used to define actions which will be executed following event's persistence - convenient for "side effects" like sending a message or logging. Notice that actions defined in `andThen` block are not executed on recovery: -@@snip [AbstractPersistentFSMTest.java]../../../../../akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-andthen-example } +@@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-andthen-example } A snapshot of state data can be persisted by calling the `saveStateSnapshot()` method: -@@snip [AbstractPersistentFSMTest.java]../../../../../akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-snapshot-example } +@@snip [AbstractPersistentFSMTest.java]($akka$/akka-persistence/src/test/java/akka/persistence/fsm/AbstractPersistentFSMTest.java) { #customer-snapshot-example } On recovery state data is initialized according to the latest available snapshot, then the remaining domain events are replayed, triggering the `applyEvent` method. @@ -811,7 +811,7 @@ For an example of snapshot store plugin which writes snapshots as individual fil Applications can provide their own plugins by implementing a plugin API and activate them by configuration. Plugin development requires the following imports: -@@snip [LambdaPersistencePluginDocTest.java](code/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #plugin-imports } +@@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #plugin-imports } ### Eager initialization of persistence plugin @@ -827,19 +827,19 @@ A journal plugin extends `AsyncWriteJournal`. 
`AsyncWriteJournal` is an actor and the methods to be implemented are: -@@snip [AsyncWritePlugin.java]../../../../../akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncWritePlugin.java) { #async-write-plugin-api } +@@snip [AsyncWritePlugin.java]($akka$/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncWritePlugin.java) { #async-write-plugin-api } If the storage backend API only supports synchronous, blocking writes, the methods should be implemented as: -@@snip [LambdaPersistencePluginDocTest.java](code/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #sync-journal-plugin-api } +@@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #sync-journal-plugin-api } A journal plugin must also implement the methods defined in `AsyncRecovery` for replays and sequence number recovery: -@@snip [AsyncRecoveryPlugin.java]../../../../../akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncRecoveryPlugin.java) { #async-replay-plugin-api } +@@snip [AsyncRecoveryPlugin.java]($akka$/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncRecoveryPlugin.java) { #async-replay-plugin-api } A journal plugin can be activated with the following minimal configuration: -@@snip [PersistencePluginDocSpec.scala](../scala/code/docs/persistence/PersistencePluginDocSpec.scala) { #journal-plugin-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-plugin-config } The journal plugin instance is an actor so the methods corresponding to requests from persistent actors are executed sequentially. It may delegate to asynchronous libraries, spawn futures, or delegate to other @@ -863,11 +863,11 @@ Don't run journal tasks/futures on the system default dispatcher, since that mig A snapshot store plugin must extend the `SnapshotStore` actor and implement the following methods: -@@snip [SnapshotStorePlugin.java]../../../../../akka-persistence/src/main/java/akka/persistence/snapshot/japi/SnapshotStorePlugin.java) { #snapshot-store-plugin-api } +@@snip [SnapshotStorePlugin.java]($akka$/akka-persistence/src/main/java/akka/persistence/snapshot/japi/SnapshotStorePlugin.java) { #snapshot-store-plugin-api } A snapshot store plugin can be activated with the following minimal configuration: -@@snip [PersistencePluginDocSpec.scala](../scala/code/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-store-plugin-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-store-plugin-config } The snapshot store instance is an actor so the methods corresponding to requests from persistent actors are executed sequentially. It may delegate to asynchronous libraries, spawn futures, or delegate to other @@ -904,7 +904,7 @@ The TCK is usable from Java as well as Scala projects. For Java you need to incl To include the Journal TCK tests in your test suite simply extend the provided `JavaJournalSpec`: -@@snip [LambdaPersistencePluginDocTest.java](./code/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #journal-tck-java } +@@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #journal-tck-java } Please note that some of the tests are optional, and by overriding the `supports...` methods you give the TCK the needed information about which tests to run. 
You can implement these methods using the provided @@ -917,12 +917,12 @@ typical scenarios. In order to include the `SnapshotStore` TCK tests in your test suite simply extend the `SnapshotStoreSpec`: -@@snip [LambdaPersistencePluginDocTest.java](./code/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #snapshot-store-tck-java } +@@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #snapshot-store-tck-java } In case your plugin requires some setting up (starting a mock database, removing temporary files etc.) you can override the `beforeAll` and `afterAll` methods to hook into the tests lifecycle: -@@snip [LambdaPersistencePluginDocTest.java](./code/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #journal-tck-before-after-java } +@@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #journal-tck-before-after-java } We *highly recommend* including these specifications in your test suite, as they cover a broad range of cases you might have otherwise forgotten to test for when writing a plugin from scratch. @@ -935,7 +935,7 @@ might have otherwise forgotten to test for when writing a plugin from scratch. The LevelDB journal plugin config entry is `akka.persistence.journal.leveldb`. It writes messages to a local LevelDB instance. Enable this plugin by defining config property: -@@snip [PersistencePluginDocSpec.scala](../scala/code/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-plugin-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-plugin-config } LevelDB based plugins will also require the following additional dependency declaration: @@ -955,7 +955,7 @@ LevelDB based plugins will also require the following additional dependency decl The default location of LevelDB files is a directory named `journal` in the current working directory. This location can be changed by configuration where the specified path can be relative or absolute: -@@snip [PersistencePluginDocSpec.scala](../scala/code/docs/persistence/PersistencePluginDocSpec.scala) { #journal-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-config } With this plugin, each actor system runs its own private LevelDB instance. @@ -981,22 +981,22 @@ This plugin has been supplanted by [Persistence Plugin Proxy](#persistence-plugi A shared LevelDB instance is started by instantiating the `SharedLeveldbStore` actor. -@@snip [LambdaPersistencePluginDocTest.java](code/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #shared-store-creation } +@@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #shared-store-creation } By default, the shared instance writes journaled messages to a local directory named `journal` in the current working directory. The storage location can be changed by configuration: -@@snip [PersistencePluginDocSpec.scala](../scala/code/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-config } Actor systems that use a shared LevelDB store must activate the `akka.persistence.journal.leveldb-shared` plugin. 
-@@snip [PersistencePluginDocSpec.scala](../scala/code/docs/persistence/PersistencePluginDocSpec.scala) { #shared-journal-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-journal-config } This plugin must be initialized by injecting the (remote) `SharedLeveldbStore` actor reference. Injection is done by calling the `SharedLeveldbJournal.setStore` method with the actor reference as argument. -@@snip [LambdaPersistencePluginDocTest.java](code/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #shared-store-usage } +@@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #shared-store-usage } Internal journal commands (sent by persistent actors) are buffered until injection completes. Injection is idempotent i.e. only the first injection is used. @@ -1007,12 +1007,12 @@ i.e. only the first injection is used. Local snapshot store plugin config entry is `akka.persistence.snapshot-store.local`. It writes snapshot files to the local filesystem. Enable this plugin by defining config property: -@@snip [PersistencePluginDocSpec.scala](../scala/code/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-snapshot-plugin-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-snapshot-plugin-config } The default storage location is a directory named `snapshots` in the current working directory. This can be changed by configuration where the specified path can be relative or absolute: -@@snip [PersistencePluginDocSpec.scala](../scala/code/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-config } Note that it is not mandatory to specify a snapshot store plugin. If you don't use snapshots you don't have to configure it. @@ -1066,7 +1066,7 @@ Serialization of snapshots and payloads of `Persistent` messages is configurable it must add -@@snip [PersistenceSerializerDocSpec.scala](../scala/code/docs/persistence/PersistenceSerializerDocSpec.scala) { #custom-serializer-config } +@@snip [PersistenceSerializerDocSpec.scala]($code$/scala/docs/persistence/PersistenceSerializerDocSpec.scala) { #custom-serializer-config } to the application configuration. If not specified, a default serializer is used. @@ -1076,11 +1076,11 @@ For more advanced schema evolution techniques refer to the @ref:[Persistence - S When running tests with LevelDB default settings in `sbt`, make sure to set `fork := true` in your sbt project. Otherwise, you'll see an `UnsatisfiedLinkError`. Alternatively, you can switch to a LevelDB Java port by setting -@@snip [PersistencePluginDocSpec.scala](../scala/code/docs/persistence/PersistencePluginDocSpec.scala) { #native-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #native-config } or -@@snip [PersistencePluginDocSpec.scala](../scala/code/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-native-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-native-config } in your Akka configuration. The LevelDB Java port is for testing purposes only. 
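The `#native-config` and `#shared-store-native-config` snippets referenced above amount to flipping the LevelDB plugins' `native` flag. A minimal sketch of applying that override programmatically when creating a test system (the exact keys are assumed from the plugin's reference configuration):

```java
import akka.actor.ActorSystem;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class LeveldbJavaPortTestSetup {
  public static void main(String[] args) {
    // Assumed settings: turning `native` off selects the LevelDB Java port,
    // so sbt tests no longer need `fork := true` for the JNI-based library.
    Config config = ConfigFactory.parseString(
        "akka.persistence.journal.leveldb.native = off\n"
            + "akka.persistence.journal.leveldb-shared.store.native = off")
        .withFallback(ConfigFactory.load());

    ActorSystem system = ActorSystem.create("LeveldbJavaPortTest", config);
    system.terminate();
  }
}
```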
@@ -1105,18 +1105,18 @@ to the @ref:[reference configuration](../scala/general/configuration.md#config-a By default, a persistent actor or view will use "default" journal and snapshot store plugins configured in the following sections of the `reference.conf` configuration resource: -@@snip [PersistenceMultiDocSpec.scala](../scala/code/docs/persistence/PersistenceMultiDocSpec.scala) { #default-config } +@@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #default-config } Note that in this case the actor or view overrides only `persistenceId` method: -@@snip [PersistenceMultiDocTest.java](../java/code/jdocs/persistence/PersistenceMultiDocTest.java) { #default-plugins } +@@snip [PersistenceMultiDocTest.java]($code$/java/jdocs/persistence/PersistenceMultiDocTest.java) { #default-plugins } When a persistent actor or view overrides `journalPluginId` and `snapshotPluginId` methods, the actor or view will be serviced by these specific persistence plugins instead of the defaults: -@@snip [PersistenceMultiDocTest.java](../java/code/jdocs/persistence/PersistenceMultiDocTest.java) { #override-plugins } +@@snip [PersistenceMultiDocTest.java]($code$/java/jdocs/persistence/PersistenceMultiDocTest.java) { #override-plugins } Note that `journalPluginId` and `snapshotPluginId` must refer to properly configured `reference.conf` plugin entries with a standard `class` property as well as settings which are specific for those plugins, i.e.: -@@snip [PersistenceMultiDocSpec.scala](../scala/code/docs/persistence/PersistenceMultiDocSpec.scala) { #override-config } \ No newline at end of file +@@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #override-config } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/remoting-artery.md b/akka-docs/src/main/paradox/java/remoting-artery.md index 7e5cf19a92..9741ddc51e 100644 --- a/akka-docs/src/main/paradox/java/remoting-artery.md +++ b/akka-docs/src/main/paradox/java/remoting-artery.md @@ -209,7 +209,7 @@ which in this sample corresponds to `sampleActorSystem@127.0.0.1:2553`. Once you have configured the properties above you would do the following in code: -@@snip [RemoteDeploymentDocTest.java](code/jdocs/remoting/RemoteDeploymentDocTest.java) { #sample-actor } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #sample-actor } The actor class `SampleActor` has to be available to the runtimes using it, i.e. the classloader of the actor systems has to have a JAR containing the class. @@ -245,15 +245,15 @@ precedence. 
With these imports: -@@snip [RemoteDeploymentDocTest.java](code/jdocs/remoting/RemoteDeploymentDocTest.java) { #import } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #import } and a remote address like this: -@@snip [RemoteDeploymentDocTest.java](code/jdocs/remoting/RemoteDeploymentDocTest.java) { #make-address-artery } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #make-address-artery } you can advise the system to create a child on that remote node like so: -@@snip [RemoteDeploymentDocTest.java](code/jdocs/remoting/RemoteDeploymentDocTest.java) { #deploy } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #deploy } ### Remote deployment whitelist @@ -268,7 +268,7 @@ The list of allowed classes has to be configured on the "remote" system, in othe others will be attempting to remote deploy Actors. That system, locally, knows best which Actors it should or should not allow others to remote deploy onto it. The full settings section may for example look like this: -@@snip [RemoteDeploymentWhitelistSpec.scala]../../../../../akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } +@@snip [RemoteDeploymentWhitelistSpec.scala]($akka$/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } Actor classes not included in the whitelist will not be allowed to be remote deployed onto this system. @@ -487,7 +487,7 @@ remained the same, we recommend reading the @ref:[Serialization](serialization.m Implementing an `akka.serialization.ByteBufferSerializer` works the same way as any other serializer, -@@snip [ByteBufferSerializerDocTest.java](code/jdocs/actor/ByteBufferSerializerDocTest.java) { #ByteBufferSerializer-interface } +@@snip [ByteBufferSerializerDocTest.java]($code$/java/jdocs/actor/ByteBufferSerializerDocTest.java) { #ByteBufferSerializer-interface } Implementing a serializer for Artery is therefore as simple as implementing this interface, and binding the serializer as usual (which is explained in @ref:[Serialization](serialization.md)). @@ -498,7 +498,7 @@ The array based methods will be used when `ByteBuffer` is not used, e.g. in Akka Note that the array based methods can be implemented by delegation like this: -@@snip [ByteBufferSerializerDocTest.java](code/jdocs/actor/ByteBufferSerializerDocTest.java) { #bytebufserializer-with-manifest } +@@snip [ByteBufferSerializerDocTest.java]($code$/java/jdocs/actor/ByteBufferSerializerDocTest.java) { #bytebufserializer-with-manifest } ### Disabling the Java Serializer @@ -553,14 +553,14 @@ It is absolutely feasible to combine remoting with @ref:[Routing](routing.md). A pool of remote deployed routees can be configured as: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool-artery } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool-artery } This configuration setting will clone the actor defined in the `Props` of the `remotePool` 10 times and deploy it evenly distributed across the two given target nodes. 
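The same effect can be achieved programmatically by wrapping a pool in a `RemoteRouterConfig`. A minimal sketch, assuming a hypothetical `Echo` routee and placeholder system name, hosts and ports:

```java
import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Address;
import akka.actor.Props;
import akka.remote.routing.RemoteRouterConfig;
import akka.routing.RoundRobinPool;

public class RemotePoolSketch {
  // Hypothetical routee used only for illustration.
  public static class Echo extends AbstractActor {
    @Override
    public Receive createReceive() {
      return receiveBuilder()
          .matchAny(msg -> getSender().tell(msg, getSelf()))
          .build();
    }
  }

  public static ActorRef createRemotePool(ActorSystem system) {
    // Artery addresses use the plain "akka" protocol.
    Address[] nodes = {
        new Address("akka", "remotesys", "otherhost", 2553),
        new Address("akka", "remotesys", "anotherhost", 2553)};
    // Clone the routee Props 10 times, distributed across the two target nodes.
    return system.actorOf(
        new RemoteRouterConfig(new RoundRobinPool(10), nodes)
            .props(Props.create(Echo.class)),
        "remotePool");
  }
}
```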
A group of remote actors can be configured as: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group-artery } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group-artery } This configuration setting will send messages to the defined remote actor paths. It requires that you create the destination actors on the remote nodes with matching paths. @@ -763,7 +763,7 @@ There are lots of configuration properties that are related to remoting in Akka. Setting properties like the listening IP and port number programmatically is best done by using something like the following: -@@snip [RemoteDeploymentDocTest.java](../java/code/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic-artery } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic-artery } @@@ diff --git a/akka-docs/src/main/paradox/java/remoting.md b/akka-docs/src/main/paradox/java/remoting.md index cb8de7b0ef..4e0f7228c6 100644 --- a/akka-docs/src/main/paradox/java/remoting.md +++ b/akka-docs/src/main/paradox/java/remoting.md @@ -136,7 +136,7 @@ which in this sample corresponds to `sampleActorSystem@127.0.0.1:2553`. Once you have configured the properties above you would do the following in code: -@@snip [RemoteDeploymentDocTest.java](code/jdocs/remoting/RemoteDeploymentDocTest.java) { #sample-actor } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #sample-actor } The actor class `SampleActor` has to be available to the runtimes using it, i.e. the classloader of the actor systems has to have a JAR containing the class. @@ -176,15 +176,15 @@ precedence. With these imports: -@@snip [RemoteDeploymentDocTest.java](code/jdocs/remoting/RemoteDeploymentDocTest.java) { #import } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #import } and a remote address like this: -@@snip [RemoteDeploymentDocTest.java](code/jdocs/remoting/RemoteDeploymentDocTest.java) { #make-address } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #make-address } you can advise the system to create a child on that remote node like so: -@@snip [RemoteDeploymentDocTest.java](code/jdocs/remoting/RemoteDeploymentDocTest.java) { #deploy } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #deploy } ### Remote deployment whitelist @@ -200,7 +200,7 @@ The list of allowed classes has to be configured on the "remote" system, in othe others will be attempting to remote deploy Actors. That system, locally, knows best which Actors it should or should not allow others to remote deploy onto it. The full settings section may for example look like this: -@@snip [RemoteDeploymentWhitelistSpec.scala]../../../../../akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } +@@snip [RemoteDeploymentWhitelistSpec.scala]($akka$/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } Actor classes not included in the whitelist will not be allowed to be remote deployed onto this system. @@ -352,14 +352,14 @@ It is absolutely feasible to combine remoting with @ref:[Routing](routing.md). 
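For the classic transport, the address and deploy snippets referenced above typically combine as follows. A minimal sketch, using a stand-in for the `SampleActor` discussed earlier and a placeholder host and port:

```java
import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Address;
import akka.actor.Deploy;
import akka.actor.Props;
import akka.remote.RemoteScope;

public class RemoteDeploySketch {
  // Stand-in for the SampleActor class discussed above; the real class must be
  // on the classpath of both actor systems.
  public static class SampleActor extends AbstractActor {
    @Override
    public Receive createReceive() {
      return receiveBuilder()
          .matchAny(msg -> getSender().tell(msg, getSelf()))
          .build();
    }
  }

  public static ActorRef deployRemotely(ActorSystem system) {
    // Classic remoting addresses use the "akka.tcp" protocol.
    Address addr = new Address("akka.tcp", "sampleActorSystem", "127.0.0.1", 2553);
    // Deploy a child of this system onto the remote node at `addr`.
    return system.actorOf(
        Props.create(SampleActor.class).withDeploy(new Deploy(new RemoteScope(addr))),
        "sampleActor");
  }
}
```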
A pool of remote deployed routees can be configured as: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool } This configuration setting will clone the actor defined in the `Props` of the `remotePool` 10 times and deploy it evenly distributed across the two given target nodes. A group of remote actors can be configured as: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } This configuration setting will send messages to the defined remote actor paths. It requires that you create the destination actors on the remote nodes with matching paths. @@ -599,7 +599,7 @@ There are lots of configuration properties that are related to remoting in Akka. Setting properties like the listening IP and port number programmatically is best done by using something like the following: -@@snip [RemoteDeploymentDocTest.java](code/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic } @@@ diff --git a/akka-docs/src/main/paradox/java/routing.md b/akka-docs/src/main/paradox/java/routing.md index eb16b1ef83..844abf101a 100644 --- a/akka-docs/src/main/paradox/java/routing.md +++ b/akka-docs/src/main/paradox/java/routing.md @@ -13,7 +13,7 @@ also possible to [create your own](#custom-router-java). The following example illustrates how to use a `Router` and manage the routees from within an actor. -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #router-in-actor } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #router-in-actor } We create a `Router` and specify that it should use `RoundRobinRoutingLogic` when routing the messages to the routees. @@ -81,14 +81,14 @@ few exceptions. These are documented in the [Specially Handled Messages](#router The following code and configuration snippets show how to create a [round-robin](#round-robin-router-java) router that forwards messages to five `Worker` routees. The routees will be created as the router's children. -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #round-robin-pool-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-1 } Here is the same example, but with the router configuration provided programmatically instead of from configuration. -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #round-robin-pool-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-2 } #### Remote Deployed Routees @@ -98,20 +98,20 @@ fashion. In order to deploy routees remotely, wrap the router configuration in a `RemoteRouterConfig`, attaching the remote addresses of the nodes to deploy to. Remote deployment requires the `akka-remote` module to be included in the classpath. 
-@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #remoteRoutees } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #remoteRoutees } #### Senders When a routee sends a message, it can @ref:[set itself as the sender ](actors.md#actors-tell-sender-java). -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #reply-with-self } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #reply-with-self } However, it is often useful for routees to set the *router* as a sender. For example, you might want to set the router as the sender if you want to hide the details of the routees behind the router. The following code snippet shows how to set the parent router as sender. -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #reply-with-parent } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #reply-with-parent } #### Supervision @@ -139,7 +139,7 @@ by specifying the strategy when defining the router. Setting the strategy is easily done: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #supervision } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #supervision } @@@ note @@ -160,25 +160,25 @@ to these paths. The example below shows how to create a router by providing it with the path strings of three routee actors. -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #round-robin-group-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-group-1 } Here is the same example, but with the router configuration provided programmatically instead of from configuration. -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #round-robin-group-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-group-2 } The routee actors are created externally from the router: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #create-workers } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #create-workers } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #create-worker-actors } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #create-worker-actors } The paths may contain protocol and address information for actors running on remote hosts. Remoting requires the `akka-remote` module to be included in the classpath. -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } ## Router usage @@ -188,7 +188,7 @@ The router actors in this section are created from within a top level actor name Note that deployment paths in the configuration starts with `/parent/` followed by the name of the router actor. 
-@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #create-parent } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #create-parent } ### RoundRobinPool and RoundRobinGroup @@ -197,23 +197,23 @@ Routes in a [round-robin](http://en.wikipedia.org/wiki/Round-robin) fashion to i RoundRobinPool defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #round-robin-pool-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-1 } RoundRobinPool defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #round-robin-pool-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-pool-2 } RoundRobinGroup defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #round-robin-group-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #round-robin-group-1 } RoundRobinGroup defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #paths #round-robin-group-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #round-robin-group-2 } ### RandomPool and RandomGroup @@ -221,23 +221,23 @@ This router type selects one of its routees randomly for each message. RandomPool defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-random-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-random-pool } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #random-pool-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #random-pool-1 } RandomPool defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #random-pool-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #random-pool-2 } RandomGroup defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-random-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-random-group } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #random-group-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #random-group-1 } RandomGroup defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #paths #random-group-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #random-group-2 } ### BalancingPool @@ -269,19 +269,19 @@ as described in [Specially Handled Messages](#router-special-messages-java). 
BalancingPool defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-balancing-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #balancing-pool-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #balancing-pool-1 } BalancingPool defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #balancing-pool-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #balancing-pool-2 } Addition configuration for the balancing dispatcher, which is used by the pool, can be configured in the `pool-dispatcher` section of the router deployment configuration. -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-balancing-pool2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool2 } The `BalancingPool` automatically uses a special `BalancingDispatcher` for its routees - disregarding any dispatcher that is set on the routee Props object. @@ -294,14 +294,14 @@ can be configured as explained in @ref:[Dispatchers](dispatchers.md). In situati routees are expected to perform blocking operations it may be useful to replace it with a `thread-pool-executor` hinting the number of allocated threads explicitly: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-balancing-pool3 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool3 } It is also possible to change the `mailbox` used by the balancing dispatcher for scenarios where the default unbounded mailbox is not well suited. An example of such a scenario could arise whether there exists the need to manage priority for each message. You can then implement a priority mailbox and configure your dispatcher: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-balancing-pool4 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool4 } @@@ note @@ -328,13 +328,13 @@ since their mailbox size is unknown SmallestMailboxPool defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-smallest-mailbox-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-smallest-mailbox-pool } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #smallest-mailbox-pool-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #smallest-mailbox-pool-1 } SmallestMailboxPool defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #smallest-mailbox-pool-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #smallest-mailbox-pool-2 } There is no Group variant of the SmallestMailboxPool because the size of the mailbox and the internal dispatching state of the actor is not practically available from the paths @@ -346,23 +346,23 @@ A broadcast router forwards the message it receives to *all* its routees. 
BroadcastPool defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-broadcast-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-broadcast-pool } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #broadcast-pool-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcast-pool-1 } BroadcastPool defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #broadcast-pool-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcast-pool-2 } BroadcastGroup defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-broadcast-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-broadcast-group } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #broadcast-group-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcast-group-1 } BroadcastGroup defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #paths #broadcast-group-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #broadcast-group-2 } @@@ note @@ -383,23 +383,23 @@ It is expecting at least one reply within a configured duration, otherwise it wi ScatterGatherFirstCompletedPool defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-pool } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #scatter-gather-pool-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #scatter-gather-pool-1 } ScatterGatherFirstCompletedPool defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #scatter-gather-pool-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #scatter-gather-pool-2 } ScatterGatherFirstCompletedGroup defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-group } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #scatter-gather-group-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #scatter-gather-group-1 } ScatterGatherFirstCompletedGroup defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #paths #scatter-gather-group-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #scatter-gather-group-2 } ### TailChoppingPool and TailChoppingGroup @@ -415,23 +415,23 @@ This optimisation was described nicely in a blog post by Peter Bailis: TailChoppingPool defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-pool } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #tail-chopping-pool-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #tail-chopping-pool-1 } TailChoppingPool defined in code: -@@snip 
[RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #tail-chopping-pool-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #tail-chopping-pool-2 } TailChoppingGroup defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-group } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #tail-chopping-group-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #tail-chopping-group-1 } TailChoppingGroup defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #paths #tail-chopping-group-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #tail-chopping-group-2 } ### ConsistentHashingPool and ConsistentHashingGroup @@ -457,9 +457,9 @@ the same time for one router. The `withHashMapper` is tried first. Code example: -@@snip [ConsistentHashingRouterDocTest.java](code/jdocs/routing/ConsistentHashingRouterDocTest.java) { #cache-actor } +@@snip [ConsistentHashingRouterDocTest.java]($code$/java/jdocs/routing/ConsistentHashingRouterDocTest.java) { #cache-actor } -@@snip [ConsistentHashingRouterDocTest.java](code/jdocs/routing/ConsistentHashingRouterDocTest.java) { #consistent-hashing-router } +@@snip [ConsistentHashingRouterDocTest.java]($code$/java/jdocs/routing/ConsistentHashingRouterDocTest.java) { #consistent-hashing-router } In the above example you see that the `Get` message implements `ConsistentHashable` itself, while the `Entry` message is wrapped in a `ConsistentHashableEnvelope`. The `Evict` @@ -467,23 +467,23 @@ message is handled by the `hashMapping` partial function. ConsistentHashingPool defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-pool } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #consistent-hashing-pool-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #consistent-hashing-pool-1 } ConsistentHashingPool defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #consistent-hashing-pool-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #consistent-hashing-pool-2 } ConsistentHashingGroup defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-group } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #consistent-hashing-group-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #consistent-hashing-group-1 } ConsistentHashingGroup defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #paths #consistent-hashing-group-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #paths #consistent-hashing-group-2 } `virtual-nodes-factor` is the number of virtual nodes per routee that is used in the consistent hash node ring to make the distribution more uniform. @@ -508,7 +508,7 @@ matter how that router would normally route its messages. 
The example below shows how you would use a `Broadcast` message to send a very important message to every routee of a router. -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #broadcastDavyJonesWarning } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcastDavyJonesWarning } In this example the router receives the `Broadcast` message, extracts its payload (`"Watch out for Davy Jones' locker"`), and then sends the payload on to all of the router's @@ -528,7 +528,7 @@ A `PoisonPill` message has special handling for all actors, including for router receives a `PoisonPill` message, that actor will be stopped. See the @ref:[PoisonPill](actors.md#poison-pill-java) documentation for details. -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #poisonPill } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #poisonPill } For a router, which normally passes on messages to routees, it is important to realise that `PoisonPill` messages are processed by the router only. `PoisonPill` messages sent to a router @@ -546,7 +546,7 @@ router. Instead you should wrap a `PoisonPill` message inside a `Broadcast` mess routee will receive the `PoisonPill` message. Note that this will stop all routees, even if the routees aren't children of the router, i.e. even routees programmatically provided to the router. -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #broadcastPoisonPill } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcastPoisonPill } With the code shown above, each routee will receive a `PoisonPill` message. Each routee will continue to process its messages as normal, eventually processing the `PoisonPill`. This will @@ -575,14 +575,14 @@ Routees that are children of the router will also be suspended, and will be affe supervision directive that is applied to the router. Routees that are not the routers children, i.e. those that were created externally to the router, will not be affected. -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #kill } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #kill } As with the `PoisonPill` message, there is a distinction between killing a router, which indirectly kills its children (who happen to be routees), and killing routees directly (some of whom may not be children.) To kill routees directly the router should be sent a `Kill` message wrapped in a `Broadcast` message. -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #broadcastKill } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #broadcastKill } ### Management Messages @@ -614,16 +614,16 @@ pressure is lower than certain threshold. Both thresholds are configurable. Pool with default resizer defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-resize-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-resize-pool } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #resize-pool-1 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #resize-pool-1 } Several more configuration options are available and described in `akka.actor.deployment.default.resizer` section of the reference configuration. 
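The same resizer can also be attached when the pool is constructed in code; a minimal sketch of that shape, assuming a hypothetical `Worker` routee (the referenced `#resize-pool-2` snippet below is the authoritative version):

```java
import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.routing.DefaultResizer;
import akka.routing.RoundRobinPool;

public class ResizablePoolSketch {
  // Hypothetical routee used only for illustration.
  public static class Worker extends AbstractActor {
    @Override
    public Receive createReceive() {
      return receiveBuilder().matchAny(msg -> { /* do some work */ }).build();
    }
  }

  public static ActorRef createResizablePool(ActorSystem system) {
    // The pool may shrink to 2 and grow to 15 routees depending on pressure.
    DefaultResizer resizer = new DefaultResizer(2, 15);
    return system.actorOf(
        new RoundRobinPool(5).withResizer(resizer).props(Props.create(Worker.class)),
        "router-with-resizer");
  }
}
```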
Pool with resizer defined in code: -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #resize-pool-2 } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #resize-pool-2 } *It is also worth pointing out that if you define the ``router`` in the configuration file then this value will be used instead of any programmatically sent parameters.* @@ -655,9 +655,9 @@ The memory usage is O(n) where n is the number of sizes you allow, i.e. upperBou Pool with `OptimalSizeExploringResizer` defined in configuration: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-optimal-size-exploring-resize-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-optimal-size-exploring-resize-pool } -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #optimal-size-exploring-resize-pool } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #optimal-size-exploring-resize-pool } Several more configuration options are available and described in `akka.actor.deployment.default.optimal-size-exploring-resizer` section of the reference configuration. @@ -711,7 +711,7 @@ The router created in this example is replicating each message to a few destinat Start with the routing logic: -@@snip [CustomRouterDocTest.java](code/jdocs/routing/CustomRouterDocTest.java) { #routing-logic } +@@snip [CustomRouterDocTest.java]($code$/java/jdocs/routing/CustomRouterDocTest.java) { #routing-logic } `select` will be called for each message and in this example pick a few destinations by round-robin, by reusing the existing `RoundRobinRoutingLogic` and wrap the result in a `SeveralRoutees` @@ -721,7 +721,7 @@ The implementation of the routing logic must be thread safe, since it might be u A unit test of the routing logic: -@@snip [CustomRouterDocTest.java](code/jdocs/routing/CustomRouterDocTest.java) { #unit-test-logic } +@@snip [CustomRouterDocTest.java]($code$/java/jdocs/routing/CustomRouterDocTest.java) { #unit-test-logic } You could stop here and use the `RedundancyRoutingLogic` with a `akka.routing.Router` as described in [A Simple Router](#simple-router-java). @@ -731,23 +731,23 @@ Let us continue and make this into a self contained, configurable, router actor. Create a class that extends `PoolBase`, `GroupBase` or `CustomRouterConfig`. That class is a factory for the routing logic and holds the configuration for the router. Here we make it a `Group`. -@@snip [RedundancyGroup.java](code/jdocs/routing/RedundancyGroup.java) { #group } +@@snip [RedundancyGroup.java]($code$/java/jdocs/routing/RedundancyGroup.java) { #group } This can be used exactly as the router actors provided by Akka. -@@snip [CustomRouterDocTest.java](code/jdocs/routing/CustomRouterDocTest.java) { #usage-1 } +@@snip [CustomRouterDocTest.java]($code$/java/jdocs/routing/CustomRouterDocTest.java) { #usage-1 } Note that we added a constructor in `RedundancyGroup` that takes a `Config` parameter. That makes it possible to define it in configuration. -@@snip [CustomRouterDocSpec.scala](../scala/code/docs/routing/CustomRouterDocSpec.scala) { #jconfig } +@@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #jconfig } Note the fully qualified class name in the `router` property. The router class must extend `akka.routing.RouterConfig` (`Pool`, `Group` or `CustomRouterConfig`) and have constructor with one `com.typesafe.config.Config` parameter. 
The deployment section of the configuration is passed to the constructor. -@@snip [CustomRouterDocTest.java](code/jdocs/routing/CustomRouterDocTest.java) { #usage-2 } +@@snip [CustomRouterDocTest.java]($code$/java/jdocs/routing/CustomRouterDocTest.java) { #usage-2 } ## Configuring Dispatchers @@ -757,7 +757,7 @@ The dispatcher for created children of the pool will be taken from To make it easy to define the dispatcher of the routees of the pool you can define the dispatcher inline in the deployment section of the config. -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-pool-dispatcher } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-pool-dispatcher } That is the only thing you need to do enable a dedicated dispatcher for a pool. @@ -778,7 +778,7 @@ the actor system’s default dispatcher. All standard routers allow setting this property in their constructor or factory method, custom routers have to implement the method in a suitable way. -@@snip [RouterDocTest.java](code/jdocs/routing/RouterDocTest.java) { #dispatchers } +@@snip [RouterDocTest.java]($code$/java/jdocs/routing/RouterDocTest.java) { #dispatchers } @@@ note diff --git a/akka-docs/src/main/paradox/java/scheduler.md b/akka-docs/src/main/paradox/java/scheduler.md index 38afdd3ef2..9450dd6364 100644 --- a/akka-docs/src/main/paradox/java/scheduler.md +++ b/akka-docs/src/main/paradox/java/scheduler.md @@ -38,13 +38,13 @@ by the `akka.scheduler.tick-duration` configuration property. Schedule to send the "foo"-message to the testActor after 50ms: -@@snip [SchedulerDocTest.java](code/jdocs/actor/SchedulerDocTest.java) { #imports1 } +@@snip [SchedulerDocTest.java]($code$/java/jdocs/actor/SchedulerDocTest.java) { #imports1 } -@@snip [SchedulerDocTest.java](code/jdocs/actor/SchedulerDocTest.java) { #schedule-one-off-message } +@@snip [SchedulerDocTest.java]($code$/java/jdocs/actor/SchedulerDocTest.java) { #schedule-one-off-message } Schedule a Runnable, that sends the current time to the testActor, to be executed after 50ms: -@@snip [SchedulerDocTest.java](code/jdocs/actor/SchedulerDocTest.java) { #schedule-one-off-thunk } +@@snip [SchedulerDocTest.java]($code$/java/jdocs/actor/SchedulerDocTest.java) { #schedule-one-off-thunk } @@@ warning @@ -59,13 +59,13 @@ variant accepting a message and an `ActorRef` to schedule a message to self Schedule to send the "Tick"-message to the `tickActor` after 0ms repeating every 50ms: -@@snip [SchedulerDocTest.java](code/jdocs/actor/SchedulerDocTest.java) { #imports1 #imports2 } +@@snip [SchedulerDocTest.java]($code$/java/jdocs/actor/SchedulerDocTest.java) { #imports1 #imports2 } -@@snip [SchedulerDocTest.java](code/jdocs/actor/SchedulerDocTest.java) { #schedule-recurring } +@@snip [SchedulerDocTest.java]($code$/java/jdocs/actor/SchedulerDocTest.java) { #schedule-recurring } ## From `akka.actor.ActorSystem` -@@snip [ActorSystem.scala]../../../../../akka-actor/src/main/scala/akka/actor/ActorSystem.scala) { #scheduler } +@@snip [ActorSystem.scala]($akka$/akka-actor/src/main/scala/akka/actor/ActorSystem.scala) { #scheduler } @@@ warning @@ -81,7 +81,7 @@ The actual scheduler implementation is loaded reflectively upon different one using the `akka.scheduler.implementation` configuration property. 
The referenced class must implement the following interface: -@@snip [AbstractScheduler.java]../../../../../akka-actor/src/main/java/akka/actor/AbstractScheduler.java) { #scheduler } +@@snip [AbstractScheduler.java]($akka$/akka-actor/src/main/java/akka/actor/AbstractScheduler.java) { #scheduler } ## The Cancellable interface @@ -97,4 +97,4 @@ scheduled task was canceled or will (eventually) have run. @@@ -@@snip [Scheduler.scala]../../../../../akka-actor/src/main/scala/akka/actor/Scheduler.scala) { #cancellable } \ No newline at end of file +@@snip [Scheduler.scala]($akka$/akka-actor/src/main/scala/akka/actor/Scheduler.scala) { #cancellable } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/serialization.md b/akka-docs/src/main/paradox/java/serialization.md index 2d02f2e79c..1bb912aaf9 100644 --- a/akka-docs/src/main/paradox/java/serialization.md +++ b/akka-docs/src/main/paradox/java/serialization.md @@ -12,12 +12,12 @@ For Akka to know which `Serializer` to use for what, you need edit your [Configu in the "akka.actor.serializers"-section you bind names to implementations of the `akka.serialization.Serializer` you wish to use, like this: -@@snip [SerializationDocSpec.scala](../scala/code/docs/serialization/SerializationDocSpec.scala) { #serialize-serializers-config } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-serializers-config } After you've bound names to different implementations of `Serializer` you need to wire which classes should be serialized using which `Serializer`, this is done in the "akka.actor.serialization-bindings"-section: -@@snip [SerializationDocSpec.scala](../scala/code/docs/serialization/SerializationDocSpec.scala) { #serialization-bindings-config } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialization-bindings-config } You only need to specify the name of an interface or abstract base class of the messages. In case of ambiguity, i.e. the message implements several of the @@ -54,11 +54,11 @@ akka.actor.serialization-bindings { Normally, messages sent between local actors (i.e. same JVM) do not undergo serialization. For testing, sometimes, it may be desirable to force serialization on all messages (both remote and local). 
If you want to do this in order to verify that your messages are serializable you can enable the following config option: -@@snip [SerializationDocSpec.scala](../scala/code/docs/serialization/SerializationDocSpec.scala) { #serialize-messages-config } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-messages-config } If you want to verify that your `Props` are serializable you can enable the following config option: -@@snip [SerializationDocSpec.scala](../scala/code/docs/serialization/SerializationDocSpec.scala) { #serialize-creators-config } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-creators-config } @@@ warning @@ -71,9 +71,9 @@ We recommend having these config options turned on **only** when you're running If you want to programmatically serialize/deserialize using Akka Serialization, here's some examples: -@@snip [SerializationDocTest.java](code/jdocs/serialization/SerializationDocTest.java) { #imports } +@@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #imports } -@@snip [SerializationDocTest.java](code/jdocs/serialization/SerializationDocTest.java) { #programmatic } +@@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #programmatic } For more information, have a look at the `ScalaDoc` for `akka.serialization._` @@ -85,9 +85,9 @@ The first code snippet on this page contains a configuration file that reference A custom `Serializer` has to inherit from `akka.serialization.JSerializer` and can be defined like the following: -@@snip [SerializationDocTest.java](code/jdocs/serialization/SerializationDocTest.java) { #imports } +@@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #imports } -@@snip [SerializationDocTest.java](code/jdocs/serialization/SerializationDocTest.java) { #my-own-serializer } +@@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #my-own-serializer } The manifest is a type hint so that the same serializer can be used for different classes. The manifest parameter in `fromBinaryJava` is the class of the object that @@ -116,7 +116,7 @@ class name if you used `includeManifest=true`, otherwise it will be the empty st This is how a `SerializerWithStringManifest` looks like: -@@snip [SerializationDocTest.java](code/jdocs/serialization/SerializationDocTest.java) { #my-own-serializer2 } +@@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #my-own-serializer2 } You must also bind it to a name in your [Configuration]() and then list which classes that should be serialized using it. @@ -138,9 +138,9 @@ In the general case, the local address to be used depends on the type of remote address which shall be the recipient of the serialized information. 
Use `Serialization.serializedActorPath(actorRef)` like this: -@@snip [SerializationDocTest.java](code/jdocs/serialization/SerializationDocTest.java) { #imports } +@@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #imports } -@@snip [SerializationDocTest.java](code/jdocs/serialization/SerializationDocTest.java) { #actorref-serializer } +@@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #actorref-serializer } This assumes that serialization happens in the context of sending a message through the remote transport. There are other uses of serialization, though, @@ -155,7 +155,7 @@ transport per se, which makes this question a bit more interesting. To find out the appropriate address to use when sending to `remoteAddr` you can use `ActorRefProvider.getExternalAddressFor(remoteAddr)` like this: -@@snip [SerializationDocTest.java](code/jdocs/serialization/SerializationDocTest.java) { #external-address } +@@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #external-address } @@@ note @@ -181,7 +181,7 @@ lenient as Akka’s RemoteActorRefProvider). There is also a default remote address which is the one used by cluster support (and typical systems have just this one); you can get it like this: -@@snip [SerializationDocTest.java](code/jdocs/serialization/SerializationDocTest.java) { #external-address-default } +@@snip [SerializationDocTest.java]($code$/java/jdocs/serialization/SerializationDocTest.java) { #external-address-default } ### Deep serialization of Actors diff --git a/akka-docs/src/main/paradox/java/stream/stream-composition.md b/akka-docs/src/main/paradox/java/stream/stream-composition.md index d909a627f7..d2f6442b21 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-composition.md +++ b/akka-docs/src/main/paradox/java/stream/stream-composition.md @@ -78,7 +78,7 @@ with the rest of the graph), but this demonstrates the uniform underlying model. If we try to build a code snippet that corresponds to the above diagram, our first try might look like this: -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #non-nested-flow } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #non-nested-flow } It is clear however that there is no nesting present in our first attempt, since the library cannot figure out where we intended to put composite module boundaries, it is our responsibility to do that. If we are using the @@ -87,7 +87,7 @@ methods `withAttributes()` or `named()` (where the latter is just a shorthand fo The following code demonstrates how to achieve the desired nesting: -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #nested-flow } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #nested-flow } Once we have hidden the internals of our components, they act like any other built-in component of similar shape. If we hide some of the internals of our composites, the result looks just like if any other predefine component has been @@ -102,7 +102,7 @@ used: If we look at usage of built-in components, and our custom components, there is no difference in usage as the code snippet below demonstrates. 
-@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #reuse } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #reuse } ## Composing complex systems @@ -126,12 +126,12 @@ can be materialized) that encapsulates a non-trivial stream processing network. directed and non-directed cycles. The `runnable()` method of the `GraphDSL` factory object allows the creation of a general, closed, and runnable graph. For example the network on the diagram can be realized like this: -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #complex-graph } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #complex-graph } In the code above we used the implicit port numbering feature to make the graph more readable and similar to the diagram. It is possible to refer to the ports, so another version might look like this: -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #complex-graph-alt } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #complex-graph-alt } | @@ -148,7 +148,7 @@ from the previous example, what remains is a partial graph: We can recreate a similar graph in code, using the DSL in a similar way than before: -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #partial-graph } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #partial-graph } The only new addition is the return value of the builder block, which is a `Shape`. All graphs (including `Source`, `BidiFlow`, etc) have a shape, which encodes the *typed* ports of the module. In our example @@ -167,7 +167,7 @@ it is a good practice to give names to modules to help debugging. Since our partial graph has the right shape, it can be already used in the simpler, linear DSL: -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #partial-use } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #partial-use } It is not possible to use it as a `Flow` yet, though (i.e. we cannot call `.filter()` on it), but `Flow` has a `fromGraph()` method that just adds the DSL to a `FlowShape`. There are similar methods on `Source`, @@ -183,7 +183,7 @@ To demonstrate this, we will create the following graph: The code version of the above closed graph might look like this: -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #partial-flow-dsl } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #partial-flow-dsl } @@@ note @@ -195,7 +195,7 @@ throw an exception if this is violated. We are still in debt of demonstrating that `RunnableGraph` is a component just like any other, which can be embedded in graphs. In the following snippet we embed one closed graph in another: -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #embed-closed } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #embed-closed } The type of the imported module indicates that the imported module has a `ClosedShape`, and so we are not able to wire it to anything else inside the enclosing closed graph. Nevertheless, this "island" is embedded properly, @@ -246,29 +246,29 @@ To implement the above, first, we create a composite `Source`, where the enclose materialized type of `CompletableFuture>>`. 
By using the combiner function `Keep.left()`, the resulting materialized type is of the nested module (indicated by the color *red* on the diagram): -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #mat-combine-1 } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-1 } Next, we create a composite `Flow` from two smaller components. Here, the second enclosed `Flow` has a materialized type of `CompletionStage`, and we propagate this to the parent by using `Keep.right()` as the combiner function (indicated by the color *yellow* on the diagram): -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #mat-combine-2 } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-2 } As a third step, we create a composite `Sink`, using our `nestedFlow` as a building block. In this snippet, both the enclosed `Flow` and the folding `Sink` has a materialized value that is interesting for us, so we use `Keep.both()` to get a `Pair` of them as the materialized type of `nestedSink` (indicated by the color *blue* on the diagram) -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #mat-combine-3 } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-3 } As the last example, we wire together `nestedSource` and `nestedSink` and we use a custom combiner function to create a yet another materialized type of the resulting `RunnableGraph`. This combiner function just ignores the `CompletionStage` part, and wraps the other two values in a custom case class `MyClass` (indicated by color *purple* on the diagram): -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #mat-combine-4a } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-4a } -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #mat-combine-4b } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #mat-combine-4b } @@@ note @@ -290,7 +290,7 @@ by nested modules, unless they override them with a custom value. The code below, a modification of an earlier example sets the `inputBuffer` attribute on certain modules, but not on others: -@@snip [CompositionDocTest.java](../code/jdocs/stream/CompositionDocTest.java) { #attributes-inheritance } +@@snip [CompositionDocTest.java]($code$/java/jdocs/stream/CompositionDocTest.java) { #attributes-inheritance } The effect is, that each module inherits the `inputBuffer` attribute from its enclosing parent, unless it has the same attribute explicitly set. `nestedSource` gets the default attributes from the materializer itself. `nestedSink` diff --git a/akka-docs/src/main/paradox/java/stream/stream-cookbook.md b/akka-docs/src/main/paradox/java/stream/stream-cookbook.md index 64567b51ff..88224a1a56 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-cookbook.md +++ b/akka-docs/src/main/paradox/java/stream/stream-cookbook.md @@ -25,12 +25,12 @@ general, more targeted recipes are available as separate sections (@ref:[Buffers The simplest solution is to simply use a `map` operation and use `println` to print the elements received to the console. While this recipe is rather simplistic, it is often suitable for a quick debug session. 
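For illustration, a minimal sketch of such a debugging `map` (the element type and the source used here are assumed, not taken from the referenced recipe) could look like:

```java
import java.util.Arrays;

import akka.NotUsed;
import akka.stream.javadsl.Source;

// sketch only: the element type and the source are assumed
Source<String, NotUsed> words = Source.from(Arrays.asList("alpha", "beta", "gamma"));

Source<String, NotUsed> debugged =
    words.map(
        elem -> {
          System.out.println("saw element: " + elem); // quick console debugging
          return elem; // pass the element on unchanged
        });
```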
-@@snip [RecipeLoggingElements.java](../code/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #println-debug } +@@snip [RecipeLoggingElements.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #println-debug } Another approach to logging is to use `log()` operation which allows configuring logging for elements flowing through the stream as well as completion and erroring. -@@snip [RecipeLoggingElements.java](../code/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #log-custom } +@@snip [RecipeLoggingElements.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #log-custom } ### Flattening a stream of sequences @@ -41,7 +41,7 @@ The `mapConcat` operation can be used to implement a one-to-many transformation in the form of `In -> List`. In this case we want to map a `List` of elements to the elements in the collection itself, so we can just call `mapConcat(l -> l)`. -@@snip [RecipeFlattenList.java](../code/jdocs/stream/javadsl/cookbook/RecipeFlattenList.java) { #flattening-lists } +@@snip [RecipeFlattenList.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeFlattenList.java) { #flattening-lists } ### Draining a stream to a strict collection @@ -54,11 +54,11 @@ The function `limit` or `take` should always be used in conjunction in order to For example, this is best avoided: -@@snip [RecipeSeq.java](../code/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-unsafe } +@@snip [RecipeSeq.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-unsafe } Rather, use `limit` or `take` to ensure that the resulting `List` will contain only up to `MAX_ALLOWED_SIZE` elements: -@@snip [RecipeSeq.java](../code/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-safe } +@@snip [RecipeSeq.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-safe } ### Calculating the digest of a ByteString stream @@ -75,9 +75,9 @@ At this point we want to emit the digest value, but we cannot do it with `push` be no downstream demand. Instead we call `emit` which will temporarily replace the handlers, emit the provided value when demand comes in and then reset the stage state. It will then complete the stage. -@@snip [RecipeDigest.java](../code/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest } +@@snip [RecipeDigest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest } -@@snip [RecipeDigest.java](../code/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest2 } +@@snip [RecipeDigest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest2 } ### Parsing lines from a stream of ByteStrings @@ -88,7 +88,7 @@ needs to be parsed. The `Framing` helper class contains a convenience method to parse messages from a stream of `ByteString` s: -@@snip [RecipeParseLines.java](../code/jdocs/stream/javadsl/cookbook/RecipeParseLines.java) { #parse-lines } +@@snip [RecipeParseLines.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeParseLines.java) { #parse-lines } ### Dealing with compressed data streams @@ -97,7 +97,7 @@ The `Framing` helper class contains a convenience method to parse messages from The `Compression` helper class contains convenience methods for decompressing data streams compressed with Gzip or Deflate. 
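As a rough sketch (the gzipped input file and the chunk size here are assumptions), wiring the decompression flow into a stream could look like:

```java
import java.nio.file.Paths;
import java.util.concurrent.CompletionStage;

import akka.stream.IOResult;
import akka.stream.javadsl.Compression;
import akka.stream.javadsl.FileIO;
import akka.stream.javadsl.Source;
import akka.util.ByteString;

// sketch only: "logs.gz" is an assumed gzip-compressed file
Source<ByteString, CompletionStage<IOResult>> compressed =
    FileIO.fromPath(Paths.get("logs.gz"));

Source<ByteString, CompletionStage<IOResult>> uncompressed =
    compressed.via(Compression.gunzip(64 * 1024)); // max bytes per emitted chunk
```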
-@@snip [RecipeDecompress.java](../code/jdocs/stream/javadsl/cookbook/RecipeDecompress.java) { #decompress-gzip } +@@snip [RecipeDecompress.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDecompress.java) { #decompress-gzip } ### Implementing reduce-by-key @@ -126,7 +126,7 @@ If the `groupBy` operator encounters more keys than this number then the stream cannot continue without violating its resource bound, in this case `groupBy` will terminate with a failure. -@@snip [RecipeReduceByKeyTest.java](../code/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #word-count } +@@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #word-count } By extracting the parts specific to *wordcount* into @@ -136,9 +136,9 @@ By extracting the parts specific to *wordcount* into we get a generalized version below: -@@snip [RecipeReduceByKeyTest.java](../code/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general } +@@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general } -@@snip [RecipeReduceByKeyTest.java](../code/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general2 } +@@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general2 } @@@ note @@ -161,7 +161,7 @@ will be emitted. This is achieved by using `mapConcat` * Then we take this new stream of message topic pairs (containing a separate pair for each topic a given message belongs to) and feed it into groupBy, using the topic as the group key. -@@snip [RecipeMultiGroupByTest.java](../code/jdocs/stream/javadsl/cookbook/RecipeMultiGroupByTest.java) { #multi-groupby } +@@snip [RecipeMultiGroupByTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeMultiGroupByTest.java) { #multi-groupby } ## Working with Graphs @@ -176,14 +176,14 @@ trigger signal arrives. This recipe solves the problem by simply zipping the stream of `Message` elements with the stream of `Trigger` signals. Since `Zip` produces pairs, we simply map the output stream selecting the first element of the pair. -@@snip [RecipeManualTrigger.java](../code/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream } +@@snip [RecipeManualTrigger.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream } Alternatively, instead of using a `Zip`, and then using `map` to get the first element of the pairs, we can avoid creating the pairs in the first place by using `ZipWith` which takes a two argument function to produce the output element. If this function would return a pair of the two argument it would be exactly the behavior of `Zip` so `ZipWith` is a generalization of zipping. -@@snip [RecipeManualTrigger.java](../code/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream-zipwith } +@@snip [RecipeManualTrigger.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream-zipwith } ### Balancing jobs to a fixed pool of workers @@ -200,9 +200,9 @@ we wire the outputs of these workers to a `Merge` element that will collect the To make the worker stages run in parallel we mark them as asynchronous with *async()*. 
-@@snip [RecipeWorkerPool.java](../code/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool } +@@snip [RecipeWorkerPool.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool } -@@snip [RecipeWorkerPool.java](../code/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool2 } +@@snip [RecipeWorkerPool.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool2 } ## Working with rate @@ -221,7 +221,7 @@ the speed of the upstream unaffected by the downstream. When the upstream is faster, the reducing process of the `conflate` starts. Our reducer function simply takes the freshest element. This in a simple dropping operation. -@@snip [RecipeSimpleDrop.java](../code/jdocs/stream/javadsl/cookbook/RecipeSimpleDrop.java) { #simple-drop } +@@snip [RecipeSimpleDrop.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSimpleDrop.java) { #simple-drop } There is a version of `conflate` named `conflateWithSeed` that allows to express more complex aggregations, more similar to a `fold`. @@ -238,9 +238,9 @@ defining a dropping strategy instead of the default `Backpressure`. This allows between the different consumers (the buffer smooths out small rate variances), but also allows faster consumers to progress by dropping from the buffer of the slow consumers if necessary. -@@snip [RecipeDroppyBroadcast.java](../code/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast } +@@snip [RecipeDroppyBroadcast.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast } -@@snip [RecipeDroppyBroadcast.java](../code/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast2 } +@@snip [RecipeDroppyBroadcast.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast2 } ### Collecting missed ticks @@ -259,7 +259,7 @@ count of the missed ticks so far. As a result, we have a flow of `Int` where the number represents the missed ticks. A number 0 means that we were able to consume the tick fast enough (i.e. zero means: 1 non-missed tick + 0 missed ticks) -@@snip [RecipeMissedTicks.java](../code/jdocs/stream/javadsl/cookbook/RecipeMissedTicks.java) { #missed-ticks } +@@snip [RecipeMissedTicks.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeMissedTicks.java) { #missed-ticks } ### Create a stream processor that repeats the last element seen @@ -273,7 +273,7 @@ to feed the downstream if no upstream element is ready yet. In the `onPush()` ha `currentValue` variable and immediately relieve the upstream by calling `pull()`. The downstream `onPull` handler is very similar, we immediately relieve the downstream by emitting `currentValue`. -@@snip [RecipeHold.java](../code/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-1 } +@@snip [RecipeHold.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-1 } While it is relatively simple, the drawback of the first version is that it needs an arbitrary initial element which is not always possible to provide. Hence, we create a second version where the downstream might need to wait in one single @@ -286,7 +286,7 @@ version is that we check if we have received the first value and only emit if we first element comes in we must check if there possibly already was demand from downstream so that we in that case can push the element directly. 
-@@snip [RecipeHold.java](../code/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-2 } +@@snip [RecipeHold.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-2 } ### Globally limiting the rate of a set of streams @@ -306,13 +306,13 @@ of the sender is added to a queue. Once the timer for replenishing the pending p message, we increment the pending permits counter and send a reply to each of the waiting senders. If there are more waiting senders than permits available we will stay in the `closed` state. -@@snip [RecipeGlobalRateLimit.java](../code/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-actor } +@@snip [RecipeGlobalRateLimit.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-actor } To create a Flow that uses this global limiter actor we use the `mapAsync` function with the combination of the `ask` pattern. We also define a timeout, so if a reply is not received during the configured maximum wait period the returned future from `ask` will fail, which will fail the corresponding stream as well. -@@snip [RecipeGlobalRateLimit.java](../code/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-flow } +@@snip [RecipeGlobalRateLimit.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-flow } @@@ note @@ -339,9 +339,9 @@ and an empty or nonempty remaining buffer. Both `onPush()` and `onPull()` calls `emitChunk()` the only difference is that the push handler also stores the incoming chunk by appending to the end of the buffer. -@@snip [RecipeByteStrings.java](../code/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker } +@@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker } -@@snip [RecipeByteStrings.java](../code/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker2 } +@@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker2 } ### Limit the number of bytes passing through a stream of ByteStrings @@ -352,9 +352,9 @@ This recipe uses a `GraphStage` to implement the desired feature. In the only ha `onPush()` we just update a counter and see if it gets larger than `maximumBytes`. If a violation happens we signal failure, otherwise we forward the chunk we have received. -@@snip [RecipeByteStrings.java](../code/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter } +@@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter } -@@snip [RecipeByteStrings.java](../code/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter2 } +@@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter2 } ### Compact ByteStrings in a stream of ByteStrings @@ -365,7 +365,7 @@ chain we want to have clean copies that are no longer referencing the original ` The recipe is a simple use of map, calling the `compact()` method of the `ByteString` elements. This does copying of the underlying arrays, so this should be the last element of a long chain if used. 
-@@snip [RecipeByteStrings.java](../code/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #compacting-bytestrings } +@@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #compacting-bytestrings } ### Injecting keep-alive messages into a stream of ByteStrings @@ -374,4 +374,4 @@ but only if this does not interfere with normal traffic. There is a built-in operation that allows to do this directly: -@@snip [RecipeKeepAlive.java](../code/jdocs/stream/javadsl/cookbook/RecipeKeepAlive.java) { #inject-keepalive } \ No newline at end of file +@@snip [RecipeKeepAlive.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeKeepAlive.java) { #inject-keepalive } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/stream-customize.md b/akka-docs/src/main/paradox/java/stream/stream-customize.md index ded346e4cb..12463c00fb 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-customize.md +++ b/akka-docs/src/main/paradox/java/stream/stream-customize.md @@ -25,7 +25,7 @@ As a first motivating example, we will build a new `Source` that will simply emi cancelled. To start, we need to define the "interface" of our stage, which is called *shape* in Akka Streams terminology (this is explained in more detail in the section @ref:[Modularity, Composition and Hierarchy](stream-composition.md)). -@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #simple-source } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #simple-source } As you see, in itself the `GraphStage` only defines the ports of this stage and a shape that contains the ports. It also has a user implemented method called `createLogic`. If you recall, stages are reusable in multiple @@ -54,7 +54,7 @@ that they are already usable in many situations, but do not provide the DSL meth `Source.fromGraph` (see @ref:[Modularity, Composition and Hierarchy](stream-composition.md) for more details about graphs and DSLs). Now we can use the source as any other built-in one: -@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #simple-source-usage } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #simple-source-usage } Similarly, to create a custom `Sink` one can register a subclass `InHandler` with the stage `Inlet`. The `onPush()` callback is used to signal the handler a new element has been pushed to the stage, @@ -62,7 +62,7 @@ and can hence be grabbed and used. `onPush()` can be overridden to provide custo Please note, most Sinks would need to request upstream elements as soon as they are created: this can be done by calling `pull(inlet)` in the `preStart()` callback. 
-@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #simple-sink } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #simple-sink } ### Port states, AbstractInHandler and AbstractOutHandler @@ -186,7 +186,7 @@ To illustrate these concepts we create a small `GraphStage` that implements the Map calls `push(out)` from the `onPush()` handler and it also calls `pull()` from the `onPull` handler resulting in the conceptual wiring above, and fully expressed in code below: -@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #one-to-one } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #one-to-one } Map is a typical example of a one-to-one transformation of a stream where demand is passed along upstream elements passed on downstream. @@ -205,7 +205,7 @@ we return the “ball” to our upstream so that we get the new element. This is example by adding a conditional in the `onPush` handler and decide between a `pull(in)` or `push(out)` call (and of course not having a mapping `f` function). -@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #many-to-one } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #many-to-one } To complete the picture we define a one-to-many transformation as the next step. We chose a straightforward example stage that emits every upstream element twice downstream. The conceptual wiring of this stage looks like this: @@ -220,7 +220,7 @@ This is a stage that has state: an option with the last element it has seen indi has duplicated this last element already or not. We must also make sure to emit the extra element if the upstream completes. -@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #one-to-many } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #one-to-many } In this case a pull from downstream might be consumed by the stage itself rather than passed along upstream as the stage might contain an element it wants to @@ -233,7 +233,7 @@ This example can be simplified by replacing the usage of a mutable state with ca `emitMultiple` which will replace the handlers, emit each of multiple elements and then reinstate the original handlers: -@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #simpler-one-to-many } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #simpler-one-to-many } Finally, to demonstrate all of the stages above, we put them together into a processing chain, which conceptually would correspond to the following structure: @@ -246,7 +246,7 @@ which conceptually would correspond to the following structure: In code this is only a few lines, using the `via` use our custom stages in a stream: -@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #graph-stage-chain } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #graph-stage-chain } If we attempt to draw the sequence of events, it shows that there is one "event token" in circulation in a potential chain of stages, just like our conceptual "railroad tracks" representation predicts. 
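For readers without the referenced test sources at hand, a minimal sketch of the one-to-one map-like stage discussed above (class, port and handler names are assumed here) might look like:

```java
import akka.stream.Attributes;
import akka.stream.FlowShape;
import akka.stream.Inlet;
import akka.stream.Outlet;
import akka.stream.stage.AbstractInHandler;
import akka.stream.stage.AbstractOutHandler;
import akka.stream.stage.GraphStage;
import akka.stream.stage.GraphStageLogic;

// sketch of a one-to-one transformation stage; names are illustrative only
public class MapStage<A, B> extends GraphStage<FlowShape<A, B>> {

  private final akka.japi.function.Function<A, B> f;

  public final Inlet<A> in = Inlet.create("MapStage.in");
  public final Outlet<B> out = Outlet.create("MapStage.out");
  private final FlowShape<A, B> shape = FlowShape.of(in, out);

  public MapStage(akka.japi.function.Function<A, B> f) {
    this.f = f;
  }

  @Override
  public FlowShape<A, B> shape() {
    return shape;
  }

  @Override
  public GraphStageLogic createLogic(Attributes inheritedAttributes) {
    return new GraphStageLogic(shape) {
      {
        // each push from upstream results in exactly one push downstream
        setHandler(in, new AbstractInHandler() {
          @Override
          public void onPush() throws Exception {
            push(out, f.apply(grab(in)));
          }
        });
        // demand from downstream is simply passed along upstream
        setHandler(out, new AbstractOutHandler() {
          @Override
          public void onPull() throws Exception {
            pull(in);
          }
        });
      }
    };
  }
}
```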
@@ -289,7 +289,7 @@ See @ref:[Using the SLF4J API directly](../logging.md#slf4j-directly-java) for m The stage then gets access to the `log` field which it can safely use from any `GraphStage` callbacks: -@@snip [GraphStageLoggingDocTest.java](../code/jdocs/stream/GraphStageLoggingDocTest.java) { #stage-with-logging } +@@snip [GraphStageLoggingDocTest.java]($code$/java/jdocs/stream/GraphStageLoggingDocTest.java) { #stage-with-logging } @@@ note @@ -314,7 +314,7 @@ In this sample the stage toggles between open and closed, where open means no el stage starts out as closed but as soon as an element is pushed downstream the gate becomes open for a duration of time during which it will consume and drop upstream messages: -@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #timed } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #timed } ### Using asynchronous side-channels @@ -332,7 +332,7 @@ Sharing the AsyncCallback from the constructor risks race conditions, therefore This example shows an asynchronous side channel graph stage that starts dropping elements when a future completes: -@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #async-side-channel } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #async-side-channel } ### Integration with actors @@ -369,7 +369,7 @@ necessary (non-blocking) synchronization and visibility guarantees to this share In this sample the materialized value is a future containing the first element to go through the stream: -@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #materialized } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #materialized } ### Using attributes to affect the behavior of a stage @@ -420,7 +420,7 @@ initialization. The buffer has demand for up to two elements without any downstr The following code example demonstrates a buffer class corresponding to the message sequence chart above. -@@snip [GraphStageDocTest.java](../code/jdocs/stream/GraphStageDocTest.java) { #detached } +@@snip [GraphStageDocTest.java]($code$/java/jdocs/stream/GraphStageDocTest.java) { #detached } ## Thread safety of custom processing stages diff --git a/akka-docs/src/main/paradox/java/stream/stream-dynamic.md b/akka-docs/src/main/paradox/java/stream/stream-dynamic.md index be1bd01210..87a8a27479 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-dynamic.md +++ b/akka-docs/src/main/paradox/java/stream/stream-dynamic.md @@ -26,11 +26,11 @@ below for usage examples. * **Shutdown** -@@snip [KillSwitchDocTest.java](../code/jdocs/stream/KillSwitchDocTest.java) { #unique-shutdown } +@@snip [KillSwitchDocTest.java]($code$/java/jdocs/stream/KillSwitchDocTest.java) { #unique-shutdown } * **Abort** -@@snip [KillSwitchDocTest.java](../code/jdocs/stream/KillSwitchDocTest.java) { #unique-abort } +@@snip [KillSwitchDocTest.java]($code$/java/jdocs/stream/KillSwitchDocTest.java) { #unique-abort } ### SharedKillSwitch @@ -41,11 +41,11 @@ Refer to the below for usage examples. 
* **Shutdown** -@@snip [KillSwitchDocTest.java](../code/jdocs/stream/KillSwitchDocTest.java) { #shared-shutdown } +@@snip [KillSwitchDocTest.java]($code$/java/jdocs/stream/KillSwitchDocTest.java) { #shared-shutdown } * **Abort** -@@snip [KillSwitchDocTest.java](../code/jdocs/stream/KillSwitchDocTest.java) { #shared-abort } +@@snip [KillSwitchDocTest.java]($code$/java/jdocs/stream/KillSwitchDocTest.java) { #shared-abort } @@@ note @@ -70,7 +70,7 @@ producers are backpressured. The hub itself comes as a `Source` to which the sin It is not possible to attach any producers until this `Source` has been materialized (started). This is ensured by the fact that we only get the corresponding `Sink` as a materialized value. Usage might look like this: -@@snip [HubDocTest.java](../code/jdocs/stream/HubDocTest.java) { #merge-hub } +@@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #merge-hub } This sequence, while might look odd at first, ensures proper startup order. Once we get the `Sink`, we can use it as many times as wanted. Everything that is fed to it will be delivered to the consumer we attached @@ -83,7 +83,7 @@ rate of the producer will be automatically adapted to the slowest consumer. In t to which the single producer must be attached first. Consumers can only be attached once the `Sink` has been materialized (i.e. the producer has been started). One example of using the `BroadcastHub`: -@@snip [HubDocTest.java](../code/jdocs/stream/HubDocTest.java) { #broadcast-hub } +@@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #broadcast-hub } The resulting `Source` can be materialized any number of times, each materialization effectively attaching a new subscriber. If there are no subscribers attached to this hub then it will not drop any elements but instead @@ -103,13 +103,13 @@ First, we connect a `MergeHub` and a `BroadcastHub` together to form a publish-s we materialize this small stream, we get back a pair of `Source` and `Sink` that together define the publish and subscribe sides of our channel. -@@snip [HubDocTest.java](../code/jdocs/stream/HubDocTest.java) { #pub-sub-1 } +@@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #pub-sub-1 } We now use a few tricks to add more features. First of all, we attach a `Sink.ignore` at the broadcast side of the channel to keep it drained when there are no subscribers. If this behavior is not the desired one this line can be simply dropped. -@@snip [HubDocTest.java](../code/jdocs/stream/HubDocTest.java) { #pub-sub-2 } +@@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #pub-sub-2 } We now wrap the `Sink` and `Source` in a `Flow` using `Flow.fromSinkAndSource`. This bundles up the two sides of the channel into one and forces users of it to always define a publisher and subscriber side @@ -119,10 +119,10 @@ same time. Finally, we add `backpressureTimeout` on the consumer side to ensure that subscribers that block the channel for more than 3 seconds are forcefully removed (and their stream failed). -@@snip [HubDocTest.java](../code/jdocs/stream/HubDocTest.java) { #pub-sub-3 } +@@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #pub-sub-3 } The resulting Flow now has a type of `Flow[String, String, UniqueKillSwitch]` representing a publish-subscribe channel which can be used any number of times to attach new producers or consumers. 
In addition, it materializes to a `UniqueKillSwitch` (see [UniqueKillSwitch](#unique-kill-switch-java)) that can be used to deregister a single user externally: -@@snip [HubDocTest.java](../code/jdocs/stream/HubDocTest.java) { #pub-sub-4 } \ No newline at end of file +@@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #pub-sub-4 } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/stream-error.md b/akka-docs/src/main/paradox/java/stream/stream-error.md index d0e8992b7d..809a5fcd1f 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-error.md +++ b/akka-docs/src/main/paradox/java/stream/stream-error.md @@ -24,11 +24,11 @@ performed by creating a new instance of the stage. By default the stopping strategy is used for all exceptions, i.e. the stream will be completed with failure when an exception is thrown. -@@snip [FlowErrorDocTest.java](../code/jdocs/stream/FlowErrorDocTest.java) { #stop } +@@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #stop } The default supervision strategy for a stream can be defined on the settings of the materializer. -@@snip [FlowErrorDocTest.java](../code/jdocs/stream/FlowErrorDocTest.java) { #resume } +@@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #resume } Here you can see that all `ArithmeticException` will resume the processing, i.e. the elements that cause the division by zero are effectively dropped. @@ -42,12 +42,12 @@ cycles, as explained in @ref:[Graph cycles, liveness and deadlocks](stream-graph The supervision strategy can also be defined for all operators of a flow. -@@snip [FlowErrorDocTest.java](../code/jdocs/stream/FlowErrorDocTest.java) { #resume-section } +@@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #resume-section } `Restart` works in a similar way as `Resume` with the addition that accumulated state, if any, of the failing processing stage will be reset. -@@snip [FlowErrorDocTest.java](../code/jdocs/stream/FlowErrorDocTest.java) { #restart-section } +@@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #restart-section } ## Errors from mapAsync @@ -58,11 +58,11 @@ discard those that cannot be found. We start with the tweet stream of authors: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #tweet-authors } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #tweet-authors } Assume that we can lookup their email address using: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #email-address-lookup2 } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #email-address-lookup2 } The `CompletionStage` is completed normally if the email is not found. @@ -70,7 +70,7 @@ Transforming the stream of authors to a stream of email addresses by using the ` service can be done with `mapAsync` and we use `Supervision.getResumingDecider` to drop unknown email addresses: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #email-addresses-mapAsync-supervision } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #email-addresses-mapAsync-supervision } If we would not use `Resume` the default stopping strategy would complete the stream with failure on the first `CompletionStage` that was completed exceptionally. 
\ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/stream-flows-and-basics.md b/akka-docs/src/main/paradox/java/stream/stream-flows-and-basics.md index 57fb0ec90a..f6348d2da4 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-flows-and-basics.md +++ b/akka-docs/src/main/paradox/java/stream/stream-flows-and-basics.md @@ -77,7 +77,7 @@ starting up Actors). Thanks to Flows being simply a description of the processin thread-safe, and freely shareable*, which means that it is for example safe to share and send them between actors, to have one actor prepare the work, and then have it be materialized at some completely different place in the code. -@@snip [FlowDocTest.java](../code/jdocs/stream/FlowDocTest.java) { #materialization-in-steps } +@@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #materialization-in-steps } After running (materializing) the `RunnableGraph` we get a special container object, the `MaterializedMap`. Both sources and sinks are able to put specific objects into this map. Whether they put something in or not is implementation @@ -88,12 +88,12 @@ there is a convenience method called `runWith()` available for `Sink`, `Source` a supplied `Source` (in order to run a `Sink`), a `Sink` (in order to run a `Source`) or both a `Source` and a `Sink` (in order to run a `Flow`, since it has neither attached yet). -@@snip [FlowDocTest.java](../code/jdocs/stream/FlowDocTest.java) { #materialization-runWith } +@@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #materialization-runWith } It is worth pointing out that since processing stages are *immutable*, connecting them returns a new processing stage, instead of modifying the existing instance, so while constructing long flows, remember to assign the new value to a variable or run it: -@@snip [FlowDocTest.java](../code/jdocs/stream/FlowDocTest.java) { #source-immutable } +@@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #source-immutable } @@@ note @@ -112,18 +112,18 @@ In the example below we create two running materialized instance of the stream t variable, and both materializations give us a different `CompletionStage` from the map even though we used the same `sink` to refer to the future: -@@snip [FlowDocTest.java](../code/jdocs/stream/FlowDocTest.java) { #stream-reuse } +@@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #stream-reuse } ### Defining sources, sinks and flows The objects `Source` and `Sink` define various ways to create sources and sinks of elements. The following examples show some of the most useful constructs (refer to the API documentation for more details): -@@snip [FlowDocTest.java](../code/jdocs/stream/FlowDocTest.java) { #source-sink } +@@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #source-sink } There are various ways to wire up different parts of a stream, the following examples show some of the available options: -@@snip [FlowDocTest.java](../code/jdocs/stream/FlowDocTest.java) { #flow-connecting } +@@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #flow-connecting } ### Illegal stream elements @@ -238,7 +238,7 @@ To allow for parallel processing you will have to insert asynchronous boundaries graphs by way of adding `Attributes.asyncBoundary` using the method `async` on `Source`, `Sink` and `Flow` to pieces that shall communicate with the rest of the graph in an asynchronous fashion. 
-@@snip [FlowDocTest.java](../code/jdocs/stream/FlowDocTest.java) { #flow-async } +@@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #flow-async } In this example we create two regions within the flow which will be executed in one Actor each—assuming that adding and multiplying integers is an extremely costly operation this will lead to a performance gain since two CPUs can @@ -278,7 +278,7 @@ to somehow express how these values should be composed to a final value when we many combinator methods have variants that take an additional argument, a function, that will be used to combine the resulting values. Some examples of using these combiners are illustrated in the example below. -@@snip [FlowDocTest.java](../code/jdocs/stream/FlowDocTest.java) { #flow-mat-combine } +@@snip [FlowDocTest.java]($code$/java/jdocs/stream/FlowDocTest.java) { #flow-mat-combine } @@@ note diff --git a/akka-docs/src/main/paradox/java/stream/stream-graphs.md b/akka-docs/src/main/paradox/java/stream/stream-graphs.md index 0ff4a63b7b..07748513e0 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-graphs.md +++ b/akka-docs/src/main/paradox/java/stream/stream-graphs.md @@ -47,7 +47,7 @@ Such graph is simple to translate to the Graph DSL since each linear element cor and each circle corresponds to either a `Junction` or a `Source` or `Sink` if it is beginning or ending a `Flow`. -@@snip [GraphDSLDocTest.java](../code/jdocs/stream/GraphDSLDocTest.java) { #simple-graph-dsl } +@@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #simple-graph-dsl } @@@ note @@ -74,7 +74,7 @@ In the example below we prepare a graph that consists of two parallel streams, in which we re-use the same instance of `Flow`, yet it will properly be materialized as two connections between the corresponding Sources and Sinks: -@@snip [GraphDSLDocTest.java](../code/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-reusing-a-flow } +@@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-reusing-a-flow } ## Constructing and combining Partial Graphs @@ -94,7 +94,7 @@ Let's imagine we want to provide users with a specialized element that given 3 i the greatest int value of each zipped triple. We'll want to expose 3 input ports (unconnected sources) and one output port (unconnected sink). -@@snip [StreamPartialGraphDSLDocTest.java](../code/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #simple-partial-graph-dsl } +@@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #simple-partial-graph-dsl } As you can see, first we construct the partial graph that describes how to compute the maximum of two input streams, then we reuse that twice while constructing the partial graph that extends this to three input streams, @@ -134,12 +134,12 @@ be attached before this Source can run”. Refer to the example below, in which we create a Source that zips together two numbers, to see this graph construction in action: -@@snip [StreamPartialGraphDSLDocTest.java](../code/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-from-partial-graph-dsl } +@@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-from-partial-graph-dsl } Similarly the same can be done for a `Sink` using `SinkShape.of` in which case the provided value must be an `Inlet`. 
For defining a `Flow` we need to expose both an undefined source and sink: -@@snip [StreamPartialGraphDSLDocTest.java](../code/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #flow-from-partial-graph-dsl } +@@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #flow-from-partial-graph-dsl } ## Combining Sources and Sinks with simplified API @@ -147,11 +147,11 @@ There is simplified API you can use to combine sources and sinks with junctions `Merge` and `Concat` without the need for using the Graph DSL. The combine method takes care of constructing the necessary graph underneath. In following example we combine two sources into one (fan-in): -@@snip [StreamPartialGraphDSLDocTest.java](../code/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-combine } +@@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-combine } The same can be done for a `Sink` but in this case it will be fan-out: -@@snip [StreamPartialGraphDSLDocTest.java](../code/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #sink-combine } +@@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #sink-combine } ## Bidirectional Flows @@ -166,19 +166,19 @@ this purpose exists the special type `BidiFlow` which is a graph that has exactly two open inlets and two open outlets. The corresponding shape is called `BidiShape` and is defined like this: -@@snip [Shape.scala]../../../../../../akka-stream/src/main/scala/akka/stream/Shape.scala) { #bidi-shape } +@@snip [Shape.scala]($akka$/akka-stream/src/main/scala/akka/stream/Shape.scala) { #bidi-shape } A bidirectional flow is defined just like a unidirectional `Flow` as demonstrated for the codec mentioned above: -@@snip [BidiFlowDocTest.java](../code/jdocs/stream/BidiFlowDocTest.java) { #codec } +@@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #codec } The first version resembles the partial graph constructor, while for the simple case of a functional 1:1 transformation there is a concise convenience method as shown on the last line. The implementation of the two functions is not difficult either: -@@snip [BidiFlowDocTest.java](../code/jdocs/stream/BidiFlowDocTest.java) { #codec-impl } +@@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #codec-impl } In this way you could easily integrate any other serialization library that turns an object into a sequence of bytes. @@ -188,11 +188,11 @@ a framing protocol means that any received chunk of bytes may correspond to zero or more messages. This is best implemented using a `GraphStage` (see also @ref:[Custom processing with GraphStage](stream-customize.md#graphstage-java)). -@@snip [BidiFlowDocTest.java](../code/jdocs/stream/BidiFlowDocTest.java) { #framing } +@@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #framing } With these implementations we can build a protocol stack and test it: -@@snip [BidiFlowDocTest.java](../code/jdocs/stream/BidiFlowDocTest.java) { #compose } +@@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #compose } This example demonstrates how `BidiFlow` subgraphs can be hooked together and also turned around with the `.reversed()` method. 
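A minimal sketch of that `Sink` case (the stages and the element type used here are assumed, not taken from the referenced test) might look like:

```java
import akka.NotUsed;
import akka.stream.FlowShape;
import akka.stream.Graph;
import akka.stream.SinkShape;
import akka.stream.javadsl.Flow;
import akka.stream.javadsl.GraphDSL;
import akka.stream.javadsl.Sink;

// sketch only: a Sink<Integer, NotUsed> that doubles each element before printing it
final Graph<SinkShape<Integer>, NotUsed> doublingSinkGraph =
    GraphDSL.create(
        builder -> {
          final FlowShape<Integer, Integer> doubler =
              builder.add(Flow.of(Integer.class).map(i -> i * 2));
          final SinkShape<Integer> printer =
              builder.add(Sink.<Integer>foreach(System.out::println));
          builder.from(doubler.out()).to(printer);
          // the single remaining open inlet becomes the Sink's input port
          return SinkShape.of(doubler.in());
        });

final Sink<Integer, NotUsed> doublingSink = Sink.fromGraph(doublingSinkGraph);
```

As with the `Source` case, only the unconnected `Inlet` is exposed; everything else stays internal to the partial graph.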
The test @@ -208,12 +208,12 @@ can be used in the graph as an ordinary source or outlet, and which will eventua If the materialized value is needed at more than one place, it is possible to call `materializedValue` any number of times to acquire the necessary number of outlets. -@@snip [GraphDSLDocTest.java](../code/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-matvalue } +@@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-matvalue } Be careful not to introduce a cycle where the materialized value actually contributes to the materialized value. The following example demonstrates a case where the materialized `CompletionStage` of a fold is fed back to the fold itself. -@@snip [GraphDSLDocTest.java](../code/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-matvalue-cycle } +@@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-matvalue-cycle } ## Graph cycles, liveness and deadlocks @@ -230,7 +230,7 @@ The graph takes elements from the source, prints them, then broadcasts those ele to a consumer (we just used `Sink.ignore` for now) and to a feedback arc that is merged back into the main via a `Merge` junction. -@@snip [GraphCyclesDocTest.java](../code/jdocs/stream/GraphCyclesDocTest.java) { #deadlocked } +@@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #deadlocked } Running this we observe that after a few numbers have been printed, no more elements are logged to the console - all processing stops after some time. After some investigation we observe that: @@ -248,7 +248,7 @@ If we modify our feedback loop by replacing the `Merge` junction with a `MergePr before trying the other lower priority input ports. Since we feed back through the preferred port it is always guaranteed that the elements in the cycles can flow. -@@snip [GraphCyclesDocTest.java](../code/jdocs/stream/GraphCyclesDocTest.java) { #unfair } +@@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #unfair } If we run the example we see that the same sequence of numbers are printed over and over again, but the processing does not stop. Hence, we avoided the deadlock, but `source` is still @@ -266,7 +266,7 @@ be balanced (as many elements are removed as many are injected) then there would To make our cycle both live (not deadlocking) and fair we can introduce a dropping element on the feedback arc. In this case we chose the `buffer()` operation giving it a dropping strategy `OverflowStrategy.dropHead`. -@@snip [GraphCyclesDocTest.java](../code/jdocs/stream/GraphCyclesDocTest.java) { #dropping } +@@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #dropping } If we run this example we see that @@ -285,7 +285,7 @@ the beginning instead. To achieve this we modify our first graph by replacing th Since `ZipWith` takes one element from `source` *and* from the feedback arc to inject one element into the cycle, we maintain the balance of elements. -@@snip [GraphCyclesDocTest.java](../code/jdocs/stream/GraphCyclesDocTest.java) { #zipping-dead } +@@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #zipping-dead } Still, when we try to run the example it turns out that no element is printed at all! After some investigation we realize that: @@ -297,7 +297,7 @@ These two conditions are a typical "chicken-and-egg" problem. The solution is to element into the cycle that is independent from `source`. 
We do this by using a `Concat` junction on the backwards arc that injects a single element using `Source.single`. -@@snip [GraphCyclesDocTest.java](../code/jdocs/stream/GraphCyclesDocTest.java) { #zipping-live } +@@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #zipping-live } When we run the above example we see that processing starts and never stops. The important takeaway from this example is that balanced cycles often need an initial "kick-off" element to be injected into the cycle. \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/stream-integrations.md b/akka-docs/src/main/paradox/java/stream/stream-integrations.md index 1b5427b92c..434488374f 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-integrations.md +++ b/akka-docs/src/main/paradox/java/stream/stream-integrations.md @@ -15,7 +15,7 @@ use `ask` in `mapAsync`. The back-pressure of the stream is maintained by the `CompletionStage` of the `ask` and the mailbox of the actor will not be filled with more messages than the given `parallelism` of the `mapAsync` stage. -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #mapAsync-ask } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #mapAsync-ask } Note that the messages received in the actor will be in the same order as the stream elements, i.e. the `parallelism` does not change the ordering @@ -28,7 +28,7 @@ The actor must reply to the `getSender()` for each message from the stream. That reply will complete the `CompletionStage` of the `ask` and it will be the element that is emitted downstreams from `mapAsync`. -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #ask-actor } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #ask-actor } The stream can be completed with failure by sending `akka.actor.Status.Failure` as reply from the actor. @@ -113,24 +113,24 @@ performed with `mapAsync` or `mapAsyncUnordered`. 
For example, sending emails to the authors of selected tweets using an external email service: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #email-server-send } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #email-server-send } We start with the tweet stream of authors: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #tweet-authors } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #tweet-authors } Assume that we can lookup their email address using: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #email-address-lookup } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #email-address-lookup } Transforming the stream of authors to a stream of email addresses by using the `lookupEmail` service can be done with `mapAsync`: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #email-addresses-mapAsync } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #email-addresses-mapAsync } Finally, sending the emails: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #send-emails } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #send-emails } `mapAsync` is applying the given function that is calling out to the external service to each of the elements as they pass through this processing step. The function returns a `CompletionStage` @@ -152,23 +152,23 @@ result stream onwards for further processing or storage. Note that `mapAsync` preserves the order of the stream elements. In this example the order is not important and then we can use the more efficient `mapAsyncUnordered`: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #external-service-mapAsyncUnordered } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #external-service-mapAsyncUnordered } In the above example the services conveniently returned a `CompletionStage` of the result. If that is not the case you need to wrap the call in a `CompletionStage`. If the service call involves blocking you must also make sure that you run it on a dedicated execution context, to avoid starvation and disturbance of other tasks in the system. -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #blocking-mapAsync } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #blocking-mapAsync } The configuration of the `"blocking-dispatcher"` may look something like: -@@snip [IntegrationDocSpec.scala](../../scala/code/docs/stream/IntegrationDocSpec.scala) { #blocking-dispatcher-config } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #blocking-dispatcher-config } An alternative for blocking calls is to perform them in a `map` operation, still using a dedicated dispatcher for that operation. -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #blocking-map } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #blocking-map } However, that is not exactly the same as `mapAsync`, since the `mapAsync` may run several calls concurrently, but `map` performs them one at a time. @@ -176,7 +176,7 @@ several calls concurrently, but `map` performs them one at a time. 
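A condensed sketch of that blocking pattern may help; the `"blocking-dispatcher"` name and the `blockingCall` helper are assumptions of this sketch rather than the referenced `IntegrationDocTest` code:

```java
import java.util.concurrent.CompletableFuture;
import akka.NotUsed;
import akka.actor.ActorSystem;
import akka.dispatch.MessageDispatcher;
import akka.stream.javadsl.Flow;

public class BlockingCallSketch {
  // Stand-in for a blocking third-party call.
  static String blockingCall(String input) {
    return input.toUpperCase();
  }

  static Flow<String, String, NotUsed> viaBlockingService(ActorSystem system) {
    // Run the blocking work on a dedicated dispatcher so the default one stays responsive.
    final MessageDispatcher blockingEc =
        system.dispatchers().lookup("blocking-dispatcher"); // assumed to exist in config
    return Flow.of(String.class)
        .mapAsync(4, elem ->
            CompletableFuture.supplyAsync(() -> blockingCall(elem), blockingEc));
  }
}
```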
For a service that is exposed as an actor, or if an actor is used as a gateway in front of an external service, you can use `ask`: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #save-tweets } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #save-tweets } Note that if the `ask` is not completed within the given timeout the stream is completed with failure. If that is not desired outcome you can use `recover` on the `ask` `CompletionStage`. @@ -204,14 +204,14 @@ successive calls as long as there is downstream demand of several elements. Here is a fictive service that we can use to illustrate these aspects. -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-service } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-service } Elements starting with a lower case character are simulated to take longer time to process. Here is how we can use it with `mapAsync`: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-mapAsync } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-mapAsync } The output may look like this: @@ -268,7 +268,7 @@ calls are limited by the buffer size (4) of the `ActorMaterializerSettings`. Here is how we can use the same service with `mapAsyncUnordered`: -@@snip [IntegrationDocTest.java](../code/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-mapAsyncUnordered } +@@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #sometimes-slow-mapAsyncUnordered } The output may look like this: @@ -338,19 +338,19 @@ An incomplete list of other implementations: The two most important interfaces in Reactive Streams are the `Publisher` and `Subscriber`. -@@snip [ReactiveStreamsDocTest.java](../code/jdocs/stream/ReactiveStreamsDocTest.java) { #imports } +@@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #imports } Let us assume that a library provides a publisher of tweets: -@@snip [ReactiveStreamsDocTest.java](../code/jdocs/stream/ReactiveStreamsDocTest.java) { #tweets-publisher } +@@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #tweets-publisher } and another library knows how to store author handles in a database: -@@snip [ReactiveStreamsDocTest.java](../code/jdocs/stream/ReactiveStreamsDocTest.java) { #author-storage-subscriber } +@@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #author-storage-subscriber } Using an Akka Streams `Flow` we can transform the stream and connect those: -@@snip [ReactiveStreamsDocTest.java](../code/jdocs/stream/ReactiveStreamsDocTest.java) { #authors #connect-all } +@@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #authors #connect-all } The `Publisher` is used as an input `Source` to the flow and the `Subscriber` is used as an output `Sink`. @@ -359,23 +359,23 @@ A `Flow` can also be also converted to a `RunnableGraph[Processor[In, Out]]` whi materializes to a `Processor` when `run()` is called. `run()` itself can be called multiple times, resulting in a new `Processor` instance each time. 
-@@snip [ReactiveStreamsDocTest.java](../code/jdocs/stream/ReactiveStreamsDocTest.java) { #flow-publisher-subscriber } +@@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #flow-publisher-subscriber } A publisher can be connected to a subscriber with the `subscribe` method. It is also possible to expose a `Source` as a `Publisher` by using the Publisher-`Sink`: -@@snip [ReactiveStreamsDocTest.java](../code/jdocs/stream/ReactiveStreamsDocTest.java) { #source-publisher } +@@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #source-publisher } A publisher that is created with `Sink.asPublisher(AsPublisher.WITHOUT_FANOUT)` supports only a single subscription. Additional subscription attempts will be rejected with an `IllegalStateException`. A publisher that supports multiple subscribers using fan-out/broadcasting is created as follows: -@@snip [ReactiveStreamsDocTest.java](../code/jdocs/stream/ReactiveStreamsDocTest.java) { #author-alert-subscriber #author-storage-subscriber } +@@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #author-alert-subscriber #author-storage-subscriber } -@@snip [ReactiveStreamsDocTest.java](../code/jdocs/stream/ReactiveStreamsDocTest.java) { #source-fanoutPublisher } +@@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #source-fanoutPublisher } The input buffer size of the stage controls how far apart the slowest subscriber can be from the fastest subscriber before slowing down the stream. @@ -383,12 +383,12 @@ before slowing down the stream. To make the picture complete, it is also possible to expose a `Sink` as a `Subscriber` by using the Subscriber-`Source`: -@@snip [ReactiveStreamsDocTest.java](../code/jdocs/stream/ReactiveStreamsDocTest.java) { #sink-subscriber } +@@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #sink-subscriber } It is also possible to use re-wrap `Processor` instances as a `Flow` by passing a factory function that will create the `Processor` instances: -@@snip [ReactiveStreamsDocTest.java](../code/jdocs/stream/ReactiveStreamsDocTest.java) { #use-processor } +@@snip [ReactiveStreamsDocTest.java]($code$/java/jdocs/stream/ReactiveStreamsDocTest.java) { #use-processor } Please note that a factory is necessary to achieve reusability of the resulting `Flow`. @@ -436,7 +436,7 @@ stream publisher that keeps track of the subscription life cycle and requested e Here is an example of such an actor. It dispatches incoming jobs to the attached subscriber: -@@snip [ActorPublisherDocTest.java](../code/jdocs/stream/ActorPublisherDocTest.java) { #job-manager } +@@snip [ActorPublisherDocTest.java]($code$/java/jdocs/stream/ActorPublisherDocTest.java) { #job-manager } You send elements to the stream by calling `onNext`. You are allowed to send as many elements as have been requested by the stream subscriber. This amount can be inquired with @@ -468,7 +468,7 @@ More detailed information can be found in the API documentation. This is how it can be used as input `Source` to a `Flow`: -@@snip [ActorPublisherDocTest.java](../code/jdocs/stream/ActorPublisherDocTest.java) { #actor-publisher-usage } +@@snip [ActorPublisherDocTest.java]($code$/java/jdocs/stream/ActorPublisherDocTest.java) { #actor-publisher-usage } You can only attach one subscriber to this publisher. 
Use a `Broadcast`-element or attach a `Sink.asPublisher(AsPublisher.WITH_FANOUT)` to enable multiple subscribers. @@ -493,7 +493,7 @@ messages from the stream. It can also receive other, non-stream messages, in the Here is an example of such an actor. It dispatches incoming jobs to child worker actors: -@@snip [ActorSubscriberDocTest.java](../code/jdocs/stream/ActorSubscriberDocTest.java) { #worker-pool } +@@snip [ActorSubscriberDocTest.java]($code$/java/jdocs/stream/ActorSubscriberDocTest.java) { #worker-pool } Subclass must define the `RequestStrategy` to control stream back pressure. After each incoming message the `AbstractActorSubscriber` will automatically invoke @@ -511,4 +511,4 @@ More detailed information can be found in the API documentation. This is how it can be used as output `Sink` to a `Flow`: -@@snip [ActorSubscriberDocTest.java](../code/jdocs/stream/ActorSubscriberDocTest.java) { #actor-subscriber-usage } \ No newline at end of file +@@snip [ActorSubscriberDocTest.java]($code$/java/jdocs/stream/ActorSubscriberDocTest.java) { #actor-subscriber-usage } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/stream-io.md b/akka-docs/src/main/paradox/java/stream/stream-io.md index 6b22b2d37d..d9534420a1 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-io.md +++ b/akka-docs/src/main/paradox/java/stream/stream-io.md @@ -12,7 +12,7 @@ as the library does it transparently for you. In order to implement a simple EchoServer we `bind` to a given address, which returns a `Source>`, which will emit an `IncomingConnection` element for each new connection that the Server should handle: -@@snip [StreamTcpDocTest.java](../code/jdocs/stream/io/StreamTcpDocTest.java) { #echo-server-simple-bind } +@@snip [StreamTcpDocTest.java]($code$/java/jdocs/stream/io/StreamTcpDocTest.java) { #echo-server-simple-bind } ![tcp-stream-bind.png](../../images/tcp-stream-bind.png) @@ -23,7 +23,7 @@ helper Flow from `akka.stream.javadsl.Framing` to chunk the inputs up into actua argument indicates that we require an explicit line ending even for the last message before the connection is closed. In this example we simply add exclamation marks to each incoming text message and push it through the flow: -@@snip [StreamTcpDocTest.java](../code/jdocs/stream/io/StreamTcpDocTest.java) { #echo-server-simple-handle } +@@snip [StreamTcpDocTest.java]($code$/java/jdocs/stream/io/StreamTcpDocTest.java) { #echo-server-simple-handle } ![tcp-stream-run.png](../../images/tcp-stream-run.png) @@ -49,7 +49,7 @@ Let's say we know a server has exposed a simple command line interface over TCP, and would like to interact with it using Akka Streams over TCP. To open an outgoing connection socket we use the `outgoingConnection` method: -@@snip [StreamTcpDocTest.java](../code/jdocs/stream/io/StreamTcpDocTest.java) { #repl-client } +@@snip [StreamTcpDocTest.java]($code$/java/jdocs/stream/io/StreamTcpDocTest.java) { #repl-client } The `repl` flow we use to handle the server interaction first prints the servers response, then awaits on input from the command line (this blocking call is used here just for the sake of simplicity) and converts it to a @@ -84,7 +84,7 @@ Thankfully in most situations finding the right spot to start the conversation i to the protocol we are trying to implement using Streams. 
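Before looking at the chat-style variant below, here is a condensed sketch of the bind-and-echo idea from the snippets above; the address, port and framing parameters are placeholders and this is not the `StreamTcpDocTest` code:

```java
import java.util.concurrent.CompletionStage;
import akka.NotUsed;
import akka.actor.ActorSystem;
import akka.stream.ActorMaterializer;
import akka.stream.Materializer;
import akka.stream.javadsl.Flow;
import akka.stream.javadsl.Framing;
import akka.stream.javadsl.FramingTruncation;
import akka.stream.javadsl.Source;
import akka.stream.javadsl.Tcp;
import akka.util.ByteString;

public class EchoServerSketch {
  public static void main(String[] args) {
    final ActorSystem system = ActorSystem.create("echo");
    final Materializer mat = ActorMaterializer.create(system);

    // Each new client connection arrives as an IncomingConnection element.
    final Source<Tcp.IncomingConnection, CompletionStage<Tcp.ServerBinding>> connections =
        Tcp.get(system).bind("127.0.0.1", 8888);

    connections.runForeach(connection -> {
      // Frame the byte stream into lines and echo each line back with a marker.
      final Flow<ByteString, ByteString, NotUsed> echo =
          Flow.of(ByteString.class)
              .via(Framing.delimiter(ByteString.fromString("\n"), 256, FramingTruncation.DISALLOW))
              .map(bytes -> bytes.utf8String())
              .map(line -> ByteString.fromString(line + "!!!\n"));
      connection.handleWith(echo, mat);
    }, mat);
  }
}
```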
In chat-like applications, which our examples resemble, it makes sense to make the Server initiate the conversation by emitting a "hello" message: -@@snip [StreamTcpDocTest.java](../code/jdocs/stream/io/StreamTcpDocTest.java) { #welcome-banner-chat-server } +@@snip [StreamTcpDocTest.java]($code$/java/jdocs/stream/io/StreamTcpDocTest.java) { #welcome-banner-chat-server } To emit the initial message we merge a `Source` with a single element, after the command processing but before the framing and transformation to `ByteString` s this way we do not have to repeat such logic. @@ -101,7 +101,7 @@ on files. Streaming data from a file is as easy as creating a *FileIO.fromPath* given a target path, and an optional `chunkSize` which determines the buffer size determined as one "element" in such stream: -@@snip [StreamFileDocTest.java](../code/jdocs/stream/io/StreamFileDocTest.java) { #file-source } +@@snip [StreamFileDocTest.java]($code$/java/jdocs/stream/io/StreamFileDocTest.java) { #file-source } Please note that these processing stages are backed by Actors and by default are configured to run on a pre-configured threadpool-backed dispatcher dedicated for File IO. This is very important as it isolates the blocking file IO operations from the rest @@ -109,4 +109,4 @@ of the ActorSystem allowing each dispatcher to be utilised in the most efficient dispatcher for file IO operations globally, you can do so by changing the `akka.stream.blocking-io-dispatcher`, or for a specific stage by specifying a custom Dispatcher in code, like this: -@@snip [StreamFileDocTest.java](../code/jdocs/stream/io/StreamFileDocTest.java) { #custom-dispatcher-code } \ No newline at end of file +@@snip [StreamFileDocTest.java]($code$/java/jdocs/stream/io/StreamFileDocTest.java) { #custom-dispatcher-code } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/stream-parallelism.md b/akka-docs/src/main/paradox/java/stream/stream-parallelism.md index e7031429ed..1748e29902 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-parallelism.md +++ b/akka-docs/src/main/paradox/java/stream/stream-parallelism.md @@ -23,7 +23,7 @@ are two pancakes being cooked at the same time, one being cooked on its first si completion. This is how this setup would look like implemented as a stream: -@@snip [FlowParallelismDocTest.java](../code/jdocs/stream/FlowParallelismDocTest.java) { #pipelining } +@@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #pipelining } The two `map` stages in sequence (encapsulated in the "frying pan" flows) will be executed in a pipelined way, basically doing the same as Roland with his frying pans: @@ -53,7 +53,7 @@ the results on a shared plate. Whenever a pan becomes empty, he takes the next s In essence he parallelizes the same process over multiple pans. This is how this setup will look like if implemented using streams: -@@snip [FlowParallelismDocTest.java](../code/jdocs/stream/FlowParallelismDocTest.java) { #parallelism } +@@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #parallelism } The benefit of parallelizing is that it is easy to scale. In the pancake example it is easy to add a third frying pan with Patrik's method, but Roland cannot add a third frying pan, @@ -75,7 +75,7 @@ First, let's look at how we can parallelize pipelined processing stages. 
In the will employ two chefs, each working using Roland's pipelining method, but we use the two chefs in parallel, just like Patrik used the two frying pans. This is how it looks like if expressed as streams: -@@snip [FlowParallelismDocTest.java](../code/jdocs/stream/FlowParallelismDocTest.java) { #parallel-pipeline } +@@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #parallel-pipeline } The above pattern works well if there are many independent jobs that do not depend on the results of each other, but the jobs themselves need multiple processing steps where each step builds on the result of @@ -93,7 +93,7 @@ plate. This is again straightforward to implement with the streams API: -@@snip [FlowParallelismDocTest.java](../code/jdocs/stream/FlowParallelismDocTest.java) { #pipelined-parallel } +@@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #pipelined-parallel } This usage pattern is less common but might be usable if a certain step in the pipeline might take wildly different times to finish different jobs. The reason is that there are more balance-merge steps in this pattern diff --git a/akka-docs/src/main/paradox/java/stream/stream-quickstart.md b/akka-docs/src/main/paradox/java/stream/stream-quickstart.md index 204f8e528e..705a006279 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-quickstart.md +++ b/akka-docs/src/main/paradox/java/stream/stream-quickstart.md @@ -8,19 +8,19 @@ choice as described in @ref:[Using a build tool](../../scala/intro/getting-start A stream usually begins at a source, so this is also how we start an Akka Stream. Before we create one, we import the full complement of streaming tools: -@@snip [QuickStartDocTest.java](../code/jdocs/stream/QuickStartDocTest.java) { #stream-imports } +@@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #stream-imports } If you want to execute the code samples while you read through the quick start guide, you will also need the following imports: -@@snip [QuickStartDocTest.java](../code/jdocs/stream/QuickStartDocTest.java) { #other-imports } +@@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #other-imports } And a class to hold your code, for example: -@@snip [Main.java](../code/jdocs/stream/Main.java) { #main-app } +@@snip [Main.java]($code$/java/jdocs/stream/Main.java) { #main-app } Now we will start with a rather simple source, emitting the integers 1 to 100: -@@snip [QuickStartDocTest.java](../code/jdocs/stream/QuickStartDocTest.java) { #create-source } +@@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #create-source } The `Source` type is parameterized with two types: the first one is the type of element that this source emits and the second one may signal that @@ -33,7 +33,7 @@ Having created this source means that we have a description of how to emit the first 100 natural numbers, but this source is not yet active. In order to get those numbers out we have to run it: -@@snip [QuickStartDocTest.java](../code/jdocs/stream/QuickStartDocTest.java) { #run-source } +@@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #run-source } This line will complement the source with a consumer function—in this example we simply print out the numbers to the console—and pass this little stream @@ -45,13 +45,13 @@ When running this program you might notice it does not terminate, because the `ActorSystem` is never terminated. 
Luckily `runForeach` returns a `CompletionStage` which resolves when the stream finishes: -@@snip [QuickStartDocTest.java](../code/jdocs/stream/QuickStartDocTest.java) { #run-source-and-terminate } +@@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #run-source-and-terminate } You may wonder where the Actor gets created that runs the stream, and you are probably also asking yourself what this `materializer` means. In order to get this value we first need to create an Actor system: -@@snip [QuickStartDocTest.java](../code/jdocs/stream/QuickStartDocTest.java) { #create-materializer } +@@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #create-materializer } There are other ways to create a materializer, e.g. from an `ActorContext` when using streams from within Actors. The @@ -65,7 +65,7 @@ description of what you want to run, and like an architect’s blueprint it can be reused, incorporated into a larger design. We may choose to transform the source of integers and write it to a file instead: -@@snip [QuickStartDocTest.java](../code/jdocs/stream/QuickStartDocTest.java) { #transform-source } +@@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #transform-source } First we use the `scan` combinator to run a computation over the whole stream: starting with the number 1 (`BigInteger.ONE`) we multiple by each of @@ -92,7 +92,7 @@ language for writing these streams always flows from left to right (just like plain English), we need a starting point that is like a source but with an “open” input. In Akka Streams this is called a `Flow`: -@@snip [QuickStartDocTest.java](../code/jdocs/stream/QuickStartDocTest.java) { #transform-sink } +@@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #transform-sink } Starting from a flow of strings we convert each to `ByteString` and then feed to the already known file-writing `Sink`. The resulting blueprint @@ -108,7 +108,7 @@ We can use the new and shiny `Sink` we just created by attaching it to our `factorials` source—after a small adaptation to turn the numbers into strings: -@@snip [QuickStartDocTest.java](../code/jdocs/stream/QuickStartDocTest.java) { #use-transformed-sink } +@@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #use-transformed-sink } ## Time-Based Processing @@ -120,7 +120,7 @@ number emitted by the `factorials` source is the factorial of zero, the second is the factorial of one, and so on. We combine these two by forming strings like `"3! = 6"`. -@@snip [QuickStartDocTest.java](../code/jdocs/stream/QuickStartDocTest.java) { #add-streams } +@@snip [QuickStartDocTest.java]($code$/java/jdocs/stream/QuickStartDocTest.java) { #add-streams } All operations so far have been time-independent and could have been performed in the same fashion on strict collections of elements. The next line @@ -162,7 +162,7 @@ allow to control what should happen in such scenarios. Here's the data model we'll be working with throughout the quickstart examples: -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #model } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #model } @@@ note @@ -180,7 +180,7 @@ like for example finding all twitter handles of users who tweet about `#akka`. 
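For readers without the test sources at hand, a hedged sketch of roughly what such a model could look like (the actual classes live in `TwitterStreamQuickstartDocTest.java`):

```java
import java.util.Arrays;
import java.util.Set;
import java.util.stream.Collectors;

class Author {
  public final String handle;
  public Author(String handle) { this.handle = handle; }
}

class Hashtag {
  public final String name;
  public Hashtag(String name) { this.name = name; }

  // Value equality so that hashtags().contains(...) behaves as expected.
  @Override
  public boolean equals(Object o) {
    return o instanceof Hashtag && ((Hashtag) o).name.equals(name);
  }

  @Override
  public int hashCode() { return name.hashCode(); }
}

class Tweet {
  public final Author author;
  public final long timestamp;
  public final String body;

  public Tweet(Author author, long timestamp, String body) {
    this.author = author;
    this.timestamp = timestamp;
    this.body = body;
  }

  // Extract every "#word" token in the body as a Hashtag.
  public Set<Hashtag> hashtags() {
    return Arrays.stream(body.split(" "))
        .filter(word -> word.startsWith("#"))
        .map(Hashtag::new)
        .collect(Collectors.toSet());
  }
}
```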
In order to prepare our environment by creating an `ActorSystem` and `ActorMaterializer`, which will be responsible for materializing and running the streams we are about to create: -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #materializer-setup } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #materializer-setup } The `ActorMaterializer` can optionally take `ActorMaterializerSettings` which can be used to define materialization properties, such as default buffer sizes (see also @ref:[Buffers for asynchronous stages](stream-rate.md#async-stream-buffers-java)), the dispatcher to @@ -188,7 +188,7 @@ be used by the pipeline etc. These can be overridden with `withAttributes` on `F Let's assume we have a stream of tweets readily available. In Akka this is expressed as a `Source`: -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweet-source } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweet-source } Streams always start flowing from a `Source` then can continue through `Flow` elements or more advanced graph elements to finally be consumed by a `Sink`. @@ -202,7 +202,7 @@ The operations should look familiar to anyone who has used the Scala Collections however they operate on streams and not collections of data (which is a very important distinction, as some operations only make sense in streaming and vice versa): -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-filter-map } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-filter-map } Finally in order to @ref:[materialize](stream-flows-and-basics.md#stream-materialization-java) and run the stream computation we need to attach the Flow to a `Sink` that will get the Flow running. The simplest way to do this is to call @@ -210,18 +210,18 @@ the Flow to a `Sink` that will get the Flow running. The simplest way to d the `Sink class`. For now let's simply print each author: -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-foreachsink-println } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-foreachsink-println } or by using the shorthand version (which are defined only for the most popular Sinks such as `Sink.fold` and `Sink.foreach`): -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-foreach-println } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-foreach-println } Materializing and running a stream always requires a `Materializer` to be passed in explicitly, like this: `.run(mat)`. The complete snippet looks like this: -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #first-sample } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #first-sample } ## Flattening sequences in streams @@ -230,7 +230,7 @@ we might want to map from one element to a number of elements and receive a "fla works on Scala Collections. 
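Pulled together as a sketch (reusing the hypothetical model above and assuming a `tweets` source and a materializer are handed in; this is not the referenced snippet):

```java
import akka.NotUsed;
import akka.stream.Materializer;
import akka.stream.javadsl.Sink;
import akka.stream.javadsl.Source;

class AuthorsSketch {
  // `tweets` and `mat` are assumed to be created elsewhere in the application.
  void printAuthors(Source<Tweet, NotUsed> tweets, Materializer mat) {
    final Hashtag akkaTag = new Hashtag("#akka");

    final Source<Author, NotUsed> authors =
        tweets
            .filter(t -> t.hashtags().contains(akkaTag)) // keep tweets about #akka
            .map(t -> t.author);                         // project out the author

    authors.runWith(Sink.foreach(author -> System.out.println(author.handle)), mat);
  }
}
```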
In order to get a flattened stream of hashtags from our stream of tweets we can use the `mapConcat` combinator: -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #hashtags-mapConcat } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #hashtags-mapConcat } @@@ note @@ -260,7 +260,7 @@ at the expense of not reading as familiarly as collection transformations. Graphs are constructed using `GraphDSL` like this: -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #graph-dsl-broadcast } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #graph-dsl-broadcast } As you can see, we use graph builder `b` to construct the graph using `UniformFanOutShape` and `Flow` s. @@ -289,7 +289,7 @@ in either `OutOfMemoryError` s or other severe degradations of service responsiv and must be handled explicitly. For example, if we are only interested in the "*most recent tweets, with a buffer of 10 elements*" this can be expressed using the `buffer` element: -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-slow-consumption-dropHead } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-slow-consumption-dropHead } The `buffer` element takes an explicit and required `OverflowStrategy`, which defines how the buffer should react when it receives another element while it is full. Strategies provided include dropping the oldest element (`dropHead`), @@ -307,7 +307,7 @@ but in general it is possible to deal with finite streams and come up with a nic First, let's write such an element counter using `Flow.of(Class)` and `Sink.fold` to see how the types look like: -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-fold-count } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-fold-count } First we prepare a reusable `Flow` that will change each incoming tweet into an integer of value `1`. We'll use this in order to combine those with a `Sink.fold` that will sum all `Integer` elements of the stream and make its result available as @@ -333,13 +333,13 @@ and materialized multiple times, because it is just the "blueprint" of the strea for example one that consumes a live stream of tweets within a minute, the materialized values for those two materializations will be different, as illustrated by this example: -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-runnable-flow-materialized-twice } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-runnable-flow-materialized-twice } Many elements in Akka Streams provide materialized values which can be used for obtaining either results of computation or steering these elements which will be discussed in detail in @ref:[Stream Materialization](stream-flows-and-basics.md#stream-materialization-java). 
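A sketch of that counting blueprint, again assuming a `tweets` source and a materializer rather than the referenced snippet; running it twice yields two independent `CompletionStage` values:

```java
import java.util.concurrent.CompletionStage;
import akka.NotUsed;
import akka.stream.Materializer;
import akka.stream.javadsl.Keep;
import akka.stream.javadsl.RunnableGraph;
import akka.stream.javadsl.Sink;
import akka.stream.javadsl.Source;

class CountSketch {
  void countTwice(Source<Tweet, NotUsed> tweets, Materializer mat) {
    // Map every tweet to 1 and sum with a fold; the sum is the materialized value.
    final Sink<Integer, CompletionStage<Integer>> sumSink =
        Sink.fold(0, (acc, elem) -> acc + elem);

    final RunnableGraph<CompletionStage<Integer>> counter =
        tweets.map(t -> 1).toMat(sumSink, Keep.right());

    final CompletionStage<Integer> firstCount = counter.run(mat);  // one materialization
    final CompletionStage<Integer> secondCount = counter.run(mat); // an independent one
  }
}
```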
Summing up this section, now we know what happens behind the scenes when we run this one-liner, which is equivalent to the multi line version above: -@@snip [TwitterStreamQuickstartDocTest.java](../code/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-fold-count-oneline } +@@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-fold-count-oneline } @@@ note diff --git a/akka-docs/src/main/paradox/java/stream/stream-rate.md b/akka-docs/src/main/paradox/java/stream/stream-rate.md index 64d412246a..e8e8c1a38d 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-rate.md +++ b/akka-docs/src/main/paradox/java/stream/stream-rate.md @@ -12,7 +12,7 @@ To run a stage asynchronously it has to be marked explicitly as such using the ` asynchronously means that a stage, after handing out an element to its downstream consumer is able to immediately process the next message. To demonstrate what we mean by this, let's take a look at the following example: -@@snip [StreamBuffersRateDocTest.java](../code/jdocs/stream/StreamBuffersRateDocTest.java) { #pipelining } +@@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #pipelining } Running the above example, one of the possible outputs looks like this: @@ -61,16 +61,16 @@ akka.stream.materializer.max-input-buffer-size = 16 Alternatively they can be set by passing a `ActorMaterializerSettings` to the materializer: -@@snip [StreamBuffersRateDocTest.java](../code/jdocs/stream/StreamBuffersRateDocTest.java) { #materializer-buffer } +@@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #materializer-buffer } If the buffer size needs to be set for segments of a `Flow` only, it is possible by defining a separate `Flow` with these attributes: -@@snip [StreamBuffersRateDocTest.java](../code/jdocs/stream/StreamBuffersRateDocTest.java) { #section-buffer } +@@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #section-buffer } Here is an example of a code that demonstrate some of the issues caused by internal buffers: -@@snip [StreamBuffersRateDocTest.java](../code/jdocs/stream/StreamBuffersRateDocTest.java) { #buffering-abstraction-leak } +@@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #buffering-abstraction-leak } Running the above example one would expect the number *3* to be printed in every 3 seconds (the `conflateWithSeed` step here is configured so that it counts the number of elements received before the downstream `ZipWith` consumes @@ -94,7 +94,7 @@ pipeline of an application. The example below will ensure that 1000 jobs (but not more) are dequeued from an external (imaginary) system and stored locally in memory - relieving the external system: -@@snip [StreamBuffersRateDocTest.java](../code/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-backpressure } +@@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-backpressure } The next example will also queue up 1000 jobs locally, but if there are more jobs waiting in the imaginary external systems, it makes space for the new element by @@ -102,12 +102,12 @@ dropping one element from the *tail* of the buffer. Dropping from the tail is a it must be noted that this will drop the *youngest* waiting job. 
If some "fairness" is desired in the sense that we want to be nice to jobs that has been waiting for long, then this option can be useful. -@@snip [StreamBuffersRateDocTest.java](../code/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-droptail } +@@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-droptail } Instead of dropping the youngest element from the tail of the buffer a new element can be dropped without enqueueing it to the buffer at all. -@@snip [StreamBuffersRateDocTest.java](../code/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-dropnew } +@@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-dropnew } Here is another example with a queue of 1000 jobs, but it makes space for the new element by dropping one element from the *head* of the buffer. This is the *oldest* @@ -116,13 +116,13 @@ resent if not processed in a certain period. The oldest element will be retransmitted soon, (in fact a retransmitted duplicate might be already in the queue!) so it makes sense to drop it first. -@@snip [StreamBuffersRateDocTest.java](../code/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-drophead } +@@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-drophead } Compared to the dropping strategies above, dropBuffer drops all the 1000 jobs it has enqueued once the buffer gets full. This aggressive strategy is useful when dropping jobs is preferred to delaying jobs. -@@snip [StreamBuffersRateDocTest.java](../code/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-dropbuffer } +@@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-dropbuffer } If our imaginary external job provider is a client using our API, we might want to enforce that the client cannot have more than 1000 queued jobs @@ -130,7 +130,7 @@ otherwise we consider it flooding and terminate the connection. This is easily achievable by the error strategy which simply fails the stream once the buffer gets full. -@@snip [StreamBuffersRateDocTest.java](../code/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-fail } +@@snip [StreamBuffersRateDocTest.java]($code$/java/jdocs/stream/StreamBuffersRateDocTest.java) { #explicit-buffers-fail } ## Rate transformation @@ -142,7 +142,7 @@ useful to combine elements from a producer until a demand signal comes from a co Below is an example snippet that summarizes fast stream of elements to a standard deviation, mean and count of elements that have arrived while the stats have been calculated. -@@snip [RateTransformationDocTest.java](../code/jdocs/stream/RateTransformationDocTest.java) { #conflate-summarize } +@@snip [RateTransformationDocTest.java]($code$/java/jdocs/stream/RateTransformationDocTest.java) { #conflate-summarize } This example demonstrates that such flow's rate is decoupled. The element rate at the start of the flow can be much higher that the element rate at the end of the flow. @@ -151,7 +151,7 @@ Another possible use of `conflate` is to not consider all elements for summary w Example below demonstrates how `conflate` can be used to implement random drop of elements when consumer is not able to keep up with the producer. 
-@@snip [RateTransformationDocTest.java](../code/jdocs/stream/RateTransformationDocTest.java) { #conflate-sample } +@@snip [RateTransformationDocTest.java]($code$/java/jdocs/stream/RateTransformationDocTest.java) { #conflate-sample } ### Understanding expand @@ -161,12 +161,12 @@ allows to extrapolate a value to be sent as an element to a consumer. As a simple use of `expand` here is a flow that sends the same element to consumer when producer does not send any new elements. -@@snip [RateTransformationDocTest.java](../code/jdocs/stream/RateTransformationDocTest.java) { #expand-last } +@@snip [RateTransformationDocTest.java]($code$/java/jdocs/stream/RateTransformationDocTest.java) { #expand-last } Expand also allows to keep some state between demand requests from the downstream. Leveraging this, here is a flow that tracks and reports a drift between fast consumer and slow producer. -@@snip [RateTransformationDocTest.java](../code/jdocs/stream/RateTransformationDocTest.java) { #expand-drift } +@@snip [RateTransformationDocTest.java]($code$/java/jdocs/stream/RateTransformationDocTest.java) { #expand-drift } Note that all of the elements coming from upstream will go through `expand` at least once. This means that the output of this flow is going to report a drift of zero if producer is fast enough, or a larger drift otherwise. \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/stream-testkit.md b/akka-docs/src/main/paradox/java/stream/stream-testkit.md index 6298aa9fbb..a121b27eeb 100644 --- a/akka-docs/src/main/paradox/java/stream/stream-testkit.md +++ b/akka-docs/src/main/paradox/java/stream/stream-testkit.md @@ -20,20 +20,20 @@ elements from a predefined collection, running a constructed test flow and asserting on the results that sink produced. Here is an example of a test for a sink: -@@snip [StreamTestKitDocTest.java](../code/jdocs/stream/StreamTestKitDocTest.java) { #strict-collection } +@@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #strict-collection } The same strategy can be applied for sources as well. In the next example we have a source that produces an infinite stream of elements. Such source can be tested by asserting that first arbitrary number of elements hold some condition. Here the `take` combinator and `Sink.seq` are very useful. -@@snip [StreamTestKitDocTest.java](../code/jdocs/stream/StreamTestKitDocTest.java) { #grouped-infinite } +@@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #grouped-infinite } When testing a flow we need to attach a source and a sink. As both stream ends are under our control, we can choose sources that tests various edge cases of the flow and sinks that ease assertions. -@@snip [StreamTestKitDocTest.java](../code/jdocs/stream/StreamTestKitDocTest.java) { #folded-stream } +@@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #folded-stream } ## TestKit @@ -45,7 +45,7 @@ One of the more straightforward tests would be to materialize stream to a `CompletionStage` and then use `PatternsCS.pipe` pattern to pipe the result of that future to the probe. -@@snip [StreamTestKitDocTest.java](../code/jdocs/stream/StreamTestKitDocTest.java) { #pipeto-testprobe } +@@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #pipeto-testprobe } Instead of materializing to a future, we can use a `Sink.actorRef` that sends all incoming elements to the given `ActorRef`. 
Now we can use @@ -53,13 +53,13 @@ assertion methods on `TestProbe` and expect elements one by one as they arrive. We can also assert stream completion by expecting for `onCompleteMessage` which was given to `Sink.actorRef`. -@@snip [StreamTestKitDocTest.java](../code/jdocs/stream/StreamTestKitDocTest.java) { #sink-actorref } +@@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #sink-actorref } Similarly to `Sink.actorRef` that provides control over received elements, we can use `Source.actorRef` and have full control over elements to be sent. -@@snip [StreamTestKitDocTest.java](../code/jdocs/stream/StreamTestKitDocTest.java) { #source-actorref } +@@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #source-actorref } ## Streams TestKit @@ -78,17 +78,17 @@ Be sure to add the module `akka-stream-testkit` to your dependencies. A sink returned by `TestSink.probe` allows manual control over demand and assertions over elements coming downstream. -@@snip [StreamTestKitDocTest.java](../code/jdocs/stream/StreamTestKitDocTest.java) { #test-sink-probe } +@@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #test-sink-probe } A source returned by `TestSource.probe` can be used for asserting demand or controlling when stream is completed or ended with an error. -@@snip [StreamTestKitDocTest.java](../code/jdocs/stream/StreamTestKitDocTest.java) { #test-source-probe } +@@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #test-source-probe } You can also inject exceptions and test sink behaviour on error conditions. -@@snip [StreamTestKitDocTest.java](../code/jdocs/stream/StreamTestKitDocTest.java) { #injecting-failure } +@@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #injecting-failure } Test source and sink can be used together in combination when testing flows. -@@snip [StreamTestKitDocTest.java](../code/jdocs/stream/StreamTestKitDocTest.java) { #test-source-and-sink } \ No newline at end of file +@@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #test-source-and-sink } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/testing.md b/akka-docs/src/main/paradox/java/testing.md index ae2ff667bd..2ac304d329 100644 --- a/akka-docs/src/main/paradox/java/testing.md +++ b/akka-docs/src/main/paradox/java/testing.md @@ -71,7 +71,7 @@ Having access to the actual `Actor` object allows application of all traditional unit testing techniques on the contained methods. Obtaining a reference is done like this: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-actor-ref } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-actor-ref } Since `TestActorRef` is generic in the actor type it returns the underlying actor with its proper static type. From this point on you may bring @@ -93,7 +93,7 @@ usual. This trick is made possible by the `CallingThreadDispatcher` described below (see [CallingThreadDispatcher](#callingthreaddispatcher)); this dispatcher is set implicitly for any actor instantiated into a `TestActorRef`. 
-@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-behavior } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-behavior } As the `TestActorRef` is a subclass of `LocalActorRef` with a few special extras, also aspects like supervision and restarting work properly, but @@ -123,7 +123,7 @@ any thrown exceptions, then there is another mode available for you: just use the `receive` method on `TestActorRef`, which will be forwarded to the underlying actor: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-expecting-exceptions } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-expecting-exceptions } ### Use Cases @@ -157,7 +157,7 @@ principle stays the same in that a single procedure drives the test. The `TestKit` class contains a collection of tools which makes this common task easy. -@@snip [TestKitSampleTest.java](code/jdocs/testkit/TestKitSampleTest.java) { #fullsample } +@@snip [TestKitSampleTest.java]($code$/java/jdocs/testkit/TestKitSampleTest.java) { #fullsample } The `TestKit` contains an actor named `testActor` which is the entry point for messages to be examined with the various `expectMsg...` @@ -183,7 +183,7 @@ case of failure) so that all actors—including the test actor—are stopped. The above mentioned `expectMsgEquals` is not the only method for formulating assertions concerning received messages, the full set is this: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-expect } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-expect } In these examples, the maximum durations you will find mentioned below are left out, in which case they use the default value from configuration item @@ -243,25 +243,29 @@ messages are returned. In addition to message reception assertions there are also methods which help with message flows: -> - * - `public List receiveWhile(Duration max, Duration idle, Int messages, Function f)` - @@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-receivewhile-full } - Collect messages as long as - * they are matching the given function - * the given time interval is not used up - * the next message is received within the idle timeout - * the number of messages has not yet reached the maximum - All collected messages are returned. - * - `public void awaitCond(Duration max, Duration interval, Supplier p)` - @@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-awaitCond } +`public List receiveWhile(Duration max, Duration idle, Int messages, Function f)` + +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-receivewhile-full } + +Collect messages as long as +* they are matching the given function +* the given time interval is not used up +* the next message is received within the idle timeout +* the number of messages has not yet reached the maximum +All collected messages are returned. + +`public void awaitCond(Duration max, Duration interval, Supplier p)` + +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-awaitCond } + Poll the given condition every `interval` until it returns `true` or the `max` duration is used up. 
- * - `public void awaitAssert(Duration max, Duration interval, Supplier a)` - @@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-awaitAssert } - Poll the given assert function every `interval` until it does not throw + +`public void awaitAssert(Duration max, Duration interval, Supplier a)` + +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-awaitAssert } + +Poll the given assert function every `interval` until it does not throw an exception or the `max` duration is used up. If the timeout expires the last exception is thrown. @@ -269,11 +273,10 @@ There are also cases where not all messages sent to the test kit are actually relevant to the test, but removing them would mean altering the actors under test. For this purpose it is possible to ignore certain messages: -> - * - `public void ignoreMsg(Function f)` - `public void ignoreMsg()` - @@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-ignoreMsg } +`public void ignoreMsg(Function f)` +`public void ignoreMsg()` + +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-ignoreMsg } ### Expecting Log Messages @@ -284,7 +287,7 @@ handler with the `TestEventListener` and using an `EventFilter` allows assertions on log messages, including those which are generated by exceptions: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-event-filter } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-event-filter } If a number of occurrences is specific—as demonstrated above—then `intercept()` will block until that number of matching messages have been received or the @@ -314,7 +317,7 @@ the positive or negative result must be obtained. Lower time limits need to be checked external to the examination, which is facilitated by a new construct for managing time constraints: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-within } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-within } The block in `within` must complete after a @ref:[Duration](../scala/common/duration.md) which is between `min` and `max`, where the former defaults to zero. The @@ -347,7 +350,7 @@ internally scaled by a factor taken from the [Configuration](), You can scale other durations with the same factor by using `dilated` method in `TestKit`. -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #duration-dilation } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #duration-dilation } ### Using Multiple Probe Actors @@ -358,7 +361,7 @@ Another approach is to use it for creation of simple probe actors to be inserted in the message flows. The functionality is best explained using a small example: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-probe } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-probe } This simple test verifies an equally simple Forwarder actor by injecting a probe as the forwarder’s target. Another example would be two actors A and B @@ -370,12 +373,12 @@ the test setup. 
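As a hedged sketch of the probe-injection idea with the `javadsl` `TestKit` (not the `TestKitDocTest` code; the message is sent directly for brevity instead of through an actor under test):

```java
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;

public class ProbeSketchTest {
  public void testWithProbe() {
    final ActorSystem system = ActorSystem.create("probe-sketch");
    final TestKit probe = new TestKit(system);

    // Hand probe.getRef() to the actor under test as its target...
    final ActorRef target = probe.getRef();

    // ...exercise the actor under test (here we simply send directly)...
    target.tell("hello", ActorRef.noSender());

    // ...and assert on what reached the probe.
    probe.expectMsg("hello");

    TestKit.shutdownActorSystem(system);
  }
}
```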
If you have many test probes, you can name them to get meaningful actor names in test logs and assertions: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-probe-with-custom-name } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-with-custom-name } Probes may also be equipped with custom assertions to make your test code even more concise and clear: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-special-probe } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-special-probe } You have complete flexibility here in mixing and matching the `TestKit` facilities with your own checks and choosing an intuitive @@ -397,7 +400,7 @@ means that it is dangerous to try watching e.g. `TestActorRef` from a A `TestKit` can register itself for DeathWatch of any other actor: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-probe-watch } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-watch } #### Replying to Messages Received by Probes @@ -406,14 +409,14 @@ The probe stores the sender of the last dequeued message (i.e. after its `getLastSender()` method. This information can also implicitly be used for having the probe reply to the last received message: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-probe-reply } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-reply } #### Forwarding Messages Received by Probes The probe can also forward a received message (i.e. after its `expectMsg*` reception), retaining the original sender: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-probe-forward } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-probe-forward } #### Auto-Pilot @@ -424,7 +427,7 @@ keep a test running and verify traces later you can also install an This code can be used to forward messages, e.g. in a chain `A --> Probe --> B`, as long as a certain protocol is obeyed. -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-auto-pilot } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-auto-pilot } The `run` method must return the auto-pilot for the next message, wrapped in an `Option`; setting it to `None` terminates the auto-pilot. @@ -437,7 +440,7 @@ described [above](#testkit-within) is local to each probe. Hence, probes do not react to each other's deadlines or to the deadline set in an enclosing `TestKit` instance: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #test-within-probe } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #test-within-probe } Here, the `expectMsgEquals` call will use the default timeout. @@ -458,14 +461,14 @@ Conversely, a parent's binding to its child can be lessened as follows: For example, the structure of the code you want to test may follow this pattern: -@@snip [ParentChildTest.java](code/jdocs/testkit/ParentChildTest.java) { #test-example } +@@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-example } #### Introduce child to its parent The first option is to avoid use of the `context.parent` function and create a child with a custom parent by passing an explicit reference to its parent instead. 
-@@snip [ParentChildTest.java](code/jdocs/testkit/ParentChildTest.java) { #test-dependentchild } +@@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-dependentchild } #### Create the child using TestKit @@ -473,7 +476,7 @@ The `TestKit` class can in fact create actors that will run with the test probe This will cause any messages the child actor sends to *getContext().getParent()* to end up in the test probe. -@@snip [ParentChildTest.java](code/jdocs/testkit/ParentChildTest.java) { #test-TestProbe-parent } +@@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-TestProbe-parent } #### Using a fabricated parent @@ -481,26 +484,26 @@ If you prefer to avoid modifying the child constructor you can create a fabricated parent in your test. This, however, does not enable you to test the parent actor in isolation. -@@snip [ParentChildTest.java](code/jdocs/testkit/ParentChildTest.java) { #test-fabricated-parent-creator } +@@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-fabricated-parent-creator } -@@snip [ParentChildTest.java](code/jdocs/testkit/ParentChildTest.java) { #test-fabricated-parent } +@@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-fabricated-parent } #### Externalize child making from the parent Alternatively, you can tell the parent how to create its child. There are two ways to do this: by giving it a `Props` object or by giving it a function which takes care of creating the child actor: -@@snip [ParentChildTest.java](code/jdocs/testkit/ParentChildTest.java) { #test-dependentparent } +@@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-dependentparent } -@@snip [ParentChildTest.java](code/jdocs/testkit/ParentChildTest.java) { #test-dependentparent-generic } +@@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #test-dependentparent-generic } Creating the `Actor` is straightforward and the function may look like this in your test code: -@@snip [ParentChildTest.java](code/jdocs/testkit/ParentChildTest.java) { #child-maker-test } +@@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #child-maker-test } And like this in your application code: -@@snip [ParentChildTest.java](code/jdocs/testkit/ParentChildTest.java) { #child-maker-prod } +@@snip [ParentChildTest.java]($code$/java/jdocs/testkit/ParentChildTest.java) { #child-maker-prod } Which of these methods is the best depends on what is most important to test. The most generic option is to create the parent actor by passing it a function that is @@ -520,7 +523,7 @@ so long as all intervening actors run on this dispatcher. Just set the dispatcher as you normally would: -@@snip [TestKitDocTest.java](code/jdocs/testkit/TestKitDocTest.java) { #calling-thread-dispatcher } +@@snip [TestKitDocTest.java]($code$/java/jdocs/testkit/TestKitDocTest.java) { #calling-thread-dispatcher } ### How it works diff --git a/akka-docs/src/main/paradox/java/typed-actors.md b/akka-docs/src/main/paradox/java/typed-actors.md index c226fba049..f2e7e8ee94 100644 --- a/akka-docs/src/main/paradox/java/typed-actors.md +++ b/akka-docs/src/main/paradox/java/typed-actors.md @@ -36,7 +36,7 @@ They have their niche, use them sparingly. Before we create our first Typed Actor we should first go through the tools that we have at our disposal, it's located in `akka.actor.TypedActor`. 
-@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-extension-tools } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-extension-tools } @@@ warning @@ -53,34 +53,34 @@ To create a Typed Actor you need to have one or more interfaces, and one impleme The following imports are assumed: -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #imports } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #imports } Our example interface: -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } Our example implementation of that interface: -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } The most trivial way of creating a Typed Actor instance of our `Squarer`: -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create1 } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create1 } First type is the type of the proxy, the second type is the type of the implementation. If you need to call a specific constructor you do it like this: -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create2 } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create2 } Since you supply a `Props`, you can specify which dispatcher to use, what the default timeout should be used and more. Now, our `Squarer` doesn't have any methods, so we'd better add those. -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } Alright, now we've got some methods we can call, but we need to implement those in `SquarerImpl`. -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } Excellent, now we have an interface and an implementation of that interface, and we know how to create a Typed Actor from that, so let's look at calling these methods. @@ -107,18 +107,18 @@ we *strongly* recommend that parameters passed are immutable. ### One-way message send -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-oneway } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-oneway } As simple as that! The method will be executed on another thread; asynchronously. ### Request-reply message send -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-option } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-option } This will block for as long as the timeout that was set in the `Props` of the Typed Actor, if needed. It will return `None` if a timeout occurs. 
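For illustration, a Scala-flavoured sketch of such an `Option`-returning call might look like the following; the `Squarer` interface and `SquarerImpl` class are assumptions made up for this sketch, and the referenced Java snippet remains the real example:

```scala
import akka.actor.{ ActorSystem, TypedActor, TypedProps }

trait Squarer {
  // Blocking request-reply: yields None if the configured timeout expires.
  def squareNowPlease(i: Int): Option[Int]
}

class SquarerImpl extends Squarer {
  def squareNowPlease(i: Int): Option[Int] = Some(i * i)
}

object OptionCallDemo extends App {
  val system = ActorSystem("typed-actor-demo")
  val squarer: Squarer =
    TypedActor(system).typedActorOf(TypedProps[SquarerImpl]())

  println(squarer.squareNowPlease(4)) // Some(16), unless the call timed out

  TypedActor(system).stop(squarer)
  system.terminate()
}
```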
-@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-strict } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-strict } This will block for as long as the timeout that was set in the `Props` of the Typed Actor, if needed. It will throw a `java.util.concurrent.TimeoutException` if a timeout occurs. @@ -130,7 +130,7 @@ interface method. ### Request-reply-with-future message send -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-future } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-future } This call is asynchronous, and the Future returned can be used for asynchronous composition. @@ -138,11 +138,11 @@ This call is asynchronous, and the Future returned can be used for asynchronous Since Akka's Typed Actors are backed by Akka Actors they must be stopped when they aren't needed anymore. -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-stop } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-stop } This asynchronously stops the Typed Actor associated with the specified proxy ASAP. -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-poisonpill } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-poisonpill } This asynchronously stops the Typed Actor associated with the specified proxy after it's done with all calls that were made prior to this call. @@ -152,7 +152,7 @@ after it's done with all calls that were made prior to this call. Since you can obtain a contextual Typed Actor Extension by passing in an `ActorContext` you can create child Typed Actors by invoking `typedActorOf(..)` on that. -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-hierarchy } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-hierarchy } You can also create a child Typed Actor in regular Akka Actors by giving the `AbstractActor.ActorContext` as an input parameter to TypedActor.get(…). @@ -192,7 +192,7 @@ This is usable if you want to communicate remotely with TypedActors on other mac Since `TypedActors` are backed by `Akka Actors`, you can use `typedActorOf` to proxy `ActorRefs` potentially residing on remote nodes. -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-actor-remote } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-remote } ## Typed Router pattern @@ -202,10 +202,10 @@ which can implement a specific routing logic, such as `smallest-mailbox` or `con Routers are not provided directly for typed actors, but it is really easy to leverage an untyped router and use a typed proxy in front of it. To showcase this let's create typed actors that assign themselves some random `id`, so we know that in fact, the router has sent the message to different actors: -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-router-types } +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-router-types } In order to round robin among a few instances of such actors, you can simply create a plain untyped router, and then facade it with a `TypedActor` like shown in the example below. 
This works because typed actors of course communicate using the same mechanisms as normal actors, and methods calls on them get transformed into message sends of `MethodCall` messages. -@@snip [TypedActorDocTest.java](code/jdocs/actor/TypedActorDocTest.java) { #typed-router } \ No newline at end of file +@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-router } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/actordsl.md b/akka-docs/src/main/paradox/scala/actordsl.md index 10cdb83461..1d629947fa 100644 --- a/akka-docs/src/main/paradox/scala/actordsl.md +++ b/akka-docs/src/main/paradox/scala/actordsl.md @@ -13,13 +13,13 @@ Simple actors—for example one-off workers or even when trying things out in th REPL—can be created more concisely using the `Act` trait. The supporting infrastructure is bundled in the following import: -@@snip [ActorDSLSpec.scala]../../../../../akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #import } +@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #import } This import is assumed for all code samples throughout this section. The implicit actor system serves as `ActorRefFactory` for all examples below. To define a simple actor, the following is sufficient: -@@snip [ActorDSLSpec.scala]../../../../../akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #simple-actor } +@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #simple-actor } Here, `actor` takes the role of either `system.actorOf` or `context.actorOf`, depending on which context it is called in: it takes an @@ -32,7 +32,7 @@ The two possible ways of issuing a `context.become` (replacing or adding the new behavior) are offered separately to enable a clutter-free notation of nested receives: -@@snip [ActorDSLSpec.scala]../../../../../akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #becomeStacked } +@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #becomeStacked } Please note that calling `unbecome` more often than `becomeStacked` results in the original behavior being installed, which in case of the `Act` @@ -43,17 +43,17 @@ construction). Life-cycle hooks are also exposed as DSL elements (see @ref:[Start Hook](actors.md#start-hook-scala) and @ref:[Stop Hook](actors.md#stop-hook-scala)), where later invocations of the methods shown below will replace the contents of the respective hooks: -@@snip [ActorDSLSpec.scala]../../../../../akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #simple-start-stop } +@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #simple-start-stop } The above is enough if the logical life-cycle of the actor matches the restart cycles (i.e. `whenStopping` is executed before a restart and `whenStarting` afterwards). If that is not desired, use the following two hooks (see @ref:[Restart Hooks](actors.md#restart-hook-scala)): -@@snip [ActorDSLSpec.scala]../../../../../akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #failing-actor } +@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #failing-actor } It is also possible to create nested actors, i.e. 
grand-children, like this: -@@snip [ActorDSLSpec.scala]../../../../../akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #nested-actor } +@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #nested-actor } @@@ note @@ -67,7 +67,7 @@ The grand-child will be supervised by the child; the supervisor strategy for this relationship can also be configured using a DSL element (supervision directives are part of the `Act` trait): -@@snip [ActorDSLSpec.scala]../../../../../akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #supervise-with } +@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #supervise-with } ### Actor with `Stash` @@ -79,4 +79,4 @@ runtime erased type is just an anonymous subtype of `Act`). The purpose is to automatically use the appropriate deque-based mailbox type required by `Stash`. If you want to use this magic, simply extend `ActWithStash`: -@@snip [ActorDSLSpec.scala]../../../../../akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #act-with-stash } \ No newline at end of file +@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #act-with-stash } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/actors.md b/akka-docs/src/main/paradox/scala/actors.md index ade6be5860..c8f9d134ad 100644 --- a/akka-docs/src/main/paradox/scala/actors.md +++ b/akka-docs/src/main/paradox/scala/actors.md @@ -32,7 +32,7 @@ along with the implementation of how the messages should be processed. Here is an example: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #imports1 #my-actor } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #imports1 #my-actor } Please note that the Akka Actor `receive` message loop is exhaustive, which is different compared to Erlang and the late Scala Actors. This means that you @@ -59,7 +59,7 @@ creating an actor including associated deployment information (e.g. which dispatcher to use, see more below). Here are some examples of how to create a `Props` instance. -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #creating-props } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #creating-props } The second variant shows how to pass constructor arguments to the `Actor` being created, but it should only be used outside of actors as @@ -80,7 +80,7 @@ for cases when the actor constructor takes value classes as arguments. #### Dangerous Variants -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #creating-props-deprecated } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #creating-props-deprecated } This method is not recommended to be used within another actor because it encourages to close over the enclosing scope, resulting in non-serializable @@ -110,13 +110,13 @@ There are two edge cases in actor creation with `Props`: * An actor with `AnyVal` arguments. 
-@@snip [PropsEdgeCaseSpec.scala](code/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-value-class } +@@snip [PropsEdgeCaseSpec.scala]($code$/scala/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-value-class } -@@snip [PropsEdgeCaseSpec.scala](code/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-value-class-example } +@@snip [PropsEdgeCaseSpec.scala]($code$/scala/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-value-class-example } * An actor with default constructor values. -@@snip [PropsEdgeCaseSpec.scala](code/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-default-values } +@@snip [PropsEdgeCaseSpec.scala]($code$/scala/docs/actor/PropsEdgeCaseSpec.scala) { #props-edge-cases-default-values } In both cases an `IllegalArgumentException` will be thrown stating no matching constructor could be found. @@ -133,13 +133,13 @@ associated with using the `Props.apply(...)` method which takes a by-name argument, since within a companion object the given code block will not retain a reference to its enclosing scope: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #props-factory } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #props-factory } Another good practice is to declare what messages an Actor can receive in the companion object of the Actor, which makes easier to know what it can receive: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #messages-in-companion } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #messages-in-companion } ### Creating Actors with Props @@ -147,13 +147,13 @@ Actors are created by passing a `Props` instance into the `actorOf` factory method which is available on `ActorSystem` and `ActorContext`. -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #system-actorOf } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #system-actorOf } Using the `ActorSystem` will create top-level actors, supervised by the actor system’s provided guardian actor, while using an actor’s context will create a child actor. -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #context-actorOf } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #context-actorOf } It is recommended to create a hierarchy of children, grand-children and so on such that it fits the logical failure-handling structure of the application, @@ -184,7 +184,7 @@ value classes. In these cases you should either unpack the arguments or create the props by calling the constructor manually: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #actor-with-value-class-argument } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #actor-with-value-class-argument } ### Dependency Injection @@ -193,7 +193,7 @@ be part of the `Props` as well, as described [above](Props_). But there are cases when a factory method must be used, for example when the actual constructor arguments are determined by a dependency injection framework. -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #creating-indirectly } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #creating-indirectly } @@@ warning @@ -220,14 +220,14 @@ cannot do: receiving multiple replies (e.g. by subscribing an `ActorRef` to a notification service) and watching other actors’ lifecycle. 
For these purposes there is the `Inbox` class: -@@snip [ActorDSLSpec.scala]../../../../../akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #inbox } +@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #inbox } There is an implicit conversion from inbox to actor reference which means that in this example the sender reference will be that of the actor hidden away within the inbox. This allows the reply to be received on the last line. Watching an actor is quite simple as well: -@@snip [ActorDSLSpec.scala]../../../../../akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #watch } +@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #watch } ## Actor API @@ -266,12 +266,12 @@ time). You can import the members in the `context` to avoid prefixing access with `context.` -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #import-context } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #import-context } The remaining visible methods are user-overridable life-cycle hooks which are described in the following: -@@snip [Actor.scala]../../../../../akka-actor/src/main/scala/akka/actor/Actor.scala) { #lifecycle-hooks } +@@snip [Actor.scala]($akka$/akka-actor/src/main/scala/akka/actor/Actor.scala) { #lifecycle-hooks } The implementations shown above are the defaults provided by the `Actor` trait. @@ -332,7 +332,7 @@ termination (see [Stopping Actors](#stopping-actors)). This service is provided Registering a monitor is easy: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #watch } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #watch } It should be noted that the `Terminated` message is generated independent of the order in which registration and termination occur. @@ -357,7 +357,7 @@ no `Terminated` message for that actor will be processed anymore. Right after starting the actor, its `preStart` method is invoked. -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #preStart } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #preStart } This method is called when the actor is first created. During restarts it is called by the default implementation of `postRestart`, which means that @@ -430,7 +430,7 @@ actors may look up other actors by specifying absolute or relative paths—logical or physical—and receive back an `ActorSelection` with the result: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #selection-local } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #selection-local } @@@ note @@ -459,7 +459,7 @@ structure, i.e. the supervisor. The path elements of an actor selection may contain wildcard patterns allowing for broadcasting of messages to that section: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #selection-wildcard } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #selection-wildcard } Messages can be sent via the `ActorSelection` and the path of the `ActorSelection` is looked up when delivering each message. If the selection @@ -475,7 +475,7 @@ actors which are traversed in the sense that if a concrete name lookup fails negative result is generated. Please note that this does not mean that delivery of that reply is guaranteed, it still is a normal message. 
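A minimal Scala sketch of that handshake, assuming an invented `Follower` actor that resolves a configurable path (the referenced `ActorDocSpec.scala` snippet is the canonical example):

```scala
import akka.actor.{ Actor, ActorIdentity, ActorRef, Identify, Terminated }

// Resolve a path with the built-in Identify message and react to the
// ActorIdentity reply, which carries Some(ref) or None.
class Follower(path: String) extends Actor {
  val identifyId = 1
  context.actorSelection(path) ! Identify(identifyId)

  def receive = {
    case ActorIdentity(`identifyId`, Some(ref)) =>
      context.watch(ref)   // somebody answered: keep an eye on it
      context.become(active(ref))
    case ActorIdentity(`identifyId`, None) =>
      context.stop(self)   // nothing lives at that path
  }

  def active(other: ActorRef): Receive = {
    case Terminated(`other`) => context.stop(self)
  }
}
```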
-@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #identify } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #identify } You can also acquire an `ActorRef` for an `ActorSelection` with the `resolveOne` method of the `ActorSelection`. It returns a `Future` @@ -485,7 +485,7 @@ didn't complete within the supplied *timeout*. Remote actor addresses may also be looked up, if @ref:[remoting](remoting.md) is enabled: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #selection-remote } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #selection-remote } An example demonstrating actor look-up is given in @ref:[Remoting Sample](remoting.md#remote-sample-scala). @@ -534,7 +534,7 @@ remoting. So always prefer `tell` for performance, and only `ask` if you must. This is the preferred way of sending messages. No blocking waiting for a message. This gives the best concurrency and scalability characteristics. -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #tell } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #tell } If invoked from within an Actor, then the sending actor reference will be implicitly passed along with the message and available to the receiving Actor @@ -550,7 +550,7 @@ If invoked from an instance that is **not** an Actor the sender will be The `ask` pattern involves actors as well as futures, hence it is offered as a use pattern rather than a method on `ActorRef`: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #ask-pipeTo } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #ask-pipeTo } This example demonstrates `ask` together with the `pipeTo` pattern on futures, because this is likely to be a common combination. Please note that @@ -573,7 +573,7 @@ This is *not done automatically* when an actor throws an exception while process @@@ -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #reply-exception } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #reply-exception } If the actor does not complete the future, it will expire after the timeout period, completing it with an `AskTimeoutException`. The timeout is @@ -581,11 +581,11 @@ taken from one of the following locations in order of precedence: 1. explicitly given timeout as in: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #using-explicit-timeout } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #using-explicit-timeout } 2. implicit argument of type `akka.util.Timeout`, e.g. -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #using-implicit-timeout } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #using-implicit-timeout } See @ref:[Futures](futures.md) for more information on how to await or query a future. @@ -614,19 +614,19 @@ original sender address/reference is maintained even though the message is going through a 'mediator'. This can be useful when writing actors that work as routers, load-balancers, replicators etc. 
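For instance, a minimal sketch of such a mediator (the `Mediator` name is invented for this example):

```scala
import akka.actor.{ Actor, ActorRef }

// Pass-through mediator: `forward` keeps the original sender, so the
// target's replies go straight back to the initial requester.
class Mediator(target: ActorRef) extends Actor {
  def receive = {
    case msg => target forward msg
  }
}
```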
-@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #forward } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #forward } ## Receive messages An Actor has to implement the `receive` method to receive messages: -@@snip [Actor.scala]../../../../../akka-actor/src/main/scala/akka/actor/Actor.scala) { #receive } +@@snip [Actor.scala]($akka$/akka-actor/src/main/scala/akka/actor/Actor.scala) { #receive } This method returns a `PartialFunction`, e.g. a ‘match/case’ clause in which the message can be matched against the different case clauses using Scala pattern matching. Here is an example: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #imports1 #my-actor } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #imports1 #my-actor } ## Reply to messages @@ -658,7 +658,7 @@ timeout there must have been an idle period beforehand as configured via this me Once set, the receive timeout stays in effect (i.e. continues firing repeatedly after inactivity periods). Pass in *Duration.Undefined* to switch off this feature. -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #receive-timeout } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #receive-timeout } Messages marked with `NotInfluenceReceiveTimeout` will not reset the timer. This can be useful when `ReceiveTimeout` should be fired by external inactivity but not influenced by internal activity, @@ -673,7 +673,7 @@ the actor itself or child actors and the system for stopping top level actors. T termination of the actor is performed asynchronously, i.e. `stop` may return before the actor is stopped. -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #stoppingActors-actor } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #stoppingActors-actor } Processing of the current message, if any, will continue before the actor is stopped, but additional messages in the mailbox will not be processed. By default these @@ -699,7 +699,7 @@ whole system. The `postStop()` hook is invoked after an actor is fully stopped. This enables cleaning up of resources: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #postStop } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #postStop } @@@ note @@ -724,9 +724,9 @@ in the mailbox. `gracefulStop` is useful if you need to wait for termination or compose ordered termination of several actors: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #gracefulStop } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #gracefulStop } -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #gracefulStop-actor } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #gracefulStop-actor } When `gracefulStop()` returns successfully, the actor’s `postStop()` hook will have been executed: there exists a happens-before edge between the end of @@ -757,7 +757,7 @@ services in a specific order and perform registered tasks during the shutdown pr The order of the shutdown phases is defined in configuration `akka.coordinated-shutdown.phases`. 
The default phases are defined as: -@@snip [reference.conf]../../../../../akka-actor/src/main/resources/reference.conf) { #coordinated-shutdown-phases } +@@snip [reference.conf]($akka$/akka-actor/src/main/resources/reference.conf) { #coordinated-shutdown-phases } More phases can be be added in the application's configuration if needed by overriding a phase with an additional `depends-on`. Especially the phases `before-service-unbind`, `before-cluster-shutdown` and @@ -769,7 +769,7 @@ The phases are ordered with [topological](https://en.wikipedia.org/wiki/Topologi Tasks can be added to a phase with: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-addTask } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-addTask } The returned `Future[Done]` should be completed when the task is completed. The task name parameter is only used for debugging/logging. @@ -788,7 +788,7 @@ added too late will not be run. To start the coordinated shutdown process you can invoke `run` on the `CoordinatedShutdown` extension: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-run } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-run } It's safe to call the `run` method multiple times. It will only run once. @@ -817,7 +817,7 @@ If you have application specific JVM shutdown hooks it's recommended that you re `CoordinatedShutdown` so that they are running before Akka internal shutdown hooks, e.g. those shutting down Akka Remoting (Artery). -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-jvm-hook } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #coordinated-shutdown-jvm-hook } For some tests it might be undesired to terminate the `ActorSystem` via `CoordinatedShutdown`. You can disable that by adding the following to the configuration of the `ActorSystem` that is @@ -849,7 +849,7 @@ Please note that the actor will revert to its original behavior when restarted b To hotswap the Actor behavior using `become`: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #hot-swap-actor } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #hot-swap-actor } This variant of the `become` method is useful for many different things, such as to implement a Finite State Machine (FSM, for an example see [Dining @@ -863,7 +863,7 @@ of “pop” operations (i.e. `unbecome`) matches the number of “push” ones in the long run, otherwise this amounts to a memory leak (which is why this behavior is not the default). -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #swapper } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #swapper } ### Encoding Scala Actors nested receives without accidentally leaking memory @@ -892,7 +892,7 @@ mailbox, see the documentation on mailboxes: @ref:[Mailboxes](mailboxes.md). Here is an example of the `Stash` in action: -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #stash } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #stash } Invoking `stash()` adds the current message (the message that the actor received last) to the actor's stash. It is typically invoked @@ -994,7 +994,7 @@ For example, imagine you have a set of actors which are either `Producers` or `C have an actor share both behaviors. 
This can be easily achieved without having to duplicate code by extracting the behaviors to traits and implementing the actor's `receive` as combination of these partial functions. -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #receive-orElse } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #receive-orElse } Instead of inheritance the same pattern can be applied via composition - one would simply compose the receive method using partial functions from delegates. @@ -1027,7 +1027,7 @@ this behavior, and ensure that there is only one call to `preStart()`. One useful usage of this pattern is to disable creation of new `ActorRefs` for children during restarts. This can be achieved by overriding `preRestart()`: -@@snip [InitializationDocSpec.scala](code/docs/actor/InitializationDocSpec.scala) { #preStartInit } +@@snip [InitializationDocSpec.scala]($code$/scala/docs/actor/InitializationDocSpec.scala) { #preStartInit } Please note, that the child actors are *still restarted*, but no new `ActorRef` is created. One can recursively apply the same principles for the children, ensuring that their `preStart()` method is called only at the creation of their @@ -1042,7 +1042,7 @@ for example in the presence of circular dependencies. In this case the actor sho and use `become()` or a finite state-machine state transition to encode the initialized and uninitialized states of the actor. -@@snip [InitializationDocSpec.scala](code/docs/actor/InitializationDocSpec.scala) { #messageInit } +@@snip [InitializationDocSpec.scala]($code$/scala/docs/actor/InitializationDocSpec.scala) { #messageInit } If the actor may receive messages before it has been initialized, a useful tool can be the `Stash` to save messages until the initialization finishes, and replaying them after the actor became initialized. diff --git a/akka-docs/src/main/paradox/scala/additional/faq.md b/akka-docs/src/main/paradox/scala/additional/faq.md index 90f2bcc087..20b6d22ee5 100644 --- a/akka-docs/src/main/paradox/scala/additional/faq.md +++ b/akka-docs/src/main/paradox/scala/additional/faq.md @@ -79,7 +79,7 @@ exhaustiveness. Here is an example where the compiler will warn you that the match in receive isn't exhaustive: -@@snip [Faq.scala](code/docs/faq/Faq.scala) { #exhaustiveness-check } +@@snip [Faq.scala]($code$/scala/docs/faq/Faq.scala) { #exhaustiveness-check } ## Remoting diff --git a/akka-docs/src/main/paradox/scala/additional/osgi.md b/akka-docs/src/main/paradox/scala/additional/osgi.md index 933f02030d..d9a4827d86 100644 --- a/akka-docs/src/main/paradox/scala/additional/osgi.md +++ b/akka-docs/src/main/paradox/scala/additional/osgi.md @@ -96,7 +96,7 @@ dynamic in this way. ActorRefs may safely be exposed to other bundles. To bootstrap Akka inside an OSGi environment, you can use the `akka.osgi.ActorSystemActivator` class to conveniently set up the ActorSystem. -@@snip [Activator.scala]../../../../../akka-osgi/src/test/scala/docs/osgi/Activator.scala) { #Activator } +@@snip [Activator.scala]($akka$/akka-osgi/src/test/scala/docs/osgi/Activator.scala) { #Activator } The goal here is to map the OSGi lifecycle more directly to the Akka lifecycle. 
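A sketch of such an activator, assuming a hypothetical `SomeActor` belonging to the bundle (the referenced `Activator.scala` snippet is the tested version):

```scala
import akka.actor.{ Actor, ActorSystem, Props }
import akka.osgi.ActorSystemActivator
import org.osgi.framework.BundleContext

class SomeActor extends Actor {
  def receive = { case msg => sender() ! msg }
}

class Activator extends ActorSystemActivator {
  def configure(context: BundleContext, system: ActorSystem): Unit = {
    // Optionally publish the ActorSystem in the OSGi service registry.
    registerService(context, system)
    // Bootstrap the bundle's actors here.
    system.actorOf(Props[SomeActor], name = "someActor")
  }
}
```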
The `ActorSystemActivator` creates the actor system with a class loader that finds resources (`application.conf` and `reference.conf` files) and classes diff --git a/akka-docs/src/main/paradox/scala/agents.md b/akka-docs/src/main/paradox/scala/agents.md index cedeab3a4d..327c9f9cb9 100644 --- a/akka-docs/src/main/paradox/scala/agents.md +++ b/akka-docs/src/main/paradox/scala/agents.md @@ -42,18 +42,18 @@ Agents are created by invoking `Agent(value)` passing in the Agent's initial value and providing an implicit `ExecutionContext` to be used for it, for these examples we're going to use the default global one, but YMMV: -@@snip [AgentDocSpec.scala](code/docs/agent/AgentDocSpec.scala) { #create } +@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #create } ## Reading an Agent's value Agents can be dereferenced (you can get an Agent's value) by invoking the Agent with parentheses like this: -@@snip [AgentDocSpec.scala](code/docs/agent/AgentDocSpec.scala) { #read-apply } +@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #read-apply } Or by using the get method: -@@snip [AgentDocSpec.scala](code/docs/agent/AgentDocSpec.scala) { #read-get } +@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #read-get } Reading an Agent's current value does not involve any message passing and happens immediately. So while updates to an Agent are asynchronous, reading the @@ -69,7 +69,7 @@ the update will be applied but dispatches to an Agent from a single thread will occur in order. You apply a value or a function by invoking the `send` function. -@@snip [AgentDocSpec.scala](code/docs/agent/AgentDocSpec.scala) { #send } +@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #send } You can also dispatch a function to update the internal state but on its own thread. This does not use the reactive thread pool and can be used for @@ -77,21 +77,21 @@ long-running or blocking operations. You do this with the `sendOff` method. Dispatches using either `sendOff` or `send` will still be executed in order. -@@snip [AgentDocSpec.scala](code/docs/agent/AgentDocSpec.scala) { #send-off } +@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #send-off } All `send` methods also have a corresponding `alter` method that returns a `Future`. See @ref:[Futures](futures.md) for more information on `Futures`. -@@snip [AgentDocSpec.scala](code/docs/agent/AgentDocSpec.scala) { #alter } +@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #alter } -@@snip [AgentDocSpec.scala](code/docs/agent/AgentDocSpec.scala) { #alter-off } +@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #alter-off } ## Awaiting an Agent's value You can also get a `Future` to the Agents value, that will be completed after the currently queued updates have completed: -@@snip [AgentDocSpec.scala](code/docs/agent/AgentDocSpec.scala) { #read-future } +@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #read-future } See @ref:[Futures](futures.md) for more information on `Futures`. @@ -104,7 +104,7 @@ as-is. They are so-called 'persistent'. Example of monadic usage: -@@snip [AgentDocSpec.scala](code/docs/agent/AgentDocSpec.scala) { #monadic-example } +@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #monadic-example } ## Configuration @@ -120,4 +120,4 @@ that transaction. 
If you send to an Agent within a transaction then the dispatch to the Agent will be held until that transaction commits, and discarded if the transaction is aborted. Here's an example: -@@snip [AgentDocSpec.scala](code/docs/agent/AgentDocSpec.scala) { #transfer-example } \ No newline at end of file +@@snip [AgentDocSpec.scala]($code$/scala/docs/agent/AgentDocSpec.scala) { #transfer-example } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/camel.md b/akka-docs/src/main/paradox/scala/camel.md index 973945713f..2733f39f5e 100644 --- a/akka-docs/src/main/paradox/scala/camel.md +++ b/akka-docs/src/main/paradox/scala/camel.md @@ -27,7 +27,7 @@ APIs. The [camel-extra](http://code.google.com/p/camel-extra/) project provides Usage of Camel's integration components in Akka is essentially a one-liner. Here's an example. -@@snip [Introduction.scala](code/docs/camel/Introduction.scala) { #Consumer-mina } +@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #Consumer-mina } The above example exposes an actor over a TCP endpoint via Apache Camel's [Mina component](http://camel.apache.org/mina2.html). The actor implements the endpointUri method to define @@ -36,14 +36,14 @@ clients can immediately send messages to and receive responses from that actor. If the message exchange should go over HTTP (via Camel's `Jetty component`_), only the actor's endpointUri method must be changed. -@@snip [Introduction.scala](code/docs/camel/Introduction.scala) { #Consumer } +@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #Consumer } ### Producer Actors can also trigger message exchanges with external systems i.e. produce to Camel endpoints. -@@snip [Introduction.scala](code/docs/camel/Introduction.scala) { #imports #Producer } +@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #imports #Producer } In the above example, any message sent to this actor will be sent to the JMS queue `orders`. Producer actors may choose from the same set of Camel @@ -73,7 +73,7 @@ The `CamelExtension` object provides access to the [Camel](@github@/akka-camel/s The [Camel](@github@/akka-camel/src/main/scala/akka/camel/Camel.scala) trait in turn provides access to two important Apache Camel objects, the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) and the `ProducerTemplate`_. Below you can see how you can get access to these Apache Camel objects. -@@snip [Introduction.scala](code/docs/camel/Introduction.scala) { #CamelExtension } +@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelExtension } One `CamelExtension` is only loaded once for every one `ActorSystem`, which makes it safe to call the `CamelExtension` at any point in your code to get to the Apache Camel objects associated with it. There is one [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) and one `ProducerTemplate`_ for every one `ActorSystem` that uses a `CamelExtension`. @@ -83,7 +83,7 @@ This interface define a single method `getContext` used to load the [CamelContex Below an example on how to add the ActiveMQ component to the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java), which is required when you would like to use the ActiveMQ component. 
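Roughly, and assuming the `activemq-camel` module is on the classpath so that `ActiveMQComponent` is available (the broker URL is a placeholder), the registration could look like this:

```scala
import akka.actor.ActorSystem
import akka.camel.CamelExtension
import org.apache.activemq.camel.component.ActiveMQComponent

object AddActiveMQ extends App {
  val system = ActorSystem("some-system")
  val camel = CamelExtension(system)

  // Register the component under the "activemq" scheme so endpoint URIs
  // such as "activemq:queue:Orders" can be resolved.
  camel.context.addComponent(
    "activemq",
    ActiveMQComponent.activeMQComponent("nio://localhost:61616"))
}
```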
-@@snip [Introduction.scala](code/docs/camel/Introduction.scala) { #CamelExtensionAddComponent } +@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelExtensionAddComponent } The [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) joins the lifecycle of the `ActorSystem` and `CamelExtension` it is associated with; the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) is started when the `CamelExtension` is created, and it is shut down when the associated `ActorSystem` is shut down. The same is true for the `ProducerTemplate`_. @@ -96,12 +96,12 @@ Publication is done asynchronously; setting up an endpoint may still be in progr requested the actor to be created. Some Camel components can take a while to startup, and in some cases you might want to know when the endpoints are activated and ready to be used. The [Camel](@github@/akka-camel/src/main/scala/akka/camel/Camel.scala) trait allows you to find out when the endpoint is activated or deactivated. -@@snip [Introduction.scala](code/docs/camel/Introduction.scala) { #CamelActivation } +@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelActivation } The above code shows that you can get a `Future` to the activation of the route from the endpoint to the actor, or you can wait in a blocking fashion on the activation of the route. An `ActivationTimeoutException` is thrown if the endpoint could not be activated within the specified timeout. Deactivation works in a similar fashion: -@@snip [Introduction.scala](code/docs/camel/Introduction.scala) { #CamelDeactivation } +@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelDeactivation } Deactivation of a Consumer or a Producer actor happens when the actor is terminated. For a Consumer, the route to the actor is stopped. For a Producer, the [SendProcessor](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java) is stopped. A `DeActivationTimeoutException` is thrown if the associated camel objects could not be deactivated within the specified timeout. @@ -113,7 +113,7 @@ trait. For example, the following actor class (Consumer1) implements the endpointUri method, which is declared in the Consumer trait, in order to receive messages from the `file:data/input/actor` Camel endpoint. -@@snip [Consumers.scala](code/docs/camel/Consumers.scala) { #Consumer1 } +@@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer1 } Whenever a file is put into the data/input/actor directory, its content is picked up by the Camel [file component](http://camel.apache.org/file2.html) and sent as message to the @@ -125,7 +125,7 @@ Here's another example that sets the endpointUri to component`_ to start an embedded [Jetty](http://www.eclipse.org/jetty/) server, accepting HTTP connections from localhost on port 8877. -@@snip [Consumers.scala](code/docs/camel/Consumers.scala) { #Consumer2 } +@@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer2 } After starting the actor, clients can send messages to that actor by POSTing to `http://localhost:8877/camel/default`. 
The actor sends a response by using the @@ -152,7 +152,7 @@ In this case, consumer actors must reply either with a special akka.camel.Ack message (positive acknowledgement) or a akka.actor.Status.Failure (negative acknowledgement). -@@snip [Consumers.scala](code/docs/camel/Consumers.scala) { #Consumer3 } +@@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer3 } ### Consumer timeout @@ -169,13 +169,13 @@ and the actor replies to the endpoint when the response is ready. The ask reques result in the [Exchange](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Exchange.java) failing with a TimeoutException set on the failure of the [Exchange](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Exchange.java). The timeout on the consumer actor can be overridden with the `replyTimeout`, as shown below. -@@snip [Consumers.scala](code/docs/camel/Consumers.scala) { #Consumer4 } +@@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer4 } ## Producer Actors For sending messages to Camel endpoints, actors need to mixin the [Producer](@github@/akka-camel/src/main/scala/akka/camel/Producer.scala) trait and implement the endpointUri method. -@@snip [Producers.scala](code/docs/camel/Producers.scala) { #Producer1 } +@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Producer1 } Producer1 inherits a default implementation of the receive method from the Producer trait. To customize a producer actor's default behavior you must override the [Producer](@github@/akka-camel/src/main/scala/akka/camel/Producer.scala).transformResponse and @@ -189,7 +189,7 @@ configured endpoint) will, by default, be returned to the original sender. The following example uses the ask pattern to send a message to a Producer actor and waits for a response. -@@snip [Producers.scala](code/docs/camel/Producers.scala) { #AskProducer } +@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #AskProducer } The future contains the response CamelMessage, or an `AkkaCamelException` when an error occurred, which contains the headers of the response. @@ -201,12 +201,12 @@ response processing by overriding the routeResponse method. In the following exa message is forwarded to a target actor instead of being replied to the original sender. -@@snip [Producers.scala](code/docs/camel/Producers.scala) { #RouteResponse } +@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #RouteResponse } Before producing messages to endpoints, producer actors can pre-process them by overriding the [Producer](@github@/akka-camel/src/main/scala/akka/camel/Producer.scala).transformOutgoingMessage method. -@@snip [Producers.scala](code/docs/camel/Producers.scala) { #TransformOutgoingMessage } +@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #TransformOutgoingMessage } ### Producer configuration options @@ -215,14 +215,14 @@ one-way or two-way (by initiating in-only or in-out message exchanges, respectively). By default, the producer initiates an in-out message exchange with the endpoint. For initiating an in-only exchange, producer actors have to override the oneway method to return true. 
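For example, a one-way producer could be sketched like this (the queue name is a placeholder):

```scala
import akka.actor.Actor
import akka.camel.Producer

// In-only producer: messages are sent to the endpoint and no response
// is expected, because oneway is overridden to return true.
class OnewayOrderSender extends Actor with Producer {
  def endpointUri = "activemq:queue:Orders"
  override def oneway: Boolean = true
}
```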
-@@snip [Producers.scala](code/docs/camel/Producers.scala) { #Oneway } +@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Oneway } ### Message correlation To correlate request with response messages, applications can set the *Message.MessageExchangeId* message header. -@@snip [Producers.scala](code/docs/camel/Producers.scala) { #Correlate } +@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Correlate } ### ProducerTemplate @@ -230,12 +230,12 @@ The [Producer](@github@/akka-camel/src/main/scala/akka/camel/Producer.scala) tra convenient way for actors to produce messages to Camel endpoints. Actors may also use a Camel `ProducerTemplate`_ for producing messages to endpoints. -@@snip [Producers.scala](code/docs/camel/Producers.scala) { #ProducerTemplate } +@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #ProducerTemplate } For initiating a two-way message exchange, one of the `ProducerTemplate.request*` methods must be used. -@@snip [Producers.scala](code/docs/camel/Producers.scala) { #RequestProducerTemplate } +@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #RequestProducerTemplate } ## Asynchronous routing @@ -349,7 +349,7 @@ actor's path. the Akka camel package contains an implicit `toActorRouteDefinitio reference an `ActorRef` directly as shown in the below example, The route starts from a [Jetty](http://www.eclipse.org/jetty/) endpoint and ends at the target actor. -@@snip [CustomRoute.scala](code/docs/camel/CustomRoute.scala) { #CustomRoute } +@@snip [CustomRoute.scala]($code$/scala/docs/camel/CustomRoute.scala) { #CustomRoute } When a message is received on the jetty endpoint, it is routed to the Responder actor, which in return replies back to the client of the HTTP request. @@ -366,7 +366,7 @@ For example, an extension could be a custom error handler that redelivers messag The following examples demonstrate how to extend a route to a consumer actor for handling exceptions thrown by that actor. -@@snip [CustomRoute.scala](code/docs/camel/CustomRoute.scala) { #ErrorThrowingConsumer } +@@snip [CustomRoute.scala]($code$/scala/docs/camel/CustomRoute.scala) { #ErrorThrowingConsumer } The above ErrorThrowingConsumer sends the Failure back to the sender in preRestart because the Exception that is thrown in the actor would diff --git a/akka-docs/src/main/paradox/scala/cluster-client.md b/akka-docs/src/main/paradox/scala/cluster-client.md index ac17fbcd33..e112334214 100644 --- a/akka-docs/src/main/paradox/scala/cluster-client.md +++ b/akka-docs/src/main/paradox/scala/cluster-client.md @@ -88,17 +88,17 @@ akka.extensions = ["akka.cluster.client.ClusterClientReceptionist"] Next, register the actors that should be available for the client. -@@snip [ClusterClientSpec.scala]../../../../../akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #server } +@@snip [ClusterClientSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #server } On the client you create the `ClusterClient` actor and use it as a gateway for sending messages to the actors identified by their path (without address information) somewhere in the cluster. 
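As a sketch, with placeholder host names and a hypothetical `/user/serviceA` service registered on the cluster side:

```scala
import akka.actor.{ ActorPath, ActorSystem }
import akka.cluster.client.{ ClusterClient, ClusterClientSettings }

object ClientSketch extends App {
  val system = ActorSystem("ClientSystem")

  // Initial contact points; normally taken from configuration.
  val initialContacts = Set(
    ActorPath.fromString("akka.tcp://OtherSys@host1:2552/system/receptionist"),
    ActorPath.fromString("akka.tcp://OtherSys@host2:2552/system/receptionist"))

  val client = system.actorOf(
    ClusterClient.props(
      ClusterClientSettings(system).withInitialContacts(initialContacts)),
    "client")

  // Address the service by its path only; the client routes the message
  // to a node that has registered an actor under that path.
  client ! ClusterClient.Send("/user/serviceA", "hello", localAffinity = true)
}
```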
-@@snip [ClusterClientSpec.scala]../../../../../akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #client } +@@snip [ClusterClientSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #client } The `initialContacts` parameter is a `Set[ActorPath]`, which can be created like this: -@@snip [ClusterClientSpec.scala]../../../../../akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #initialContacts } +@@snip [ClusterClientSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #initialContacts } You will probably define the address information of the initial contact points in configuration or system property. See also [Configuration](#cluster-client-config-scala). @@ -129,11 +129,11 @@ The following code snippet declares an actor that will receive notifications on receptionists), as they become available. The code illustrates subscribing to the events and receiving the `ClusterClient` initial state. -@@snip [ClusterClientSpec.scala]../../../../../akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #clientEventsListener } +@@snip [ClusterClientSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #clientEventsListener } Similarly we can have an actor that behaves in a similar fashion for learning what cluster clients contact a `ClusterClientReceptionist`: -@@snip [ClusterClientSpec.scala]../../../../../akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #receptionistEventsListener } +@@snip [ClusterClientSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala) { #receptionistEventsListener } ## Dependencies @@ -161,7 +161,7 @@ maven: The `ClusterClientReceptionist` extension (or `ClusterReceptionistSettings`) can be configured with the following properties: -@@snip [reference.conf]../../../../../akka-cluster-tools/src/main/resources/reference.conf) { #receptionist-ext-config } +@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #receptionist-ext-config } The following configuration properties are read by the `ClusterClientSettings` when created with a `ActorSystem` parameter. It is also possible to amend the `ClusterClientSettings` @@ -169,7 +169,7 @@ or create it from another config section with the same layout as below. `Cluster a parameter to the `ClusterClient.props` factory method, i.e. each client can be configured with different settings if needed. -@@snip [reference.conf]../../../../../akka-cluster-tools/src/main/resources/reference.conf) { #cluster-client-config } +@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #cluster-client-config } ## Failure handling diff --git a/akka-docs/src/main/paradox/scala/cluster-metrics.md b/akka-docs/src/main/paradox/scala/cluster-metrics.md index f48f6059be..85af0468ec 100644 --- a/akka-docs/src/main/paradox/scala/cluster-metrics.md +++ b/akka-docs/src/main/paradox/scala/cluster-metrics.md @@ -118,11 +118,11 @@ Let's take a look at this router in action. 
What can be more demanding than calc The backend worker that performs the factorial calculation: -@@snip [FactorialBackend.scala](code/docs/cluster/FactorialBackend.scala) { #backend } +@@snip [FactorialBackend.scala]($code$/scala/docs/cluster/FactorialBackend.scala) { #backend } The frontend that receives user jobs and delegates to the backends via the router: -@@snip [FactorialFrontend.scala](code/docs/cluster/FactorialFrontend.scala) { #frontend } +@@snip [FactorialFrontend.scala]($code$/scala/docs/cluster/FactorialFrontend.scala) { #frontend } As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows: @@ -152,9 +152,9 @@ other things work in the same way as other routers. The same type of router could also have been defined in code: -@@snip [FactorialFrontend.scala](code/docs/cluster/FactorialFrontend.scala) { #router-lookup-in-code } +@@snip [FactorialFrontend.scala]($code$/scala/docs/cluster/FactorialFrontend.scala) { #router-lookup-in-code } -@@snip [FactorialFrontend.scala](code/docs/cluster/FactorialFrontend.scala) { #router-deploy-in-code } +@@snip [FactorialFrontend.scala]($code$/scala/docs/cluster/FactorialFrontend.scala) { #router-deploy-in-code } The easiest way to run **Adaptive Load Balancing** example yourself is to download the ready to run [Akka Cluster Sample with Scala](@exampleCodeService@/akka-samples-cluster-scala) @@ -165,7 +165,7 @@ The source code of this sample can be found in the [Akka Samples Repository](@sa It is possible to subscribe to the metrics events directly to implement other functionality. -@@snip [MetricsListener.scala](code/docs/cluster/MetricsListener.scala) { #metrics-listener } +@@snip [MetricsListener.scala]($code$/scala/docs/cluster/MetricsListener.scala) { #metrics-listener } ## Custom Metrics Collector @@ -183,4 +183,4 @@ Custom metrics collector implementation class must be specified in the The Cluster metrics extension can be configured with the following properties: -@@snip [reference.conf]../../../../../akka-cluster-metrics/src/main/resources/reference.conf) { # } \ No newline at end of file +@@snip [reference.conf]($akka$/akka-cluster-metrics/src/main/resources/reference.conf) \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/cluster-sharding.md b/akka-docs/src/main/paradox/scala/cluster-sharding.md index f34d46507f..8420facdf0 100644 --- a/akka-docs/src/main/paradox/scala/cluster-sharding.md +++ b/akka-docs/src/main/paradox/scala/cluster-sharding.md @@ -35,7 +35,7 @@ See @ref:[Downing](../java/cluster-usage.md#automatic-vs-manual-downing-java). This is how an entity actor may look like: -@@snip [ClusterShardingSpec.scala]../../../../../akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-actor } +@@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-actor } The above actor uses event sourcing and the support provided in `PersistentActor` to store its state. It does not have to be a persistent actor, but in case of failure or migration of entities between nodes it must be able to recover @@ -48,12 +48,12 @@ When using the sharding extension you are first, typically at system startup on in the cluster, supposed to register the supported entity types with the `ClusterSharding.start` method. `ClusterSharding.start` gives you the reference which you can pass along. 
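As an illustrative sketch, assuming a `Counter` entity actor and an `EntityEnvelope(id, payload)` message type, both invented here (the referenced `ClusterShardingSpec.scala` snippet is the authoritative version):

```scala
import akka.actor.{ Actor, ActorRef, ActorSystem, Props }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings, ShardRegion }

final case class EntityEnvelope(id: Long, payload: Any)

class Counter extends Actor {
  var count = 0
  def receive = {
    case EntityEnvelope(_, "increment") => count += 1
  }
}

object StartShardingSketch extends App {
  val system = ActorSystem("ClusterSystem")

  val extractEntityId: ShardRegion.ExtractEntityId = {
    case msg @ EntityEnvelope(id, _) => (id.toString, msg)
  }
  val extractShardId: ShardRegion.ExtractShardId = {
    case EntityEnvelope(id, _) => (id % 100).toString
  }

  // Registers the "Counter" entity type and returns the local ShardRegion
  // actor, which is the entry point for all messages to these entities.
  val counterRegion: ActorRef = ClusterSharding(system).start(
    typeName = "Counter",
    entityProps = Props[Counter],
    settings = ClusterShardingSettings(system),
    extractEntityId = extractEntityId,
    extractShardId = extractShardId)
}
```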
-@@snip [ClusterShardingSpec.scala]../../../../../akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-start } +@@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-start } The `extractEntityId` and `extractShardId` are two application specific functions to extract the entity identifier and the shard identifier from incoming messages. -@@snip [ClusterShardingSpec.scala]../../../../../akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-extractor } +@@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-extractor } This example illustrates two different ways to define the entity identifier in the messages: @@ -88,7 +88,7 @@ The `ShardRegion` will lookup the location of the shard for the entity if it doe delegate the message to the right node and it will create the entity actor on demand, i.e. when the first message for a specific entity is delivered. -@@snip [ClusterShardingSpec.scala]../../../../../akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-usage } +@@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-usage } A more comprehensive sample is available in the tutorial named [Akka Cluster Sharding with Scala!](https://github.com/typesafehub/activator-akka-cluster-sharding-scala). @@ -303,11 +303,11 @@ If you need to use another `supervisorStrategy` for the entity actors than the d you need to create an intermediate parent actor that defines the `supervisorStrategy` to the child entity actor. -@@snip [ClusterShardingSpec.scala]../../../../../akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #supervisor } +@@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #supervisor } You start such a supervisor in the same way as if it was the entity actor. -@@snip [ClusterShardingSpec.scala]../../../../../akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-supervisor-start } +@@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-supervisor-start } Note that stopped entities will be started again when a new message is targeted to the entity. @@ -400,7 +400,7 @@ with the same layout as below. `ClusterShardingSettings` is a parameter to the ` the `ClusterSharding` extension, i.e. each each entity type can be configured with different settings if needed. -@@snip [reference.conf]../../../../../akka-cluster-sharding/src/main/resources/reference.conf) { #sharding-ext-config } +@@snip [reference.conf]($akka$/akka-cluster-sharding/src/main/resources/reference.conf) { #sharding-ext-config } Custom shard allocation strategy can be defined in an optional parameter to `ClusterSharding.start`. 
See the API documentation of `ShardAllocationStrategy` for details of diff --git a/akka-docs/src/main/paradox/scala/cluster-singleton.md b/akka-docs/src/main/paradox/scala/cluster-singleton.md index 825b9bc55a..1a3041c243 100644 --- a/akka-docs/src/main/paradox/scala/cluster-singleton.md +++ b/akka-docs/src/main/paradox/scala/cluster-singleton.md @@ -86,7 +86,7 @@ scenario when integrating with external systems. On each node in the cluster you need to start the `ClusterSingletonManager` and supply the `Props` of the singleton actor, in this case the JMS queue consumer. -@@snip [ClusterSingletonManagerSpec.scala]../../../../../akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-manager } +@@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-manager } Here we limit the singleton to nodes tagged with the `"worker"` role, but all nodes, independent of role, can be used by not specifying `withRole`. @@ -97,12 +97,12 @@ perfectly fine `terminationMessage` if you only need to stop the actor. Here is how the singleton actor handles the `terminationMessage` in this example. -@@snip [ClusterSingletonManagerSpec.scala]../../../../../akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #consumer-end } +@@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #consumer-end } With the names given above, access to the singleton can be obtained from any cluster node using a properly configured proxy. -@@snip [ClusterSingletonManagerSpec.scala]../../../../../akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-proxy } +@@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-proxy } A more comprehensive sample is available in the tutorial named [Distributed workers with Akka and Scala!](https://github.com/typesafehub/activator-akka-distributed-workers). @@ -134,7 +134,7 @@ or create it from another config section with the same layout as below. `Cluster a parameter to the `ClusterSingletonManager.props` factory method, i.e. each singleton can be configured with different settings if needed. -@@snip [reference.conf]../../../../../akka-cluster-tools/src/main/resources/reference.conf) { #singleton-config } +@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #singleton-config } The following configuration properties are read by the `ClusterSingletonProxySettings` when created with a `ActorSystem` parameter. It is also possible to amend the `ClusterSingletonProxySettings` @@ -142,4 +142,4 @@ or create it from another config section with the same layout as below. `Cluster a parameter to the `ClusterSingletonProxy.props` factory method, i.e. each singleton proxy can be configured with different settings if needed. 
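Since the `ClusterSingletonManagerSpec` code is only referenced by path here, this is a rough sketch of the manager/proxy pair described above. The `Consumer` actor, the actor names and the `worker` role are illustrative, and the proxy's `singletonManagerPath` must match the name the manager was started with.

```scala
import akka.actor.{ Actor, ActorSystem, PoisonPill, Props }
import akka.cluster.singleton.{
  ClusterSingletonManager, ClusterSingletonManagerSettings,
  ClusterSingletonProxy, ClusterSingletonProxySettings
}

// Illustrative singleton actor (the JMS queue consumer in the example).
class Consumer extends Actor {
  def receive = {
    case _ => // consume the message
  }
}

object SingletonApp extends App {
  val system = ActorSystem("ClusterSystem")

  // Started on every node with the "worker" role; only the oldest of them
  // actually hosts the singleton instance.
  system.actorOf(
    ClusterSingletonManager.props(
      singletonProps = Props[Consumer],
      terminationMessage = PoisonPill,
      settings = ClusterSingletonManagerSettings(system).withRole("worker")),
    name = "consumer")

  // A proxy that can be started on any node and routes to the current
  // singleton, buffering messages during hand-over.
  val proxy = system.actorOf(
    ClusterSingletonProxy.props(
      singletonManagerPath = "/user/consumer",
      settings = ClusterSingletonProxySettings(system).withRole("worker")),
    name = "consumerProxy")

  proxy ! "hello singleton"
}
```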
-@@snip [reference.conf]../../../../../akka-cluster-tools/src/main/resources/reference.conf) { #singleton-proxy-config } \ No newline at end of file +@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #singleton-proxy-config } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/cluster-usage.md b/akka-docs/src/main/paradox/scala/cluster-usage.md index e1c3c7a1d4..545d6b2e3d 100644 --- a/akka-docs/src/main/paradox/scala/cluster-usage.md +++ b/akka-docs/src/main/paradox/scala/cluster-usage.md @@ -71,7 +71,7 @@ ip-addresses or host names of the machines in `application.conf` instead of `127 An actor that uses the cluster extension may look like this: -@@snip [SimpleClusterListener.scala](code/docs/cluster/SimpleClusterListener.scala) { type=scala } +@@snip [SimpleClusterListener.scala]($code$/scala/docs/cluster/SimpleClusterListener.scala) { type=scala } The actor registers itself as subscriber of certain cluster events. It receives events corresponding to the current state of the cluster when the subscription starts and then it receives events for changes that happen in the cluster. @@ -215,7 +215,7 @@ A more graceful exit can be performed if you tell the cluster that a node shall This can be performed using [cluster_jmx_scala](#cluster-jmx-scala) or [cluster_http_scala](#cluster-http-scala). It can also be performed programmatically with: -@@snip [ClusterDocSpec.scala](code/docs/cluster/ClusterDocSpec.scala) { #leave } +@@snip [ClusterDocSpec.scala]($code$/scala/docs/cluster/ClusterDocSpec.scala) { #leave } Note that this command can be issued to any member in the cluster, not necessarily the one that is leaving. @@ -257,7 +257,7 @@ have no knowledge about the existence of the new members. You should for example You can subscribe to change notifications of the cluster membership by using `Cluster(system).subscribe`. -@@snip [SimpleClusterListener2.scala](code/docs/cluster/SimpleClusterListener2.scala) { # } +@@snip [SimpleClusterListener2.scala]($code$/scala/docs/cluster/SimpleClusterListener2.scala) { #subscribe } A snapshot of the full state, `akka.cluster.ClusterEvent.CurrentClusterState`, is sent to the subscriber as the first message, followed by events for incremental updates. @@ -274,7 +274,7 @@ the events corresponding to the current state to mimic what you would have seen listening to the events when they occurred in the past. Note that those initial events only correspond to the current state and it is not the full history of all changes that actually has occurred in the cluster. -@@snip [SimpleClusterListener.scala](code/docs/cluster/SimpleClusterListener.scala) { #subscribe } +@@snip [SimpleClusterListener.scala]($code$/scala/docs/cluster/SimpleClusterListener.scala) { #subscribe } The events to track the life-cycle of members are: @@ -309,11 +309,11 @@ added or removed to the cluster dynamically. 
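A subscriber along the lines of the `SimpleClusterListener` referenced above might look roughly like this; it is a sketch, not the snippet itself, and the chosen event classes and log messages are illustrative.

```scala
import akka.actor.{ Actor, ActorLogging }
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._

class ClusterEventListener extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  // With InitialStateAsEvents the current state is replayed as events first,
  // followed by events for changes happening after the subscription.
  override def preStart(): Unit =
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])

  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case MemberUp(member) =>
      log.info("Member is Up: {}", member.address)
    case UnreachableMember(member) =>
      log.info("Member detected as unreachable: {}", member)
    case MemberRemoved(member, previousStatus) =>
      log.info("Member is Removed: {} after {}", member.address, previousStatus)
    case _: MemberEvent => // ignore other member events
  }
}
```

The graceful exit mentioned earlier is then simply `cluster.leave(cluster.selfAddress)`, issued from any member for the node that should go away.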
Messages: -@@snip [TransformationMessages.scala](code/docs/cluster/TransformationMessages.scala) { #messages } +@@snip [TransformationMessages.scala]($code$/scala/docs/cluster/TransformationMessages.scala) { #messages } The backend worker that performs the transformation job: -@@snip [TransformationBackend.scala](code/docs/cluster/TransformationBackend.scala) { #backend } +@@snip [TransformationBackend.scala]($code$/scala/docs/cluster/TransformationBackend.scala) { #backend } Note that the `TransformationBackend` actor subscribes to cluster events to detect new, potential, frontend nodes, and sends them a registration message so that they know @@ -321,7 +321,7 @@ that they can use the backend worker. The frontend that receives user jobs and delegates to one of the registered backend workers: -@@snip [TransformationFrontend.scala](code/docs/cluster/TransformationFrontend.scala) { #frontend } +@@snip [TransformationFrontend.scala]($code$/scala/docs/cluster/TransformationFrontend.scala) { #frontend } Note that the `TransformationFrontend` actor watches the registered backend to be able to remove it from its list of available backend workers. @@ -373,7 +373,7 @@ You can start the actors in a `registerOnMemberUp` callback, which will be invoked when the current member status is changed to 'Up', i.e. the cluster has at least the defined number of members. -@@snip [FactorialFrontend.scala](code/docs/cluster/FactorialFrontend.scala) { #registerOnUp } +@@snip [FactorialFrontend.scala]($code$/scala/docs/cluster/FactorialFrontend.scala) { #registerOnUp } This callback can be used for other things than starting actors. @@ -573,7 +573,7 @@ Set it to a lower value if you want to limit total number of routees. The same type of router could also have been defined in code: -@@snip [StatsService.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala) { #router-lookup-in-code } +@@snip [StatsService.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala) { #router-lookup-in-code } See [cluster_configuration_scala](#cluster-configuration-scala) section for further descriptions of the settings. @@ -590,15 +590,15 @@ the average number of characters per word when all results have been collected. Messages: -@@snip [StatsMessages.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsMessages.scala) { #messages } +@@snip [StatsMessages.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsMessages.scala) { #messages } The worker that counts the number of characters in each word: -@@snip [StatsWorker.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala) { #worker } +@@snip [StatsWorker.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala) { #worker } The service that receives text from users and splits it up into words, delegates to workers and aggregates: -@@snip [StatsService.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala) { #service } +@@snip [StatsService.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala) { #service } Note, nothing cluster specific so far, just plain actors. @@ -656,7 +656,7 @@ Set it to a lower value if you want to limit total number of routees.
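Returning to the `registerOnMemberUp` callback mentioned above, a minimal sketch of deferring actor creation until the member is 'Up' could look as follows; the `FactorialFrontend` here is only a stand-in for the sample's frontend actor.

```scala
import akka.actor.{ Actor, ActorSystem, Props }
import akka.cluster.Cluster

class FactorialFrontend extends Actor {
  def receive = {
    case _ => // delegate work to the backend router
  }
}

object FrontendApp extends App {
  val system = ActorSystem("ClusterSystem")

  // The callback runs once this member's status becomes 'Up'; combined with
  // akka.cluster.min-nr-of-members this delays startup until enough members
  // have joined the cluster.
  Cluster(system).registerOnMemberUp {
    system.actorOf(Props[FactorialFrontend], name = "factorialFrontend")
  }
}
```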
The same type of router could also have been defined in code: -@@snip [StatsService.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala) { #router-deploy-in-code } +@@snip [StatsService.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala) { #router-deploy-in-code } See [cluster_configuration_scala](#cluster-configuration-scala) section for further descriptions of the settings. @@ -725,12 +725,12 @@ add the `sbt-multi-jvm` plugin and the dependency to `akka-multi-node-testkit`. First, as described in @ref:[Multi Node Testing](../scala/dev/multi-node-testing.md), we need some scaffolding to configure the `MultiNodeSpec`. Define the participating roles and their [cluster_configuration_scala](#cluster-configuration-scala) in an object extending `MultiNodeConfig`: -@@snip [StatsSampleSpec.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #MultiNodeConfig } +@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #MultiNodeConfig } Define one concrete test class for each role/node. These will be instantiated on the different nodes (JVMs). They can be implemented differently, but often they are the same and extend an abstract test class, as illustrated here. -@@snip [StatsSampleSpec.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #concrete-tests } +@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #concrete-tests } Note the naming convention of these classes. The name of the classes must end with `MultiJvmNode1`, `MultiJvmNode2` and so on. It is possible to define another suffix to be used by the `sbt-multi-jvm`, but the default should be @@ -738,18 +738,18 @@ fine in most cases. Then the abstract `MultiNodeSpec`, which takes the `MultiNodeConfig` as constructor parameter. -@@snip [StatsSampleSpec.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #abstract-test } +@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #abstract-test } Most of this can of course be extracted to a separate trait to avoid repeating this in all your tests. Typically you begin your test by starting up the cluster and let the members join, and create some actors. That can be done like this: -@@snip [StatsSampleSpec.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #startup-cluster } +@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #startup-cluster } From the test you interact with the cluster using the `Cluster` extension, e.g. `join`. -@@snip [StatsSampleSpec.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #join } +@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #join } Notice how the *testActor* from @ref:[testkit](testing.md) is added as [subscriber](#cluster-subscriber-scala) to cluster changes and then waiting for certain events, such as in this case all members becoming 'Up'. 
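To make the scaffolding above concrete, a `MultiNodeConfig` for such a test might look roughly like this; the role names and the configuration string are illustrative, not the exact `StatsSampleSpec` code.

```scala
import akka.remote.testkit.MultiNodeConfig
import com.typesafe.config.ConfigFactory

object StatsSampleConfig extends MultiNodeConfig {
  // One role per participating node (JVM) in the test.
  val first  = role("first")
  val second = role("second")
  val third  = role("third")

  // Cluster configuration shared by all nodes in this test.
  commonConfig(ConfigFactory.parseString("""
    akka.actor.provider = cluster
    akka.cluster.roles = [compute]
    akka.loglevel = INFO
    """))
}
```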
@@ -757,7 +757,7 @@ to cluster changes and then waiting for certain events, such as in this case all The above code was running for all roles (JVMs). `runOn` is a convenient utility to declare that a certain block of code should only run for a specific role. -@@snip [StatsSampleSpec.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #test-statsService } +@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #test-statsService } Once again we take advantage of the facilities in @ref:[testkit](testing.md) to verify expected behavior. Here using `testActor` as sender (via `ImplicitSender`) and verifying the reply with `expectMsgPF`. @@ -765,7 +765,7 @@ Here using `testActor` as sender (via `ImplicitSender`) and verifying the reply In the above code you can see `node(third)`, which is useful facility to get the root actor reference of the actor system for a specific role. This can also be used to grab the `akka.actor.Address` of that node. -@@snip [StatsSampleSpec.scala]../../../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #addresses } +@@snip [StatsSampleSpec.scala]($akka$/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala) { #addresses } ## Management diff --git a/akka-docs/src/main/paradox/scala/common/circuitbreaker.md b/akka-docs/src/main/paradox/scala/common/circuitbreaker.md index 41973b057a..58694c2952 100644 --- a/akka-docs/src/main/paradox/scala/common/circuitbreaker.md +++ b/akka-docs/src/main/paradox/scala/common/circuitbreaker.md @@ -87,11 +87,11 @@ Here's how a :class: #### Scala -@@snip [CircuitBreakerDocSpec.scala](code/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #imports1 #circuit-breaker-initialization } +@@snip [CircuitBreakerDocSpec.scala]($code$/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #imports1 #circuit-breaker-initialization } #### Java -@@snip [DangerousJavaActor.java](code/docs/circuitbreaker/DangerousJavaActor.java) { #imports1 #circuit-breaker-initialization } +@@snip [DangerousJavaActor.java]($code$/java/jdocs/circuitbreaker/DangerousJavaActor.java) { #imports1 #circuit-breaker-initialization } ### Future & Synchronous based API @@ -101,11 +101,11 @@ The Synchronous API would also wrap your call with the circuit breaker logic, ho #### Scala -@@snip [CircuitBreakerDocSpec.scala](code/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #circuit-breaker-usage } +@@snip [CircuitBreakerDocSpec.scala]($code$/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #circuit-breaker-usage } #### Java -@@snip [DangerousJavaActor.java](code/docs/circuitbreaker/DangerousJavaActor.java) { #circuit-breaker-usage } +@@snip [DangerousJavaActor.java]($code$/java/jdocs/circuitbreaker/DangerousJavaActor.java) { #circuit-breaker-usage } @@@ note @@ -146,7 +146,7 @@ Type of `defineFailureFn`: `Try[T] ⇒ Boolean` This is a function which takes in a `Try[T]` and return a `Boolean`. The `Try[T]` correspond to the `Future[T]` of the protected call. This function should return `true` if the call should increase failure count, else false. 
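As a rough sketch of the failure definition just described, assuming the `withCircuitBreaker` overload that accepts such a function; `luckyNumber` is a made-up protected call and the actor system name is arbitrary.

```scala
import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.duration._
import scala.util.{ Success, Try }
import akka.actor.ActorSystem
import akka.pattern.CircuitBreaker

object EvenNumberAsFailure extends App {
  implicit val system: ActorSystem = ActorSystem("breaker-demo")
  implicit val ec: ExecutionContext = system.dispatcher

  // A successful result still counts as a failure when it is an even number.
  val evenNumberAsFailure: Try[Int] => Boolean = {
    case Success(n) => n % 2 == 0
    case _          => true // exceptions and timeouts always count as failures
  }

  val breaker = new CircuitBreaker(
    system.scheduler,
    maxFailures = 5,
    callTimeout = 10.seconds,
    resetTimeout = 1.minute)

  def luckyNumber(): Int = 41 // made-up protected call

  breaker.withCircuitBreaker(Future(luckyNumber()), evenNumberAsFailure)
}
```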
-@@snip [CircuitBreakerDocSpec.scala](code/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #even-no-as-failure } +@@snip [CircuitBreakerDocSpec.scala]($code$/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #even-no-as-failure } #### Java @@ -155,7 +155,7 @@ Type of `defineFailureFn`: `BiFunction[Optional[T], Optional[Throwable], java.l For Java Api, the signature is a bit different as there's no `Try` in Java, so the response of protected call is modelled using `Optional[T]` for succeeded return value and `Optional[Throwable]` for exception, and the rules of return type is the same. Ie. this function should return `true` if the call should increase failure count, else false. -@@snip [EvenNoFailureJavaExample.java](code/docs/circuitbreaker/EvenNoFailureJavaExample.java) { #even-no-as-failure } +@@snip [EvenNoFailureJavaExample.java]($code$/java/jdocs/circuitbreaker/EvenNoFailureJavaExample.java) { #even-no-as-failure } ### Low level API @@ -171,8 +171,8 @@ The below examples doesn't make a remote call when the state is *HalfOpen*. Usin #### Scala -@@snip [CircuitBreakerDocSpec.scala](code/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #circuit-breaker-tell-pattern } +@@snip [CircuitBreakerDocSpec.scala]($code$/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala) { #circuit-breaker-tell-pattern } #### Java -@@snip [TellPatternJavaActor.java](code/docs/circuitbreaker/TellPatternJavaActor.java) { #circuit-breaker-tell-pattern } \ No newline at end of file +@@snip [TellPatternJavaActor.java]($code$/java/jdocs/circuitbreaker/TellPatternJavaActor.java) { #circuit-breaker-tell-pattern } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/common/duration.md b/akka-docs/src/main/paradox/scala/common/duration.md index febc38405b..3ccf0adc25 100644 --- a/akka-docs/src/main/paradox/scala/common/duration.md +++ b/akka-docs/src/main/paradox/scala/common/duration.md @@ -21,7 +21,7 @@ when finite-ness does not matter; this is a supertype of `FiniteDuration` In Scala durations are constructable using a mini-DSL and support all expected arithmetic operations: -@@snip [Sample.scala](code/docs/duration/Sample.scala) { #dsl } +@@snip [Sample.scala]($code$/scala/docs/duration/Sample.scala) { #dsl } @@@ note @@ -37,9 +37,9 @@ might go wrong, depending on what starts the next line. Java provides less syntactic sugar, so you have to spell out the operations as method calls instead: -@@snip [Java.java](code/docs/duration/Java.java) { #import } +@@snip [Java.java]($code$/java/jdocs/duration/Java.java) { #import } -@@snip [Java.java](code/docs/duration/Java.java) { #dsl } +@@snip [Java.java]($code$/java/jdocs/duration/Java.java) { #dsl } ## Deadline @@ -48,8 +48,8 @@ of an absolute point in time, and support deriving a duration from this by calcu difference between now and the deadline. This is useful when you want to keep one overall deadline without having to take care of the book-keeping wrt. 
the passing of time yourself: -@@snip [Sample.scala](code/docs/duration/Sample.scala) { #deadline } +@@snip [Sample.scala]($code$/scala/docs/duration/Sample.scala) { #deadline } In Java you create these from durations: -@@snip [Java.java](code/docs/duration/Java.java) { #deadline } \ No newline at end of file +@@snip [Java.java]($code$/java/jdocs/duration/Java.java) { #deadline } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/dev/multi-jvm-testing.md b/akka-docs/src/main/paradox/scala/dev/multi-jvm-testing.md index adf61cecc5..b052c26a47 100644 --- a/akka-docs/src/main/paradox/scala/dev/multi-jvm-testing.md +++ b/akka-docs/src/main/paradox/scala/dev/multi-jvm-testing.md @@ -9,7 +9,7 @@ The multi-JVM testing is an sbt plugin that you can find at [http://github.com/s You can add it as a plugin by adding the following to your project/plugins.sbt: -@@snip [plugins.sbt]../project/plugins.sbt) { #sbt-multi-jvm } +@@snip [plugins.sbt]($akka$/project/plugins.sbt) { #sbt-multi-jvm } You can then add multi-JVM testing to `build.sbt` or `project/Build.scala` by including the `MultiJvm` settings and config. Please note that MultiJvm test sources are located in `src/multi-jvm/...`, diff --git a/akka-docs/src/main/paradox/scala/dev/multi-node-testing.md b/akka-docs/src/main/paradox/scala/dev/multi-node-testing.md index 65810f1a3f..f53a8e30c7 100644 --- a/akka-docs/src/main/paradox/scala/dev/multi-node-testing.md +++ b/akka-docs/src/main/paradox/scala/dev/multi-node-testing.md @@ -164,17 +164,17 @@ We recommend against using `SNAPSHOT` in order to obtain stable builds. First we need some scaffolding to hook up the `MultiNodeSpec` with your favorite test framework. Lets define a trait `STMultiNodeSpec` that uses ScalaTest to start and stop `MultiNodeSpec`. -@@snip [STMultiNodeSpec.scala]../../../../../akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala) { #example } +@@snip [STMultiNodeSpec.scala]($akka$/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala) { #example } Then we need to define a configuration. Lets use two nodes `"node1` and `"node2"` and call it `MultiNodeSampleConfig`. -@@snip [MultiNodeSample.scala]../../../../../akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala) { #package #config } +@@snip [MultiNodeSample.scala]($akka$/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala) { #package #config } And then finally to the node test code. That starts the two nodes, and demonstrates a barrier, and a remote actor message send/receive. -@@snip [MultiNodeSample.scala]../../../../../akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala) { #package #spec } +@@snip [MultiNodeSample.scala]($akka$/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala) { #package #spec } The easiest way to run this example yourself is to download the ready to run [Akka Multi-Node Testing Sample with Scala](@exampleCodeService@/akka-samples-multi-node-scala) diff --git a/akka-docs/src/main/paradox/scala/dispatchers.md b/akka-docs/src/main/paradox/scala/dispatchers.md index 15e3c8f7ff..a4d6deae84 100644 --- a/akka-docs/src/main/paradox/scala/dispatchers.md +++ b/akka-docs/src/main/paradox/scala/dispatchers.md @@ -18,14 +18,14 @@ gives excellent performance in most cases. Dispatchers implement the `ExecutionContext` interface and can thus be used to run `Future` invocations etc. 
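Looking up a configured dispatcher and using it as an `ExecutionContext`, as described above, is roughly the following; a `my-dispatcher` section is assumed to exist in the application configuration.

```scala
import scala.concurrent.{ ExecutionContext, Future }
import akka.actor.ActorSystem

object DispatcherLookup extends App {
  val system = ActorSystem("dispatcher-demo")

  // A dispatcher is an ExecutionContext, so it can run Future bodies directly.
  implicit val executionContext: ExecutionContext =
    system.dispatchers.lookup("my-dispatcher")

  Future {
    // this block runs on my-dispatcher
    42
  }
}
```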
-@@snip [DispatcherDocSpec.scala](code/docs/dispatcher/DispatcherDocSpec.scala) { #lookup } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #lookup } ## Setting the dispatcher for an Actor So in case you want to give your `Actor` a different dispatcher than the default, you need to do two things, of which the first is to configure the dispatcher: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #my-dispatcher-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #my-dispatcher-config } @@@ note @@ -39,7 +39,7 @@ You can read more about parallelism in the JDK's [ForkJoinPool documentation](ht Another example that uses the "thread-pool-executor": > -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #fixed-pool-size-dispatcher-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #fixed-pool-size-dispatcher-config } @@@ note @@ -52,15 +52,15 @@ For more options, see the default-dispatcher section of the supervision. @@ -75,7 +75,7 @@ in the same way as the default strategy defined above. You can combine your own strategy with the default strategy: -@@snip [FaultHandlingDocSpec.scala](code/docs/actor/FaultHandlingDocSpec.scala) { #default-strategy-fallback } +@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #default-strategy-fallback } ### Stopping Supervisor Strategy @@ -113,41 +113,41 @@ strategy. The following section shows the effects of the different directives in practice, where a test setup is needed. First off, we need a suitable supervisor: -@@snip [FaultHandlingDocSpec.scala](code/docs/actor/FaultHandlingDocSpec.scala) { #supervisor } +@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #supervisor } This supervisor will be used to create a child, with which we can experiment: -@@snip [FaultHandlingDocSpec.scala](code/docs/actor/FaultHandlingDocSpec.scala) { #child } +@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #child } The test is easier by using the utilities described in @ref:[Testing Actor Systems](testing.md). -@@snip [FaultHandlingDocSpec.scala](code/docs/actor/FaultHandlingDocSpec.scala) { #testkit } +@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #testkit } Let us create actors: -@@snip [FaultHandlingDocSpec.scala](code/docs/actor/FaultHandlingDocSpec.scala) { #create } +@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #create } The first test shall demonstrate the `Resume` directive, so we try it out by setting some non-initial state in the actor and have it fail: -@@snip [FaultHandlingDocSpec.scala](code/docs/actor/FaultHandlingDocSpec.scala) { #resume } +@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #resume } As you can see the value 42 survives the fault handling directive. 
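For reference while following the directives below, the supervisor used in this walkthrough maps exceptions to directives roughly like this; it is a sketch in the spirit of the referenced `FaultHandlingDocSpec` snippet, not the snippet itself.

```scala
import scala.concurrent.duration._
import akka.actor.{ Actor, OneForOneStrategy, Props }
import akka.actor.SupervisorStrategy._

class Supervisor extends Actor {
  // One directive per exception type, limited to 10 restarts per minute.
  override val supervisorStrategy =
    OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1.minute) {
      case _: ArithmeticException      => Resume
      case _: NullPointerException     => Restart
      case _: IllegalArgumentException => Stop
      case _: Exception                => Escalate
    }

  def receive = {
    // The test sends Props and gets back the ActorRef of the created child.
    case p: Props => sender() ! context.actorOf(p)
  }
}
```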
Now, if we change the failure to a more serious `NullPointerException`, that will no longer be the case: -@@snip [FaultHandlingDocSpec.scala](code/docs/actor/FaultHandlingDocSpec.scala) { #restart } +@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #restart } And finally in case of the fatal `IllegalArgumentException` the child will be terminated by the supervisor: -@@snip [FaultHandlingDocSpec.scala](code/docs/actor/FaultHandlingDocSpec.scala) { #stop } +@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #stop } Up to now the supervisor was completely unaffected by the child’s failure, because the directives set did handle it. In case of an `Exception`, this is not true anymore and the supervisor escalates the failure. -@@snip [FaultHandlingDocSpec.scala](code/docs/actor/FaultHandlingDocSpec.scala) { #escalate-kill } +@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #escalate-kill } The supervisor itself is supervised by the top-level actor provided by the `ActorSystem`, which has the default policy to restart in case of all @@ -159,9 +159,9 @@ child not to survive this failure. In case this is not desired (which depends on the use case), we need to use a different supervisor which overrides this behavior. -@@snip [FaultHandlingDocSpec.scala](code/docs/actor/FaultHandlingDocSpec.scala) { #supervisor2 } +@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #supervisor2 } With this parent, the child survives the escalated restart, as demonstrated in the last test: -@@snip [FaultHandlingDocSpec.scala](code/docs/actor/FaultHandlingDocSpec.scala) { #escalate-restart } \ No newline at end of file +@@snip [FaultHandlingDocSpec.scala]($code$/scala/docs/actor/FaultHandlingDocSpec.scala) { #escalate-restart } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/fsm.md b/akka-docs/src/main/paradox/scala/fsm.md index 9df24b7f85..8eca8f0a3c 100644 --- a/akka-docs/src/main/paradox/scala/fsm.md +++ b/akka-docs/src/main/paradox/scala/fsm.md @@ -24,17 +24,17 @@ send them on after the burst ended or a flush request is received. First, consider all of the below to use these import statements: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #simple-imports } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #simple-imports } The contract of our “Buncher” actor is that it accepts or produces the following messages: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #simple-events } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #simple-events } `SetTarget` is needed for starting it up, setting the destination for the `Batches` to be passed on; `Queue` will add to the internal queue while `Flush` will mark the end of a burst. -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #simple-state } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #simple-state } The actor can be in two states: no message queued (aka `Idle`) or some message queued (aka `Active`). It will stay in the active state as long as @@ -44,7 +44,7 @@ the actual queue of messages. 
Now let’s take a look at the skeleton for our FSM actor: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #simple-fsm } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #simple-fsm } The basic strategy is to declare the actor, mixing in the `FSM` trait and specifying the possible states and data values as type parameters. Within @@ -72,7 +72,7 @@ shall work identically in both states, we make use of the fact that any event which is not handled by the `when()` block is passed to the `whenUnhandled()` block: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #unhandled-elided } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #unhandled-elided } The first case handled here is adding `Queue()` requests to the internal queue and going to the `Active` state (this does the obvious thing of staying @@ -86,7 +86,7 @@ target, for which we use the `onTransition` mechanism: you can declare multiple such blocks and all of them will be tried for matching behavior in case a state transition occurs (i.e. only when the state actually changes). -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #transition-elided } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #transition-elided } The transition callback is a partial function which takes as input a pair of states—the current and the next state. The FSM trait includes a convenience @@ -108,7 +108,7 @@ To verify that this buncher actually works, it is quite easy to write a test using the @ref:[Testing Actor Systems](testing.md), which is conveniently bundled with ScalaTest traits into `AkkaSpec`: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #test-code } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #test-code } ## Reference @@ -117,7 +117,7 @@ into `AkkaSpec`: The `FSM` trait inherits directly from `Actor`, when you extend `FSM` you must be aware that an actor is actually created: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #simple-fsm } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #simple-fsm } @@@ note @@ -171,7 +171,7 @@ The `stateFunction` argument is a `PartialFunction[Event, State]`, which is conveniently given using the partial function literal syntax as demonstrated below: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #when-syntax } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #when-syntax } The `Event(msg: Any, data: D)` case class is parameterized with the data type held by the FSM for convenient pattern matching. @@ -188,7 +188,7 @@ sealed trait and then verify that there is a `when` clause for each of the states. If you want to leave the handling of a state “unhandled” (more below), it still needs to be declared like this: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #NullFunction } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #NullFunction } ### Defining the Initial State @@ -207,7 +207,7 @@ If a state doesn't handle a received event a warning is logged. If you want to do something else in this case you can specify that with `whenUnhandled(stateFunction)`: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #unhandled-syntax } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #unhandled-syntax } Within this handler the state of the FSM may be queried using the `stateName` method. @@ -246,7 +246,7 @@ does not modify the state transition. 
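Pulling the pieces above together, a compact, self-contained version of such a buncher might look roughly like this; it is a sketch along the lines of the `FSMDocSpec` example, with simplified unhandled-event handling.

```scala
import scala.collection.immutable
import scala.concurrent.duration._
import akka.actor.{ ActorRef, FSM }

// Messages accepted or produced by the buncher
final case class SetTarget(ref: ActorRef)
final case class Queue(obj: Any)
case object Flush
final case class Batch(obj: immutable.Seq[Any])

// States and state data
sealed trait State
case object Idle extends State
case object Active extends State

sealed trait Data
case object Uninitialized extends Data
final case class Todo(target: ActorRef, queue: immutable.Seq[Any]) extends Data

class Buncher extends FSM[State, Data] {

  startWith(Idle, Uninitialized)

  when(Idle) {
    case Event(SetTarget(ref), Uninitialized) =>
      stay using Todo(ref, Vector.empty)
  }

  when(Active, stateTimeout = 1.second) {
    case Event(Flush | StateTimeout, t: Todo) =>
      goto(Idle) using t.copy(queue = Vector.empty)
  }

  // Queue() is handled the same way in both states
  whenUnhandled {
    case Event(Queue(obj), t @ Todo(_, v)) =>
      goto(Active) using t.copy(queue = v :+ obj)
    case Event(_, _) =>
      stay
  }

  // Deliver the batch whenever we leave Active for Idle
  onTransition {
    case Active -> Idle =>
      stateData match {
        case Todo(ref, queue) => ref ! Batch(queue)
        case _                => // nothing to send
      }
  }

  initialize()
}
```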
All modifiers can be chained to achieve a nice and concise description: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #modifier-syntax } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #modifier-syntax } The parentheses are not actually needed in all cases, but they visually distinguish between modifiers and their arguments and therefore make the code @@ -283,7 +283,7 @@ The handler is a partial function which takes a pair of states as input; no resulting state is needed as it is not possible to modify the transition in progress. -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #transition-syntax } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #transition-syntax } The convenience extractor `->` enables decomposition of the pair of states with a clear visual reminder of the transition's direction. As usual in pattern @@ -295,7 +295,7 @@ It is also possible to pass a function object accepting two states to `onTransition`, in case your transition handling logic is implemented as a method: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #alt-transition-syntax } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #alt-transition-syntax } The handlers registered with this method are stacked, so you can intersperse `onTransition` blocks with `when` blocks as suits your design. It @@ -341,13 +341,13 @@ transformed using Scala’s full supplement of functional programming tools. In order to retain type inference, there is a helper function which may be used in case some common handling logic shall be applied to different clauses: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #transform-syntax } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #transform-syntax } It goes without saying that the arguments to this method may also be stored, to be used several times, e.g. when applying the same transformation to several `when()` blocks: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #alt-transform-syntax } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #alt-transform-syntax } ### Timers @@ -398,13 +398,13 @@ may not be used within a `when` block). @@@ -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #stop-syntax } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #stop-syntax } You can use `onTermination(handler)` to specify custom code that is executed when the FSM is stopped. The handler is a partial function which takes a `StopEvent(reason, stateName, stateData)` as argument: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #termination-syntax } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #termination-syntax } As for the `whenUnhandled` case, this handler is not stacked, so each invocation of `onTermination` replaces the previously installed handler. @@ -436,7 +436,7 @@ and in the following. 
The setting `akka.actor.debug.fsm` in configuration enables logging of an event trace by `LoggingFSM` instances: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #logging-fsm } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #logging-fsm } This FSM will log at DEBUG level: @@ -455,7 +455,7 @@ The `LoggingFSM` trait adds one more feature to the FSM: a rolling event log which may be used during debugging (for tracing how the FSM entered a certain failure state) or for other creative uses: -@@snip [FSMDocSpec.scala](code/docs/actor/FSMDocSpec.scala) { #logging-fsm } +@@snip [FSMDocSpec.scala]($code$/scala/docs/actor/FSMDocSpec.scala) { #logging-fsm } The `logDepth` defaults to zero, which turns off the event log. diff --git a/akka-docs/src/main/paradox/scala/futures.md b/akka-docs/src/main/paradox/scala/futures.md index 116aefcd5d..bb18b5fea1 100644 --- a/akka-docs/src/main/paradox/scala/futures.md +++ b/akka-docs/src/main/paradox/scala/futures.md @@ -13,7 +13,7 @@ which is very similar to a `java.util.concurrent.Executor`. if you have an `Acto it will use its default dispatcher as the `ExecutionContext`, or you can use the factory methods provided by the `ExecutionContext` companion object to wrap `Executors` and `ExecutorServices`, or even create your own. -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #diy-execution-context } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #diy-execution-context } ### Within Actors @@ -24,7 +24,7 @@ actor (e.g. all CPU bound and no latency requirements), then it may be easiest to reuse the dispatcher for running the Futures by importing `context.dispatcher`. -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #context-dispatcher } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #context-dispatcher } ## Use With Actors @@ -33,7 +33,7 @@ which only works if the original sender was an `Actor`) and the second is throug Using an `Actor`'s `?` method to send a message will return a `Future`: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #ask-blocking } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #ask-blocking } This will cause the current thread to block and wait for the `Actor` to 'complete' the `Future` with it's reply. Blocking is discouraged though as it will cause performance problems. @@ -42,14 +42,14 @@ Alternatives to blocking are discussed further within this documentation. Also n an `Actor` is a `Future[Any]` since an `Actor` is dynamic. That is why the `asInstanceOf` is used in the above sample. When using non-blocking it is better to use the `mapTo` method to safely try to cast a `Future` to an expected type: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #map-to } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #map-to } The `mapTo` method will return a new `Future` that contains the result if the cast was successful, or a `ClassCastException` if not. Handling `Exception`s will be discussed further within this documentation. 
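A small sketch of asking an actor and narrowing the reply type with `mapTo`, as described above; the target actor and the fact that it replies with a `String` are assumptions of the example.

```scala
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout

object AskExample {
  implicit val timeout: Timeout = 5.seconds

  // 'actorRef' is assumed to reply with a String to this message
  def askForName(actorRef: ActorRef): Future[String] =
    (actorRef ? "what is your name").mapTo[String]
}
```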
To send the result of a `Future` to an `Actor`, you can use the `pipe` construct: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #pipe-to } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #pipe-to } ## Use Directly @@ -57,7 +57,7 @@ A common use case within Akka is to have some computation performed concurrently If you find yourself creating a pool of `Actor`s for the sole reason of performing a calculation in parallel, there is an easier (and faster) way: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #future-eval } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #future-eval } In the above code the block passed to `Future` will be executed by the default `Dispatcher`, with the return value of the block used to complete the `Future` (in this case, the result would be the string: "HelloWorld"). @@ -66,15 +66,15 @@ and we also avoid the overhead of managing an `Actor`. You can also create already completed Futures using the `Future` companion, which can be either successes: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #successful } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #successful } Or failures: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #failed } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #failed } It is also possible to create an empty `Promise`, to be filled later, and obtain the corresponding `Future`: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #promise } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #promise } ## Functional Futures @@ -87,7 +87,7 @@ The first method for working with `Future` functionally is `map`. This method ta which performs some operation on the result of the `Future`, and returning a new result. The return value of the `map` method is another `Future` that will contain the new result: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #map } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #map } In this example we are joining two strings together within a `Future`. Instead of waiting for this to complete, we apply our function that calculates the length of the string using the `map` method. @@ -99,24 +99,24 @@ string "HelloWorld" and is unaffected by the `map`. The `map` method is fine if we are modifying a single `Future`, but if 2 or more `Future`s are involved `map` will not allow you to combine them together: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #wrong-nested-map } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #wrong-nested-map } `f3` is a `Future[Future[Int]]` instead of the desired `Future[Int]`. Instead, the `flatMap` method should be used: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #flat-map } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #flat-map } Composing futures using nested combinators it can sometimes become quite complicated and hard to read, in these cases using Scala's 'for comprehensions' usually yields more readable code. See next section for examples. 
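A tiny sketch of the `flatMap` and for-comprehension composition just described; the values and the final operation are made up.

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

object ComposeExample extends App {
  val f1 = Future { "Hello" + "World" }
  val f2 = Future { 3 }

  // flatMap keeps the result a Future[Int] instead of a nested Future[Future[Int]]
  val f3: Future[Int] = f1.flatMap { s =>
    f2.map { n => s.length * n }
  }

  // The equivalent for comprehension often reads more naturally
  val f4: Future[Int] = for {
    s <- f1
    n <- f2
  } yield s.length * n
}
```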
If you need to do conditional propagation, you can use `filter`: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #filter } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #filter } ### For Comprehensions Since `Future` has a `map`, `filter` and `flatMap` method it can be easily used in a 'for comprehension': -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #for-comprehension } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #for-comprehension } Something to keep in mind when doing this is even though it looks like parts of the above example can run in parallel, each step of the for comprehension is run sequentially. This will happen on separate threads for each step but @@ -130,7 +130,7 @@ A common use case for this is combining the replies of several `Actor`s into a s without resorting to calling `Await.result` or `Await.ready` to block for each result. First an example of using `Await.result`: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #composing-wrong } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #composing-wrong } @@@ warning @@ -144,7 +144,7 @@ Here we wait for the results from the first 2 `Actor`s before sending that resul We called `Await.result` 3 times, which caused our little program to block 3 times before getting our final result. Now compare that to this example: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #composing } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #composing } Here we have 2 actors processing a single message each. Once the 2 results are available (note that we don't block to get these results!), they are being added together and sent to a third `Actor`, @@ -155,7 +155,7 @@ The `sequence` and `traverse` helper methods can make it easier to handle more c Both of these methods are ways of turning, for a subclass `T` of `Traversable`, `T[Future[A]]` into a `Future[T[A]]`. For example: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #sequence-ask } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #sequence-ask } To better explain what happened in the example, `Future.sequence` is taking the `List[Future[Int]]` and turning it into a `Future[List[Int]]`. We can then use `map` to work with the `List[Int]` directly, @@ -164,11 +164,11 @@ and we find the sum of the `List`. The `traverse` method is similar to `sequence`, but it takes a `T[A]` and a function `A => Future[B]` to return a `Future[T[B]]`, where `T` is again a subclass of Traversable. For example, to use `traverse` to sum the first 100 odd numbers: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #traverse } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #traverse } This is the same result as this example: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #sequence } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #sequence } But it may be faster to use `traverse` as it doesn't have to create an intermediate `List[Future[Int]]`. @@ -177,7 +177,7 @@ from the type of the start-value and the type of the futures and returns somethi and then applies the function to all elements in the sequence of futures, asynchronously, the execution will start when the last of the Futures is completed. 
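A rough sketch of `sequence` and `fold` over a list of futures; the futures here are trivially completed just to keep the example small.

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

object FoldExample extends App {
  val futures: List[Future[Int]] = (1 to 100).map(n => Future(n)).toList

  // sequence: List[Future[Int]] => Future[List[Int]], then sum the list
  val summed: Future[Int] = Future.sequence(futures).map(_.sum)

  // fold: accumulate the results directly, starting from the given zero value
  val folded: Future[Int] = Future.fold(futures)(0)(_ + _)
}
```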
-@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #fold } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #fold } That's all it takes! @@ -185,7 +185,7 @@ If the sequence passed to `fold` is empty, it will return the start-value, in th In some cases you don't have a start-value and you're able to use the value of the first completing `Future` in the sequence as the start-value, you can use `reduce`, it works like this: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #reduce } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #reduce } Same as with `fold`, the execution will be done asynchronously when the last of the `Future` is completed, you can also parallelize it by chunking your futures into sub-sequences and reduce them, and then reduce the reduced results again. @@ -195,11 +195,11 @@ you can also parallelize it by chunking your futures into sub-sequences and redu Sometimes you just want to listen to a `Future` being completed, and react to that not by creating a new `Future`, but by side-effecting. For this Scala supports `onComplete`, `onSuccess` and `onFailure`, of which the last two are specializations of the first. -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #onSuccess } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onSuccess } -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #onFailure } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onFailure } -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #onComplete } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onComplete } ## Define Ordering @@ -209,19 +209,19 @@ But there's a solution and it's name is `andThen`. It creates a new `Future` wit the specified callback, a `Future` that will have the same result as the `Future` it's called on, which allows for ordering like in the following sample: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #and-then } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #and-then } ## Auxiliary Methods `Future` `fallbackTo` combines 2 Futures into a new `Future`, and will hold the successful value of the second `Future` if the first `Future` fails. -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #fallback-to } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #fallback-to } You can also combine two Futures into a new `Future` that will hold a tuple of the two Futures successful results, using the `zip` operation. -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #zip } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #zip } ## Exceptions @@ -233,7 +233,7 @@ If a `Future` does contain an `Exception`, calling `Await.result` will cause it It is also possible to handle an `Exception` by returning a different result. This is done with the `recover` method. For example: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #recover } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #recover } In this example, if the actor replied with a `akka.actor.Status.Failure` containing the `ArithmeticException`, our `Future` would have a result of 0. 
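A minimal sketch of `recover` as just described:

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

object RecoverExample extends App {
  // The division throws, but the recover block turns the failure into 0
  val result: Future[Int] = Future { 1 / 0 } recover {
    case _: ArithmeticException => 0
  }
}
```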
The `recover` method works very similarly to the standard try/catch blocks, @@ -243,10 +243,10 @@ it will behave as if we hadn't used the `recover` method. You can also use the `recoverWith` method, which has the same relationship to `recover` as `flatMap` has to `map`, and is use like this: -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #try-recover } +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #try-recover } ## After `akka.pattern.after` makes it easy to complete a `Future` with a value or exception after a timeout. -@@snip [FutureDocSpec.scala](code/docs/future/FutureDocSpec.scala) { #after } \ No newline at end of file +@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #after } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/general/configuration.md b/akka-docs/src/main/paradox/scala/general/configuration.md index 5ec341d609..9e6a7aef26 100644 --- a/akka-docs/src/main/paradox/scala/general/configuration.md +++ b/akka-docs/src/main/paradox/scala/general/configuration.md @@ -312,7 +312,7 @@ substitutions. You may also specify and parse the configuration programmatically in other ways when instantiating the `ActorSystem`. -@@snip [ConfigDocSpec.scala](code/docs/config/ConfigDocSpec.scala) { #imports #custom-config } +@@snip [ConfigDocSpec.scala]($code$/scala/docs/config/ConfigDocSpec.scala) { #imports #custom-config } ## Reading configuration from a custom location @@ -355,7 +355,7 @@ you could put a config string in code using You can also combine your custom config with the usual config, that might look like: -@@snip [ConfigDoc.java](code/docs/config/ConfigDoc.java) { #java-custom-config } +@@snip [ConfigDoc.java]($code$/java/jdocs/config/ConfigDoc.java) { #java-custom-config } When working with `Config` objects, keep in mind that there are three "layers" in the cake: @@ -392,7 +392,7 @@ things like dispatcher, mailbox, router settings, and remote deployment. Configuration of these features are described in the chapters detailing corresponding topics. An example may look like this: -@@snip [ConfigDocSpec.scala](code/docs/config/ConfigDocSpec.scala) { #deployment-section } +@@snip [ConfigDocSpec.scala]($code$/scala/docs/config/ConfigDocSpec.scala) { #deployment-section } @@@ note @@ -427,64 +427,64 @@ Each Akka module has a reference configuration file with the default values. 
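As a rough sketch of building the configuration programmatically, as described above; the settings shown are placeholders, not recommendations.

```scala
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

object CustomConfigApp extends App {
  // Parse a config fragment; ConfigFactory.load resolves it together with the
  // usual reference.conf defaults and system property overrides.
  val customConf = ConfigFactory.parseString("""
    akka.loglevel = DEBUG
    akka.log-dead-letters = off
    """)

  val system = ActorSystem("MySystem", ConfigFactory.load(customConf))
}
```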
### akka-actor -@@snip [reference.conf]../../../../../akka-actor/src/main/resources/reference.conf) +@@snip [reference.conf]($akka$/akka-actor/src/main/resources/reference.conf) ### akka-agent -@@snip [reference.conf]../../../../../akka-agent/src/main/resources/reference.conf) +@@snip [reference.conf]($akka$/akka-agent/src/main/resources/reference.conf) ### akka-camel -@@snip [reference.conf]../../../../../akka-camel/src/main/resources/reference.conf) +@@snip [reference.conf]($akka$/akka-camel/src/main/resources/reference.conf) ### akka-cluster -@@snip [reference.conf]../../../../../akka-cluster/src/main/resources/reference.conf) +@@snip [reference.conf]($akka$/akka-cluster/src/main/resources/reference.conf) ### akka-multi-node-testkit -@@snip [reference.conf]../../../../../akka-multi-node-testkit/src/main/resources/reference.conf) +@@snip [reference.conf]($akka$/akka-multi-node-testkit/src/main/resources/reference.conf) ### akka-persistence -@@snip [reference.conf]../../../../../akka-persistence/src/main/resources/reference.conf) +@@snip [reference.conf]($akka$/akka-persistence/src/main/resources/reference.conf) ### akka-remote -@@snip [reference.conf]../../../../../akka-remote/src/main/resources/reference.conf) { #shared #classic type=none } +@@snip [reference.conf]($akka$/akka-remote/src/main/resources/reference.conf) { #shared #classic type=none } ### akka-remote (artery) -@@snip [reference.conf]../../../../../akka-remote/src/main/resources/reference.conf) { #shared #artery type=none } +@@snip [reference.conf]($akka$/akka-remote/src/main/resources/reference.conf) { #shared #artery type=none } ### akka-testkit -@@snip [reference.conf]../../../../../akka-testkit/src/main/resources/reference.conf) +@@snip [reference.conf]($akka$/akka-testkit/src/main/resources/reference.conf) ### akka-cluster-metrics -@@snip [reference.conf]../../../../../akka-cluster-metrics/src/main/resources/reference.conf) +@@snip [reference.conf]($akka$/akka-cluster-metrics/src/main/resources/reference.conf) ### akka-cluster-tools -@@snip [reference.conf]../../../../../akka-cluster-tools/src/main/resources/reference.conf) +@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) ### akka-cluster-sharding -@@snip [reference.conf]../../../../../akka-cluster-sharding/src/main/resources/reference.conf) +@@snip [reference.conf]($akka$/akka-cluster-sharding/src/main/resources/reference.conf) ### akka-distributed-data -@@snip [reference.conf]../../../../../akka-distributed-data/src/main/resources/reference.conf) \ No newline at end of file +@@snip [reference.conf]($akka$/akka-distributed-data/src/main/resources/reference.conf) \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/general/jmm.md b/akka-docs/src/main/paradox/scala/general/jmm.md index a2d4d4a79f..0afff787d6 100644 --- a/akka-docs/src/main/paradox/scala/general/jmm.md +++ b/akka-docs/src/main/paradox/scala/general/jmm.md @@ -67,6 +67,6 @@ Since Akka runs on the JVM there are still some rules to be followed. * Closing over internal Actor state and exposing it to other threads -@@snip [SharedMutableStateDocSpec.scala](../../scala/code/docs/actor/SharedMutableStateDocSpec.scala) { #mutable-state } +@@snip [SharedMutableStateDocSpec.scala]($code$/scala/docs/actor/SharedMutableStateDocSpec.scala) { #mutable-state } * Messages **should** be immutable, this is to avoid the shared mutable state trap. 
\ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/general/stream/stream-configuration.md b/akka-docs/src/main/paradox/scala/general/stream/stream-configuration.md index 4c0ca931a0..c176d06dc0 100644 --- a/akka-docs/src/main/paradox/scala/general/stream/stream-configuration.md +++ b/akka-docs/src/main/paradox/scala/general/stream/stream-configuration.md @@ -1,3 +1,3 @@ # Configuration -@@snip [reference.conf]../../../../../../akka-stream/src/main/resources/reference.conf) \ No newline at end of file +@@snip [reference.conf]($akka$/akka-stream/src/main/resources/reference.conf) \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/general/supervision.md b/akka-docs/src/main/paradox/scala/general/supervision.md index 134552bd21..42529e2d94 100644 --- a/akka-docs/src/main/paradox/scala/general/supervision.md +++ b/akka-docs/src/main/paradox/scala/general/supervision.md @@ -204,13 +204,13 @@ to recover before the peristent actor is started. The following Scala snippet shows how to create a backoff supervisor which will start the given echo actor after it has stopped because of a failure, in increasing intervals of 3, 6, 12, 24 and finally 30 seconds: -@@snip [BackoffSupervisorDocSpec.scala](../../scala/code/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-stop } +@@snip [BackoffSupervisorDocSpec.scala]($code$/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-stop } The above is equivalent to this Java code: -@@snip [BackoffSupervisorDocTest.java](../../java/code/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-imports } +@@snip [BackoffSupervisorDocTest.java]($code$/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-imports } -@@snip [BackoffSupervisorDocTest.java](../../java/code/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-stop } +@@snip [BackoffSupervisorDocTest.java]($code$/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-stop } Using a `randomFactor` to add a little bit of additional variance to the backoff intervals is highly recommended, in order to avoid multiple actors re-start at the exact same point in time, @@ -225,23 +225,23 @@ crashes and the supervision strategy decides that it should restart. 
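Since the snippets are only referenced by path in this patch, here is a rough sketch of both variants (restart after the child has stopped, and restart after it has crashed), assuming the `akka.pattern.Backoff`/`BackoffSupervisor` API of this Akka version; `EchoActor` and the actor names are illustrative.

```scala
import scala.concurrent.duration._
import akka.actor.{ Actor, ActorSystem, Props }
import akka.pattern.{ Backoff, BackoffSupervisor }

class EchoActor extends Actor {
  def receive = { case msg => sender() ! msg }
}

object BackoffExample extends App {
  val system = ActorSystem("backoff-demo")
  val childProps = Props[EchoActor]

  // Restart the child after it has *stopped*, waiting 3s, 6s, 12s, 24s, capped at 30s
  val onStopSupervisor = BackoffSupervisor.props(
    Backoff.onStop(
      childProps,
      childName = "myEcho",
      minBackoff = 3.seconds,
      maxBackoff = 30.seconds,
      randomFactor = 0.2)) // adds 0-20% jitter so restarts do not synchronize

  // Same intervals, but triggered when the child *crashes* with an exception
  val onFailureSupervisor = BackoffSupervisor.props(
    Backoff.onFailure(
      childProps,
      childName = "myEcho",
      minBackoff = 3.seconds,
      maxBackoff = 30.seconds,
      randomFactor = 0.2))

  system.actorOf(onStopSupervisor, name = "echoOnStopSupervisor")
  system.actorOf(onFailureSupervisor, name = "echoOnFailureSupervisor")
}
```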
The following Scala snippet shows how to create a backoff supervisor which will start the given echo actor after it has crashed because of some exception, in increasing intervals of 3, 6, 12, 24 and finally 30 seconds: -@@snip [BackoffSupervisorDocSpec.scala](../../scala/code/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-fail } +@@snip [BackoffSupervisorDocSpec.scala]($code$/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-fail } The above is equivalent to this Java code: -@@snip [BackoffSupervisorDocTest.java](../../java/code/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-imports } +@@snip [BackoffSupervisorDocTest.java]($code$/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-imports } -@@snip [BackoffSupervisorDocTest.java](../../java/code/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-fail } +@@snip [BackoffSupervisorDocTest.java]($code$/java/jdocs/pattern/BackoffSupervisorDocTest.java) { #backoff-fail } The `akka.pattern.BackoffOptions` can be used to customize the behavior of the back-off supervisor actor, below are some examples: -@@snip [BackoffSupervisorDocSpec.scala](../../scala/code/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-custom-stop } +@@snip [BackoffSupervisorDocSpec.scala]($code$/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-custom-stop } The above code sets up a back-off supervisor that requires the child actor to send a `akka.pattern.BackoffSupervisor.Reset` message to its parent when a message is successfully processed, resetting the back-off. It also uses a default stopping strategy, any exception will cause the child to stop. -@@snip [BackoffSupervisorDocSpec.scala](../../scala/code/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-custom-fail } +@@snip [BackoffSupervisorDocSpec.scala]($code$/scala/docs/pattern/BackoffSupervisorDocSpec.scala) { #backoff-custom-fail } The above code sets up a back-off supervisor that restarts the child after back-off if MyException is thrown, any other exception will be escalated. The back-off is automatically reset if the child does not throw any errors within 10 seconds. diff --git a/akka-docs/src/main/paradox/scala/howto.md b/akka-docs/src/main/paradox/scala/howto.md index 9bd27ebcef..bef3d08a04 100644 --- a/akka-docs/src/main/paradox/scala/howto.md +++ b/akka-docs/src/main/paradox/scala/howto.md @@ -127,7 +127,7 @@ sent, and how long the initial delay is. 
Worst case scenario is `interval` plus @@@ -@@snip [SchedulerPatternSpec.scala](code/docs/pattern/SchedulerPatternSpec.scala) { #schedule-constructor } +@@snip [SchedulerPatternSpec.scala]($code$/scala/docs/pattern/SchedulerPatternSpec.scala) { #schedule-constructor } The second variant sets up an initial one shot message send in the `preStart` method of the actor, and the then the actor when it receives this message sets up a new one shot @@ -141,4 +141,4 @@ under pressure, but only schedule a new tick message when we have seen the previ @@@ -@@snip [SchedulerPatternSpec.scala](code/docs/pattern/SchedulerPatternSpec.scala) { #schedule-receive } \ No newline at end of file +@@snip [SchedulerPatternSpec.scala]($code$/scala/docs/pattern/SchedulerPatternSpec.scala) { #schedule-receive } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/io-tcp.md b/akka-docs/src/main/paradox/scala/io-tcp.md index 21a1156e4c..b737e2798b 100644 --- a/akka-docs/src/main/paradox/scala/io-tcp.md +++ b/akka-docs/src/main/paradox/scala/io-tcp.md @@ -2,19 +2,19 @@ The code snippets through-out this section assume the following imports: -@@snip [IODocSpec.scala](code/docs/io/IODocSpec.scala) { #imports } +@@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #imports } All of the Akka I/O APIs are accessed through manager objects. When using an I/O API, the first step is to acquire a reference to the appropriate manager. The code below shows how to acquire a reference to the `Tcp` manager. -@@snip [IODocSpec.scala](code/docs/io/IODocSpec.scala) { #manager } +@@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #manager } The manager is an actor that handles the underlying low level I/O resources (selectors, channels) and instantiates workers for specific tasks, such as listening to incoming connections. ## Connecting -@@snip [IODocSpec.scala](code/docs/io/IODocSpec.scala) { #client } +@@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #client } The first step of connecting to a remote address is sending a `Connect` message to the TCP manager; in addition to the simplest form shown above there @@ -56,7 +56,7 @@ fine-grained connection close events, see [Closing Connections](#closing-connect ## Accepting connections -@@snip [IODocSpec.scala](code/docs/io/IODocSpec.scala) { #server } +@@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #server } To create a TCP server and listen for inbound connections, a `Bind` command has to be sent to the TCP manager. This will instruct the TCP manager @@ -75,7 +75,7 @@ handler when sending the `Register` message. Writes can be sent from any actor in the system to the connection actor (i.e. the actor which sent the `Connected` message). The simplistic handler is defined as: -@@snip [IODocSpec.scala](code/docs/io/IODocSpec.scala) { #simplistic-handler } +@@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #simplistic-handler } For a more complete sample which also takes into account the possibility of failures when sending please see [Throttling Reads and Writes](#throttling-reads-and-writes) below. @@ -211,18 +211,18 @@ this allows the example `EchoHandler` to write all outstanding data back to the client before fully closing the connection. 
This is enabled using a flag upon connection activation (observe the `Register` message): -@@snip [EchoServer.scala](code/docs/io/EchoServer.scala) { #echo-manager } +@@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #echo-manager } With this preparation let us dive into the handler itself: -@@snip [EchoServer.scala](code/docs/io/EchoServer.scala) { #simple-echo-handler } +@@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #simple-echo-handler } The principle is simple: when having written a chunk always wait for the `Ack` to come back before sending the next chunk. While waiting we switch behavior such that new incoming data are buffered. The helper functions used are a bit lengthy but not complicated: -@@snip [EchoServer.scala](code/docs/io/EchoServer.scala) { #simple-helpers } +@@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #simple-helpers } The most interesting part is probably the last: an `Ack` removes the oldest data chunk from the buffer, and if that was the last chunk then we either close @@ -243,14 +243,14 @@ how end-to-end back-pressure is realized across a TCP connection. ## NACK-Based Write Back-Pressure with Suspending -@@snip [EchoServer.scala](code/docs/io/EchoServer.scala) { #echo-handler } +@@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #echo-handler } The principle here is to keep writing until a `CommandFailed` is received, using acknowledgements only to prune the resend buffer. When a such a failure was received, transition into a different state for handling and handle resending of all queued data: -@@snip [EchoServer.scala](code/docs/io/EchoServer.scala) { #buffering } +@@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #buffering } It should be noted that all writes which are currently buffered have also been sent to the connection actor upon entering this state, which means that the @@ -263,7 +263,7 @@ is exploited by the `EchoHandler` to switch to an ACK-based approach for the first ten writes after a failure before resuming the optimistic write-through behavior. -@@snip [EchoServer.scala](code/docs/io/EchoServer.scala) { #closing } +@@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #closing } Closing the connection while still sending all data is a bit more involved than in the ACK-based approach: the idea is to always send all outstanding messages @@ -272,7 +272,7 @@ behavior to await the `WritingResumed` event and start over. The helper functions are very similar to the ACK-based case: -@@snip [EchoServer.scala](code/docs/io/EchoServer.scala) { #helpers } +@@snip [EchoServer.scala]($code$/scala/docs/io/EchoServer.scala) { #helpers } ## Read Back-Pressure with Pull Mode @@ -284,7 +284,7 @@ since the rate of writing might be slower than the rate of the arrival of new da With the Pull mode this buffer can be completely eliminated as the following snippet demonstrates: -@@snip [ReadBackPressure.scala](code/docs/io/ReadBackPressure.scala) { #pull-reading-echo } +@@snip [ReadBackPressure.scala]($code$/scala/docs/io/ReadBackPressure.scala) { #pull-reading-echo } The idea here is that reading is not resumed until the previous write has been completely acknowledged by the connection actor. Every pull mode connection @@ -297,7 +297,7 @@ a buffer. 
To enable pull reading on an outbound connection the `pullMode` parameter of the `Connect` should be set to `true`: -@@snip [ReadBackPressure.scala](code/docs/io/ReadBackPressure.scala) { #pull-mode-connect } +@@snip [ReadBackPressure.scala]($code$/scala/docs/io/ReadBackPressure.scala) { #pull-mode-connect } ### Pull Mode Reading for Inbound Connections @@ -305,7 +305,7 @@ The previous section demonstrated how to enable pull reading mode for outbound connections but it is possible to create a listener actor with this mode of reading by setting the `pullMode` parameter of the `Bind` command to `true`: -@@snip [ReadBackPressure.scala](code/docs/io/ReadBackPressure.scala) { #pull-mode-bind } +@@snip [ReadBackPressure.scala]($code$/scala/docs/io/ReadBackPressure.scala) { #pull-mode-bind } One of the effects of this setting is that all connections accepted by this listener actor will use pull mode reading. @@ -318,11 +318,11 @@ it a `ResumeAccepting` message. Listener actors with pull mode start suspended so to start accepting connections a `ResumeAccepting` command has to be sent to the listener actor after binding was successful: -@@snip [ReadBackPressure.scala](code/docs/io/ReadBackPressure.scala) { #pull-accepting } +@@snip [ReadBackPressure.scala]($code$/scala/docs/io/ReadBackPressure.scala) { #pull-accepting } After handling an incoming connection we need to resume accepting again: -@@snip [ReadBackPressure.scala](code/docs/io/ReadBackPressure.scala) { #pull-accepting-cont } +@@snip [ReadBackPressure.scala]($code$/scala/docs/io/ReadBackPressure.scala) { #pull-accepting-cont } The `ResumeAccepting` accepts a `batchSize` parameter that specifies how many new connections are accepted before a next `ResumeAccepting` message diff --git a/akka-docs/src/main/paradox/scala/io-udp.md b/akka-docs/src/main/paradox/scala/io-udp.md index 134e3dc0bc..7e75c7f5d8 100644 --- a/akka-docs/src/main/paradox/scala/io-udp.md +++ b/akka-docs/src/main/paradox/scala/io-udp.md @@ -18,7 +18,7 @@ offered using distinct IO extensions described below. ### Simple Send -@@snip [UdpDocSpec.scala](code/docs/io/UdpDocSpec.scala) { #sender } +@@snip [UdpDocSpec.scala]($code$/scala/docs/io/UdpDocSpec.scala) { #sender } The simplest form of UDP usage is to just send datagrams without the need of getting a reply. To this end a “simple sender” facility is provided as @@ -39,7 +39,7 @@ want to close the ephemeral port the sender is bound to. ### Bind (and Send) -@@snip [UdpDocSpec.scala](code/docs/io/UdpDocSpec.scala) { #listener } +@@snip [UdpDocSpec.scala]($code$/scala/docs/io/UdpDocSpec.scala) { #listener } If you want to implement a UDP server which listens on a socket for incoming datagrams then you need to use the `Bind` command as shown above. The @@ -64,7 +64,7 @@ bind-and-send service we saw earlier, but the main difference is that a connection is only able to send to the `remoteAddress` it was connected to, and will receive datagrams only from that address. -@@snip [UdpDocSpec.scala](code/docs/io/UdpDocSpec.scala) { #connected } +@@snip [UdpDocSpec.scala]($code$/scala/docs/io/UdpDocSpec.scala) { #connected } Consequently the example shown here looks quite similar to the previous one, the biggest difference is the absence of remote address information in @@ -90,12 +90,12 @@ To select a Protocol Family you must extend `akka.io.Inet.DatagramChannelCreator class which extends `akka.io.Inet.SocketOption`. Provide custom logic for opening a datagram channel by overriding `create` method. 
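As a sketch of what such a `DatagramChannelCreator` looks like, loosely following the referenced `ScalaUdpMulticast` snippet and shown here only for orientation while the include path changes:

```scala
import java.net.StandardProtocolFamily
import java.nio.channels.DatagramChannel
import akka.io.Inet.DatagramChannelCreator

// Opens an IPv6 datagram channel instead of the default protocol family.
final case class Inet6ProtocolFamily() extends DatagramChannelCreator {
  override def create(): DatagramChannel =
    DatagramChannel.open(StandardProtocolFamily.INET6)
}
```

An instance of this option is then passed along with the multicast group option in the `Bind` message, as the snippets referenced below show.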
-@@snip [ScalaUdpMulticast.scala](code/docs/io/ScalaUdpMulticast.scala) { #inet6-protocol-family } +@@snip [ScalaUdpMulticast.scala]($code$/scala/docs/io/ScalaUdpMulticast.scala) { #inet6-protocol-family } Another socket option will be needed to join a multicast group. -@@snip [ScalaUdpMulticast.scala](code/docs/io/ScalaUdpMulticast.scala) { #multicast-group } +@@snip [ScalaUdpMulticast.scala]($code$/scala/docs/io/ScalaUdpMulticast.scala) { #multicast-group } Socket options must be provided to `UdpMessage.Bind` message. -@@snip [ScalaUdpMulticast.scala](code/docs/io/ScalaUdpMulticast.scala) { #bind } \ No newline at end of file +@@snip [ScalaUdpMulticast.scala]($code$/scala/docs/io/ScalaUdpMulticast.scala) { #bind } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/io.md b/akka-docs/src/main/paradox/scala/io.md index c0de86f95d..f2df1c4713 100644 --- a/akka-docs/src/main/paradox/scala/io.md +++ b/akka-docs/src/main/paradox/scala/io.md @@ -22,7 +22,7 @@ as an entry point for the API. I/O is broken into several drivers. The manager f is accessible through the `IO` entry point. For example the following code looks up the TCP manager and returns its `ActorRef`: -@@snip [IODocSpec.scala](code/docs/io/IODocSpec.scala) { #manager } +@@snip [IODocSpec.scala]($code$/scala/docs/io/IODocSpec.scala) { #manager } The manager receives I/O command messages and instantiates worker actors in response. The worker actors present themselves to the API user in the reply to the command that was sent. For example after a `Connect` command sent to diff --git a/akka-docs/src/main/paradox/scala/logging.md b/akka-docs/src/main/paradox/scala/logging.md index 6a40684078..49fc0baa2e 100644 --- a/akka-docs/src/main/paradox/scala/logging.md +++ b/akka-docs/src/main/paradox/scala/logging.md @@ -12,7 +12,7 @@ synchronously. Create a `LoggingAdapter` and use the `error`, `warning`, `info`, or `debug` methods, as illustrated in this example: -@@snip [LoggingDocSpec.scala](code/docs/event/LoggingDocSpec.scala) { #my-actor } +@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #my-actor } For convenience, you can mix in the `log` member into actors, instead of defining it as above. @@ -38,7 +38,7 @@ placeholders results in a warning being appended to the log statement (i.e. on the same line with the same severity). You may pass an array as the only substitution argument to have its elements be treated individually: -@@snip [LoggingDocSpec.scala](code/docs/event/LoggingDocSpec.scala) { #array } +@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #array } The Java `Class` of the log source is also included in the generated `LogEvent`. In case of a simple string this is replaced with a “marker” @@ -223,7 +223,7 @@ using implicit parameters and thus fully customizable: simply create your own instance of `LogSource[T]` and have it in scope when creating the logger. -@@snip [LoggingDocSpec.scala](code/docs/event/LoggingDocSpec.scala) { #my-source } +@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #my-source } This example creates a log source which mimics traditional usage of Java loggers, which are based upon the originating object’s class name as log @@ -292,7 +292,7 @@ logger available in the 'akka-slf4j' module. 
Example of creating a listener: -@@snip [LoggingDocSpec.scala](code/docs/event/LoggingDocSpec.scala) { #my-event-listener } +@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #my-event-listener } ## Logging to stdout during startup and shutdown @@ -453,13 +453,13 @@ if it is not set to a new map. Use `log.clearMDC()`. @@@ -@@snip [LoggingDocSpec.scala](code/docs/event/LoggingDocSpec.scala) { #mdc } +@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #mdc } For convenience, you can mix in the `log` member into actors, instead of defining it as above. This trait also lets you override `def mdc(msg: Any): MDC` for specifying MDC values depending on current message and lets you forget about the cleanup as well, since it already does it for you. -@@snip [LoggingDocSpec.scala](code/docs/event/LoggingDocSpec.scala) { #mdc-actor } +@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #mdc-actor } Now, the values will be available in the MDC, so you can use them in the layout pattern: diff --git a/akka-docs/src/main/paradox/scala/mailboxes.md b/akka-docs/src/main/paradox/scala/mailboxes.md index 6287ae5298..5ac6750abe 100644 --- a/akka-docs/src/main/paradox/scala/mailboxes.md +++ b/akka-docs/src/main/paradox/scala/mailboxes.md @@ -12,12 +12,12 @@ It is possible to require a certain type of message queue for a certain type of by having that actor extend the parameterized trait `RequiresMessageQueue`. Here is an example: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #required-mailbox-class } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #required-mailbox-class } The type parameter to the `RequiresMessageQueue` trait needs to be mapped to a mailbox in configuration like this: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #bounded-mailbox-config #required-mailbox-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #bounded-mailbox-config #required-mailbox-config } Now every time you create an actor of type `MyBoundedActor` it will try to get a bounded mailbox. 
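A compact sketch of this mailbox-requirement pattern; the `bounded-mailbox` configuration name is only an example:

```scala
import akka.actor.Actor
import akka.dispatch.{ BoundedMessageQueueSemantics, RequiresMessageQueue }

// Requiring a bounded message queue for this actor type.
class MyBoundedActor extends Actor
  with RequiresMessageQueue[BoundedMessageQueueSemantics] {
  def receive: Receive = { case msg => println(msg) }
}

// application.conf:
//
// bounded-mailbox {
//   mailbox-type = "akka.dispatch.BoundedMailbox"
//   mailbox-capacity = 1000
//   mailbox-push-timeout-time = 10s
// }
//
// akka.actor.mailbox.requirements {
//   "akka.dispatch.BoundedMessageQueueSemantics" = bounded-mailbox
// }
```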
If the actor has a different mailbox configured in deployment, either directly or via @@ -181,27 +181,27 @@ The following mailboxes should only be used with zero `mailbox-push-timeout-time How to create a PriorityMailbox: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox } And then add it to the configuration: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher-config } And then an example on how you would use it: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher } It is also possible to configure a mailbox type directly like this: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config #mailbox-deployment-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config #mailbox-deployment-config } And then use it either from deployment like this: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-config } Or code like this: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-code } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-code } ### ControlAwareMailbox @@ -210,21 +210,21 @@ immediately no matter how many other messages are already in its mailbox. It can be configured like this: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-config } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-config } Control messages need to extend the `ControlMessage` trait: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-messages } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-messages } And then an example on how you would use it: -@@snip [DispatcherDocSpec.scala](../scala/code/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-dispatcher } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-dispatcher } ## Creating your own Mailbox type An example is worth a thousand quacks: -@@snip [MyUnboundedMailbox.scala](../scala/code/docs/dispatcher/MyUnboundedMailbox.scala) { #mailbox-implementation-example } +@@snip [MyUnboundedMailbox.scala]($code$/scala/docs/dispatcher/MyUnboundedMailbox.scala) { #mailbox-implementation-example } And then you just specify the FQCN of your MailboxType as the value of the "mailbox-type" in the dispatcher configuration, or the mailbox configuration. @@ -243,11 +243,11 @@ dispatcher or mailbox setting using it. 
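For orientation, a custom `MailboxType` along the lines of the referenced `MyUnboundedMailbox` snippet could look like this sketch (queue choice and names are illustrative):

```scala
import java.util.concurrent.ConcurrentLinkedQueue

import akka.actor.{ ActorRef, ActorSystem }
import akka.dispatch.{ Envelope, MailboxType, MessageQueue, ProducesMessageQueue }
import com.typesafe.config.Config

// Marker trait that actors or dispatchers can require.
trait MyUnboundedMessageQueueSemantics

object MyUnboundedMailbox {
  class MyMessageQueue extends MessageQueue with MyUnboundedMessageQueueSemantics {
    private final val queue = new ConcurrentLinkedQueue[Envelope]()

    def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue.offer(handle)
    def dequeue(): Envelope = queue.poll()
    def numberOfMessages: Int = queue.size
    def hasMessages: Boolean = !queue.isEmpty
    def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit =
      while (hasMessages) deadLetters.enqueue(owner, dequeue())
  }
}

// Instantiated reflectively by Akka with (settings, config).
class MyUnboundedMailbox extends MailboxType
  with ProducesMessageQueue[MyUnboundedMailbox.MyMessageQueue] {

  def this(settings: ActorSystem.Settings, config: Config) = this()

  final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue =
    new MyUnboundedMailbox.MyMessageQueue
}
```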
You can also use the mailbox as a requirement on the dispatcher like this: -@@snip [DispatcherDocSpec.scala](code/docs/dispatcher/DispatcherDocSpec.scala) { #custom-mailbox-config-java } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #custom-mailbox-config-java } Or by defining the requirement on your actor class like this: -@@snip [DispatcherDocSpec.scala](code/docs/dispatcher/DispatcherDocSpec.scala) { #require-mailbox-on-actor } +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #require-mailbox-on-actor } ## Special Semantics of `system.actorOf` diff --git a/akka-docs/src/main/paradox/scala/persistence-query-leveldb.md b/akka-docs/src/main/paradox/scala/persistence-query-leveldb.md index 02be924484..8e77a82ed9 100644 --- a/akka-docs/src/main/paradox/scala/persistence-query-leveldb.md +++ b/akka-docs/src/main/paradox/scala/persistence-query-leveldb.md @@ -17,7 +17,7 @@ Make sure that you have the following dependency in your project: The `ReadJournal` is retrieved via the `akka.persistence.query.PersistenceQuery` extension: -@@snip [LeveldbPersistenceQueryDocSpec.scala](code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #get-read-journal } +@@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #get-read-journal } ## Supported Queries @@ -26,7 +26,7 @@ extension: `eventsByPersistenceId` is used for retrieving events for a specific `PersistentActor` identified by `persistenceId`. -@@snip [LeveldbPersistenceQueryDocSpec.scala](code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #EventsByPersistenceId } +@@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #EventsByPersistenceId } You can retrieve a subset of all events by specifying `fromSequenceNr` and `toSequenceNr` or use `0L` and `Long.MaxValue` respectively to retrieve all events. Note that @@ -54,7 +54,7 @@ backend journal. `allPersistenceIds` is used for retrieving all `persistenceIds` of all persistent actors. -@@snip [LeveldbPersistenceQueryDocSpec.scala](code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #AllPersistenceIds } +@@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #AllPersistenceIds } The returned event stream is unordered and you can expect different order for multiple executions of the query. @@ -75,12 +75,12 @@ backend journal. `eventsByTag` is used for retrieving events that were marked with a given tag, e.g. all domain events of an Aggregate Root type. -@@snip [LeveldbPersistenceQueryDocSpec.scala](code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #EventsByTag } +@@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #EventsByTag } To tag events you create an @ref:[Event Adapters](persistence.md#event-adapters-scala) that wraps the events in a `akka.persistence.journal.Tagged` with the given `tags`. -@@snip [LeveldbPersistenceQueryDocSpec.scala](code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #tagger } +@@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #tagger } You can use `NoOffset` to retrieve all events with a given tag or retrieve a subset of all events by specifying a `Sequence` `offset`. 
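A minimal usage sketch of the queries discussed above, assuming the LevelDB read journal and Akka Streams (the tag name "green" is arbitrary):

```scala
import akka.actor.ActorSystem
import akka.persistence.query.{ NoOffset, PersistenceQuery }
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
import akka.stream.ActorMaterializer

object TagQueryExample extends App {
  implicit val system = ActorSystem("QueryExample")
  implicit val mat = ActorMaterializer()

  val queries = PersistenceQuery(system)
    .readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier)

  // Live stream of all events tagged "green", from the beginning of the journal.
  queries
    .eventsByTag("green", NoOffset)
    .runForeach(envelope => println(s"Event: $envelope"))
}
```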
The `offset` corresponds to an ordered sequence number for @@ -128,4 +128,4 @@ for the default `LeveldbReadJournal.Identifier`. It can be configured with the following properties: -@@snip [reference.conf]../../../../../akka-persistence-query/src/main/resources/reference.conf) { #query-leveldb } \ No newline at end of file +@@snip [reference.conf]($akka$/akka-persistence-query/src/main/resources/reference.conf) { #query-leveldb } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/persistence-query.md b/akka-docs/src/main/paradox/scala/persistence-query.md index aab6ad8510..107fc3bde4 100644 --- a/akka-docs/src/main/paradox/scala/persistence-query.md +++ b/akka-docs/src/main/paradox/scala/persistence-query.md @@ -38,7 +38,7 @@ Read journals are implemented as [Community plugins](http://akka.io/community/#p databases). For example, given a library that provides a `akka.persistence.query.my-read-journal` obtaining the related journal is as simple as: -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #basic-usage } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #basic-usage } Journal implementers are encouraged to put this identifier in a variable known to the user, such that one can access it via `readJournalFor[NoopJournal](NoopJournal.identifier)`, however this is not enforced. @@ -67,11 +67,11 @@ The predefined queries are: By default this stream should be assumed to be a "live" stream, which means that the journal should keep emitting new persistence ids as they come into the system: -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #all-persistence-ids-live } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #all-persistence-ids-live } If your usage does not require a live stream, you can use the `currentPersistenceIds` query: -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #all-persistence-ids-snap } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #all-persistence-ids-snap } #### EventsByPersistenceIdQuery and CurrentEventsByPersistenceIdQuery @@ -79,7 +79,7 @@ If your usage does not require a live stream, you can use the `currentPersistenc however, since it is a stream it is possible to keep it alive and watch for additional incoming events persisted by the persistent actor identified by the given `persistenceId`. -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #events-by-persistent-id } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #events-by-persistent-id } Most journals will have to revert to polling in order to achieve this, which can typically be configured with a `refresh-interval` configuration property. @@ -98,7 +98,7 @@ Some journals may support tagging of events via an @ref:[Event Adapters](persist `akka.persistence.journal.Tagged` with the given `tags`. The journal may support other ways of doing tagging - again, how exactly this is implemented depends on the used journal. 
Here is an example of such a tagging event adapter: -@@snip [LeveldbPersistenceQueryDocSpec.scala](code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #tagger } +@@snip [LeveldbPersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala) { #tagger } @@@ note @@ -115,7 +115,7 @@ In the example below we query all events which have been tagged (we assume this @ref:[EventAdapter](persistence.md#event-adapters-scala), or that the journal is smart enough that it can figure out what we mean by this tag - for example if the journal stored the events as json it may try to find those with the field `tag` set to this value etc.). -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #events-by-tag } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #events-by-tag } As you can see, we can use all the usual stream combinators available from @ref:[Streams](stream/index.md) on the resulting query stream, including for example taking the first 10 and cancelling the stream. It is worth pointing out that the built-in `EventsByTag` @@ -135,11 +135,11 @@ stream, for example if it's finite or infinite, strictly ordered or not ordered is defined as the second type parameter of the returned `Source`, which allows journals to provide users with their specialised query object, as demonstrated in the sample below: -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-types } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-types } -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-definition } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-definition } -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-usage } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #advanced-journal-query-usage } ## Performance and denormalization @@ -170,7 +170,7 @@ it may be more efficient or interesting to query it (instead of the source event If the read datastore exposes a [Reactive Streams](http://reactive-streams.org) interface then implementing a simple projection is as simple as, using the read-journal and feeding it into the databases driver interface, for example like so: -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-rs } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-rs } ### Materialize view using mapAsync @@ -180,7 +180,7 @@ you may have to implement the write logic using plain functions or Actors instea In case your write logic is state-less and you just need to convert the events from one data type to another before writing into the alternative datastore, then the projection is as simple as: -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-simple } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { 
#projection-into-different-store-simple } ### Resumable projections @@ -192,9 +192,9 @@ The example below additionally highlights how you would use Actors to implement you need to do some complex logic that would be best handled inside an Actor before persisting the event into the other datastore: -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-actor-run } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-actor-run } -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-actor } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #projection-into-different-store-actor } ## Query plugins @@ -226,11 +226,11 @@ As illustrated below one of the implementations can delegate to the other. Below is a simple journal implementation: -@@snip [PersistenceQueryDocSpec.scala](code/docs/persistence/query/PersistenceQueryDocSpec.scala) { #my-read-journal } +@@snip [PersistenceQueryDocSpec.scala]($code$/scala/docs/persistence/query/PersistenceQueryDocSpec.scala) { #my-read-journal } And the `eventsByTag` could be backed by such an Actor for example: -@@snip [MyEventsByTagPublisher.scala](code/docs/persistence/query/MyEventsByTagPublisher.scala) { #events-by-tag-publisher } +@@snip [MyEventsByTagPublisher.scala]($code$/scala/docs/persistence/query/MyEventsByTagPublisher.scala) { #events-by-tag-publisher } The `ReadJournalProvider` class must have a constructor with one of these signatures: diff --git a/akka-docs/src/main/paradox/scala/persistence-schema-evolution.md b/akka-docs/src/main/paradox/scala/persistence-schema-evolution.md index cc2f7ecf68..5902c00da6 100644 --- a/akka-docs/src/main/paradox/scala/persistence-schema-evolution.md +++ b/akka-docs/src/main/paradox/scala/persistence-schema-evolution.md @@ -155,15 +155,15 @@ For more in-depth explanations on how serialization picks the serializer to use First we start by defining our domain model class, here representing a person: -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer-model } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer-model } Next we implement a serializer (or extend an existing one to be able to handle the new `Person` class): -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer } And finally we register the serializer and bind it to handle the `docs.persistence.Person` class: -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer-config } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #simplest-custom-serializer-config } Deserialization will be performed by the same serializer which serialized the message initially because of the `identifier` being stored together with the message. 
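A condensed sketch of such a custom serializer and its configuration binding; the `Person` class, the identifier value, and the pipe-separated wire format are illustrative only:

```scala
import java.nio.charset.StandardCharsets

import akka.serialization.SerializerWithStringManifest

final case class Person(name: String, surname: String)

class PersonSerializer extends SerializerWithStringManifest {
  private val PersonManifest = "person"

  override def identifier: Int = 1234567
  override def manifest(obj: AnyRef): String = PersonManifest

  override def toBinary(obj: AnyRef): Array[Byte] = obj match {
    case Person(name, surname) =>
      s"$name|$surname".getBytes(StandardCharsets.UTF_8)
  }

  override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match {
    case PersonManifest =>
      val Array(name, surname) = new String(bytes, StandardCharsets.UTF_8).split('|')
      Person(name, surname)
  }
}

// application.conf (assuming the classes live in the docs.persistence package):
//
// akka.actor {
//   serializers {
//     person = "docs.persistence.PersonSerializer"
//   }
//   serialization-bindings {
//     "docs.persistence.Person" = person
//   }
// }
```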
@@ -198,20 +198,20 @@ While being able to read messages with missing fields is half of the solution, y values somehow. This is usually modeled as some kind of default value, or by representing the field as an `Option[T]` See below for an example how reading an optional field from a serialized protocol buffers message might look like. -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-read-optional-model } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-read-optional-model } Next we prepare an protocol definition using the protobuf Interface Description Language, which we'll use to generate the serializer code to be used on the Akka Serialization layer (notice that the schema aproach allows us to easily rename fields, as long as the numeric identifiers of the fields do not change): -@@snip [FlightAppModels.proto]../../protobuf/FlightAppModels.proto) { #protobuf-read-optional-proto } +@@snip [FlightAppModels.proto]($code$/protobuf/FlightAppModels.proto) { #protobuf-read-optional-proto } The serializer implementation uses the protobuf generated classes to marshall the payloads. Optional fields can be handled explicitly or missing values by calling the `has...` methods on the protobuf object, which we do for `seatType` in order to use a `Unknown` type in case the event was stored before we had introduced the field to this event type: -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-read-optional } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-read-optional } ### Rename fields @@ -237,7 +237,7 @@ add the overhead of having to maintain the schema. When using serializers like t > This is how such a rename would look in protobuf: -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-rename-proto } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #protobuf-rename-proto } It is important to learn about the strengths and limitations of your serializers, in order to be able to move swiftly and refactor your models fearlessly as you go on with the project. @@ -265,7 +265,7 @@ or using a library like [Stamina](https://github.com/scalapenos/stamina) which h > The following snippet showcases how one could apply renames if working with plain JSON (using `spray.json.JsObject`): -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #rename-plain-json } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #rename-plain-json } As you can see, manually handling renames induces some boilerplate onto the EventAdapter, however much of it you will find is common infrastructure code that can be either provided by an external library (for promotion management) @@ -331,12 +331,12 @@ Other events (**E**) can simply be passed through. 
The serializer detects that the string manifest points to a removed event type and skips attempting to deserialize it: -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #string-serializer-skip-deleved-event-by-manifest } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #string-serializer-skip-deleved-event-by-manifest } The EventAdapter we implemented is aware of `EventDeserializationSkipped` events (our "Tombstones"), and emits and empty `EventSeq` whenever such object is encoutered: -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #string-serializer-skip-deleved-event-by-manifest-adapter } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #string-serializer-skip-deleved-event-by-manifest-adapter } ### Detach domain model from data model @@ -365,13 +365,13 @@ include additional data for the event (e.g. tags), for ease of later querying. We will use the following domain and data models to showcase how the separation can be implemented by the adapter: -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models } The `EventAdapter` takes care of converting from one model to the other one (in both directions), alowing the models to be completely detached from each other, such that they can be optimised independently as long as the mapping logic is able to convert between them: -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models-adapter } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models-adapter } The same technique could also be used directly in the Serializer if the end result of marshalling is bytes. Then the serializer can simply convert the bytes do the domain object by using the generated protobuf builders. @@ -393,7 +393,7 @@ In this aproach, the `EventAdapter` is used as the marshalling layer: it seriali The journal plugin notices that the incoming event type is JSON (for example by performing a `match` on the incoming event) and stores the incoming object directly. 
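A generic sketch of such a detaching `EventAdapter`; the JSON variant referenced below follows the same shape, it just emits a JSON representation from `toJournal` (the model classes here are illustrative):

```scala
import akka.persistence.journal.{ EventAdapter, EventSeq }

// Hypothetical domain (application) and data (journal) representations.
object DomainModel { final case class ItemAdded(itemId: String, quantity: Int) }
object DataModel   { final case class ItemAddedV1(itemId: String, quantity: Int) }

class DetachingEventAdapter extends EventAdapter {
  override def manifest(event: Any): String = "" // no manifest needed in this sketch

  // domain -> data, applied when writing to the journal
  override def toJournal(event: Any): Any = event match {
    case DomainModel.ItemAdded(id, quantity) => DataModel.ItemAddedV1(id, quantity)
  }

  // data -> domain, applied during recovery (and queries)
  override def fromJournal(event: Any, manifest: String): EventSeq = event match {
    case DataModel.ItemAddedV1(id, quantity) =>
      EventSeq.single(DomainModel.ItemAdded(id, quantity))
  }
}
```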
-@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models-adapter-json } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #detach-models-adapter-json } @@@ note @@ -447,7 +447,7 @@ During recovery however, we now need to convert the old `V1` model into the `V2` Depending if the old event contains a name change, we either emit the `UserNameChanged` or we don't, and the address change is handled similarily: -@@snip [PersistenceSchemaEvolutionDocSpec.scala](code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #split-events-during-recovery } +@@snip [PersistenceSchemaEvolutionDocSpec.scala]($code$/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala) { #split-events-during-recovery } By returning an `EventSeq` from the event adapter, the recovered event can be converted to multiple events before being delivered to the persistent actor. \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/persistence.md b/akka-docs/src/main/paradox/scala/persistence.md index 5d18327a1a..4b79bfdb8d 100644 --- a/akka-docs/src/main/paradox/scala/persistence.md +++ b/akka-docs/src/main/paradox/scala/persistence.md @@ -64,7 +64,7 @@ Akka persistence supports event sourcing with the `PersistentActor` trait. An ac `persist` method to persist and handle events. The behavior of a `PersistentActor` is defined by implementing `receiveRecover` and `receiveCommand`. This is demonstrated in the following example. -@@snip [PersistentActorExample.scala](code/docs/persistence/PersistentActorExample.scala) { #persistent-actor-example } +@@snip [PersistentActorExample.scala]($code$/scala/docs/persistence/PersistentActorExample.scala) { #persistent-actor-example } The example defines two data types, `Cmd` and `Evt` to represent commands and events, respectively. The `state` of the `ExamplePersistentActor` is a list of persisted event data contained in `ExampleState`. @@ -115,7 +115,7 @@ behavior when replaying the events. When replay is completed it will use the new A persistent actor must have an identifier that doesn't change across different actor incarnations. The identifier must be defined with the `persistenceId` method. -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #persistence-id-override } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #persistence-id-override } @@@ note @@ -159,7 +159,7 @@ To skip loading snapshots and replay all events you can use `SnapshotSelectionCr This can be useful if snapshot serialization format has changed in an incompatible way. It should typically not be used when events have been deleted. -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #recovery-no-snap } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-no-snap } Another example, which can be fun for experiments but probably not in a real application, is setting an upper bound to the replay which allows the actor to be replayed to a certain point "in the past" @@ -167,24 +167,24 @@ instead to its most up to date state. Note that after that it is a bad idea to p events because a later recovery will probably be confused by the new events that follow the events that were previously skipped. 
-@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #recovery-custom } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-custom } Recovery can be disabled by returning `Recovery.none()` in the `recovery` method of a `PersistentActor`: -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #recovery-disabled } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-disabled } #### Recovery status A persistent actor can query its own recovery status via the methods -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #recovery-status } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-status } Sometimes there is a need for performing additional initialization when the recovery has completed before processing any other message sent to the persistent actor. The persistent actor will receive a special `RecoveryCompleted` message right after recovery and before any other received messages. -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #recovery-completed } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #recovery-completed } The actor will always receive a `RecoveryCompleted` message, even if there are no events in the journal and the snapshot store is empty, or if it's a new persistent actor with a previously @@ -256,7 +256,7 @@ stash incoming Commands while the Journal is still working on persisting and/or In the below example, the event callbacks may be called "at any time", even after the next Command has been processed. The ordering between events is still guaranteed ("evt-b-1" will be sent after "evt-a-2", which will be sent after "evt-a-1" etc.). -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #persist-async } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #persist-async } @@@ note @@ -283,18 +283,18 @@ use it for *read* operations, and actions which do not have corresponding events Using this method is very similar to the persist family of methods, yet it does **not** persist the passed in event. It will be kept in memory and used when invoking the handler. -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #defer } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #defer } Notice that the `sender()` is **safe** to access in the handler callback, and will be pointing to the original sender of the command for which this `deferAsync` handler was called. The calling side will get the responses in this (guaranteed) order: -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #defer-caller } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #defer-caller } You can also call `deferAsync` with `persist`. -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #defer-with-persist } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #defer-with-persist } @@@ warning @@ -314,11 +314,11 @@ however there are situations where it may be useful. It is important to understa those situations, as well as their implication on the stashing behaviour (that `persist()` enforces). 
In the following example two persist calls are issued, and each of them issues another persist inside its callback: -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #nested-persist-persist } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persist-persist } When sending two commands to this `PersistentActor`, the persist handlers will be executed in the following order: -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #nested-persist-persist-caller } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persist-persist-caller } First the "outer layer" of persist calls is issued and their callbacks are applied. After these have successfully completed, the inner callbacks will be invoked (once the events they are persisting have been confirmed to be persisted by the journal). @@ -328,11 +328,11 @@ is extended until all nested `persist` callbacks have been handled. It is also possible to nest `persistAsync` calls, using the same pattern: -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #nested-persistAsync-persistAsync } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persistAsync-persistAsync } In this case no stashing is happening, yet events are still persisted and callbacks are executed in the expected order: -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #nested-persistAsync-persistAsync-caller } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #nested-persistAsync-persistAsync-caller } While it is possible to nest mixed `persist` and `persistAsync` with keeping their respective semantics it is not a recommended practice, as it may lead to overly complex nesting. @@ -359,7 +359,7 @@ will most likely fail anyway since the journal is probably unavailable. It is be actor and after a back-off timeout start it again. The `akka.pattern.BackoffSupervisor` actor is provided to support such restarts. -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #backoff } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #backoff } If persistence of an event is rejected before it is stored, e.g. due to serialization error, `onPersistRejected` will be invoked (logging a warning by default), and the actor continues with @@ -473,11 +473,11 @@ Consider using explicit shut-down messages instead of `PoisonPill` when working The example below highlights how messages arrive in the Actor's mailbox and how they interact with its internal stashing mechanism when `persist()` is used. 
Notice the early stop behaviour that occurs when `PoisonPill` is used: -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown } -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown-example-bad } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown-example-bad } -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown-example-good } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown-example-good } ### Replay Filter @@ -520,16 +520,16 @@ in context of persistent actors but this is also applicable to persistent views. Persistent actors can save snapshots of internal state by calling the `saveSnapshot` method. If saving of a snapshot succeeds, the persistent actor receives a `SaveSnapshotSuccess` message, otherwise a `SaveSnapshotFailure` message -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #save-snapshot } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #save-snapshot } where `metadata` is of type `SnapshotMetadata`: -@@snip [SnapshotProtocol.scala]../../../../../akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala) { #snapshot-metadata } +@@snip [SnapshotProtocol.scala]($akka$/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala) { #snapshot-metadata } During recovery, the persistent actor is offered a previously saved snapshot via a `SnapshotOffer` message from which it can initialize internal state. -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #snapshot-offer } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #snapshot-offer } The replayed messages that follow the `SnapshotOffer` message, if any, are younger than the offered snapshot. They finally recover the persistent actor to its current (i.e. latest) state. @@ -537,7 +537,7 @@ They finally recover the persistent actor to its current (i.e. latest) state. In general, a persistent actor is only offered a snapshot if that persistent actor has previously saved one or more snapshots and at least one of these snapshots matches the `SnapshotSelectionCriteria` that can be specified for recovery. -@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #snapshot-criteria } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #snapshot-criteria } If not specified, they default to `SnapshotSelectionCriteria.Latest` which selects the latest (= youngest) snapshot. To disable snapshot-based recovery, applications should use `SnapshotSelectionCriteria.None`. A recovery where no @@ -633,7 +633,7 @@ between `deliver` and `confirmDelivery` is possible. The `deliveryId` must do th of the message, the destination actor will send the same``deliveryId`` wrapped in a confirmation message back to the sender. The sender will then use it to call `confirmDelivery` method to complete the delivery routine. 
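For reference, the `#at-least-once-example` snippet referenced below has roughly this shape, assuming the classic `AtLeastOnceDelivery` trait (the message protocol is illustrative):

```scala
import akka.actor.ActorPath
import akka.persistence.{ AtLeastOnceDelivery, PersistentActor }

final case class Msg(deliveryId: Long, payload: String)
final case class Confirm(deliveryId: Long)

sealed trait Evt
final case class MsgSent(payload: String) extends Evt
final case class MsgConfirmed(deliveryId: Long) extends Evt

class MySender(destination: ActorPath)
  extends PersistentActor with AtLeastOnceDelivery {

  override def persistenceId: String = "my-sender-id"

  override def receiveCommand: Receive = {
    case payload: String     => persist(MsgSent(payload))(updateState)
    case Confirm(deliveryId) => persist(MsgConfirmed(deliveryId))(updateState)
  }

  override def receiveRecover: Receive = {
    case evt: Evt => updateState(evt)
  }

  private def updateState(evt: Evt): Unit = evt match {
    case MsgSent(payload) =>
      deliver(destination)(deliveryId => Msg(deliveryId, payload))
    case MsgConfirmed(deliveryId) =>
      confirmDelivery(deliveryId)
  }
}
```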
-@@snip [PersistenceDocSpec.scala](code/docs/persistence/PersistenceDocSpec.scala) { #at-least-once-example } +@@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #at-least-once-example } The `deliveryId` generated by the persistence module is a strictly monotonically increasing sequence number without gaps. The same sequence is used for all destinations of the actor, i.e. when sending to multiple @@ -707,11 +707,11 @@ json instead of serializing the object to its binary representation. Implementing an EventAdapter is rather stright forward: -@@snip [PersistenceEventAdapterDocSpec.scala](code/docs/persistence/PersistenceEventAdapterDocSpec.scala) { #identity-event-adapter } +@@snip [PersistenceEventAdapterDocSpec.scala]($code$/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala) { #identity-event-adapter } Then in order for it to be used on events coming to and from the journal you must bind it using the below configuration syntax: -@@snip [PersistenceEventAdapterDocSpec.scala](code/docs/persistence/PersistenceEventAdapterDocSpec.scala) { #event-adapters-config } +@@snip [PersistenceEventAdapterDocSpec.scala]($code$/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala) { #event-adapters-config } It is possible to bind multiple adapters to one class *for recovery*, in which case the `fromJournal` methods of all bound adapters will be applied to a given matching event (in order of definition in the configuration). Since each adapter may @@ -737,7 +737,7 @@ Relationship between incoming messages, FSM's states and transitions, persistenc To demonstrate the features of the `PersistentFSM` trait, consider an actor which represents a Web store customer. The contract of our "WebStoreCustomerFSMActor" is that it accepts the following commands: -@@snip [PersistentFSMSpec.scala]../../../../../akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-commands } +@@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-commands } `AddItem` sent when the customer adds an item to a shopping cart `Buy` - when the customer finishes the purchase @@ -746,7 +746,7 @@ The contract of our "WebStoreCustomerFSMActor" is that it accepts the following The customer can be in one of the following states: -@@snip [PersistentFSMSpec.scala]../../../../../akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-states } +@@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-states } `LookingAround` customer is browsing the site, but hasn't added anything to the shopping cart `Shopping` customer has recently added items to the shopping cart @@ -764,15 +764,15 @@ String identifiers should be unique! Customer's actions are "recorded" as a sequence of "domain events" which are persisted. 
Those events are replayed on an actor's start in order to restore the latest customer's state: -@@snip [PersistentFSMSpec.scala]../../../../../akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-domain-events } +@@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-domain-events } Customer state data represents the items in a customer's shopping cart: -@@snip [PersistentFSMSpec.scala]../../../../../akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-states-data } +@@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-states-data } Here is how everything is wired together: -@@snip [PersistentFSMSpec.scala]../../../../../akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-fsm-body } +@@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-fsm-body } @@@ note @@ -781,16 +781,16 @@ Override the `applyEvent` method to define how state data is affected by domain @@@ -@@snip [PersistentFSMSpec.scala]../../../../../akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-apply-event } +@@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-apply-event } `andThen` can be used to define actions which will be executed following event's persistence - convenient for "side effects" like sending a message or logging. Notice that actions defined in `andThen` block are not executed on recovery: -@@snip [PersistentFSMSpec.scala]../../../../../akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-andthen-example } +@@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-andthen-example } A snapshot of state data can be persisted by calling the `saveStateSnapshot()` method: -@@snip [PersistentFSMSpec.scala]../../../../../akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-snapshot-example } +@@snip [PersistentFSMSpec.scala]($akka$/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala) { #customer-snapshot-example } On recovery state data is initialized according to the latest available snapshot, then the remaining domain events are replayed, triggering the `applyEvent` method. @@ -820,7 +820,7 @@ For an example of a snapshot store plugin which writes snapshots as individual f Applications can provide their own plugins by implementing a plugin API and activating them by configuration. Plugin development requires the following imports: -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #plugin-imports } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #plugin-imports } ### Eager initialization of persistence plugin @@ -836,19 +836,19 @@ A journal plugin extends `AsyncWriteJournal`. 
`AsyncWriteJournal` is an actor and the methods to be implemented are: -@@snip [AsyncWriteJournal.scala]../../../../../akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala) { #journal-plugin-api } +@@snip [AsyncWriteJournal.scala]($akka$/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala) { #journal-plugin-api } If the storage backend API only supports synchronous, blocking writes, the methods should be implemented as: -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #sync-journal-plugin-api } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #sync-journal-plugin-api } A journal plugin must also implement the methods defined in `AsyncRecovery` for replays and sequence number recovery: -@@snip [AsyncRecovery.scala]../../../../../akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala) { #journal-plugin-api } +@@snip [AsyncRecovery.scala]($akka$/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala) { #journal-plugin-api } A journal plugin can be activated with the following minimal configuration: -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #journal-plugin-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-plugin-config } The journal plugin instance is an actor so the methods corresponding to requests from persistent actors are executed sequentially. It may delegate to asynchronous libraries, spawn futures, or delegate to other @@ -872,11 +872,11 @@ Don't run journal tasks/futures on the system default dispatcher, since that mig A snapshot store plugin must extend the `SnapshotStore` actor and implement the following methods: -@@snip [SnapshotStore.scala]../../../../../akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala) { #snapshot-store-plugin-api } +@@snip [SnapshotStore.scala]($akka$/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala) { #snapshot-store-plugin-api } A snapshot store plugin can be activated with the following minimal configuration: -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-store-plugin-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-store-plugin-config } The snapshot store instance is an actor so the methods corresponding to requests from persistent actors are executed sequentially. It may delegate to asynchronous libraries, spawn futures, or delegate to other @@ -908,7 +908,7 @@ The TCK is usable from Java as well as Scala projects. For Scala you need to inc To include the Journal TCK tests in your test suite simply extend the provided `JournalSpec`: -@@snip [PersistencePluginDocSpec.scala](./code/docs/persistence/PersistencePluginDocSpec.scala) { #journal-tck-scala } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-tck-scala } Please note that some of the tests are optional, and by overriding the `supports...` methods you give the TCK the needed information about which tests to run. You can implement these methods using boolean falues or the @@ -921,12 +921,12 @@ typical scenarios. 
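Since the referenced TCK snippets are only linked here, a minimal shape of such a Journal TCK suite is sketched below. The plugin id `my-journal` and the chosen capability value are assumptions for illustration, and the exact set of `supports...` methods depends on the Akka version.

```scala
import akka.persistence.CapabilityFlag
import akka.persistence.journal.JournalSpec
import com.typesafe.config.ConfigFactory

// run the TCK against a (hypothetical) plugin registered under the id "my-journal";
// the "my-journal" entry itself (class, dispatcher, ...) must be defined elsewhere in the test config
class MyJournalSpec extends JournalSpec(
  config = ConfigFactory.parseString(
    """
    akka.persistence.journal.plugin = "my-journal"
    """)) {

  // tell the TCK whether the optional rejection behaviour is supported by the plugin
  override def supportsRejectingNonSerializableObjects: CapabilityFlag =
    CapabilityFlag.off()
}
```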
In order to include the `SnapshotStore` TCK tests in your test suite simply extend the `SnapshotStoreSpec`: -@@snip [PersistencePluginDocSpec.scala](./code/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-store-tck-scala } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-store-tck-scala } In case your plugin requires some setting up (starting a mock database, removing temporary files etc.) you can override the `beforeAll` and `afterAll` methods to hook into the tests lifecycle: -@@snip [PersistencePluginDocSpec.scala](./code/docs/persistence/PersistencePluginDocSpec.scala) { #journal-tck-before-after-scala } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-tck-before-after-scala } We *highly recommend* including these specifications in your test suite, as they cover a broad range of cases you might have otherwise forgotten to test for when writing a plugin from scratch. @@ -940,7 +940,7 @@ might have otherwise forgotten to test for when writing a plugin from scratch. The LevelDB journal plugin config entry is `akka.persistence.journal.leveldb`. It writes messages to a local LevelDB instance. Enable this plugin by defining config property: -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-plugin-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-plugin-config } LevelDB based plugins will also require the following additional dependency declaration: @@ -952,7 +952,7 @@ LevelDB based plugins will also require the following additional dependency decl The default location of LevelDB files is a directory named `journal` in the current working directory. This location can be changed by configuration where the specified path can be relative or absolute: -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #journal-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #journal-config } With this plugin, each actor system runs its own private LevelDB instance. @@ -978,22 +978,22 @@ This plugin has been supplanted by [Persistence Plugin Proxy](#persistence-plugi A shared LevelDB instance is started by instantiating the `SharedLeveldbStore` actor. -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-creation } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-creation } By default, the shared instance writes journaled messages to a local directory named `journal` in the current working directory. The storage location can be changed by configuration: -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-config } Actor systems that use a shared LevelDB store must activate the `akka.persistence.journal.leveldb-shared` plugin. 
-@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #shared-journal-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-journal-config } This plugin must be initialized by injecting the (remote) `SharedLeveldbStore` actor reference. Injection is done by calling the `SharedLeveldbJournal.setStore` method with the actor reference as argument. -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-usage } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-usage } Internal journal commands (sent by persistent actors) are buffered until injection completes. Injection is idempotent i.e. only the first injection is used. @@ -1004,12 +1004,12 @@ i.e. only the first injection is used. The local snapshot store plugin config entry is `akka.persistence.snapshot-store.local`. It writes snapshot files to the local filesystem. Enable this plugin by defining config property: -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-snapshot-plugin-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-snapshot-plugin-config } The default storage location is a directory named `snapshots` in the current working directory. This can be changed by configuration where the specified path can be relative or absolute: -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #snapshot-config } Note that it is not mandatory to specify a snapshot store plugin. If you don't use snapshots you don't have to configure it. @@ -1063,7 +1063,7 @@ Serialization of snapshots and payloads of `Persistent` messages is configurable it must add -@@snip [PersistenceSerializerDocSpec.scala](code/docs/persistence/PersistenceSerializerDocSpec.scala) { #custom-serializer-config } +@@snip [PersistenceSerializerDocSpec.scala]($code$/scala/docs/persistence/PersistenceSerializerDocSpec.scala) { #custom-serializer-config } to the application configuration. If not specified, a default serializer is used. @@ -1073,11 +1073,11 @@ For more advanced schema evolution techniques refer to the @ref:[Persistence - S When running tests with LevelDB default settings in `sbt`, make sure to set `fork := true` in your sbt project. Otherwise, you'll see an `UnsatisfiedLinkError`. Alternatively, you can switch to a LevelDB Java port by setting -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #native-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #native-config } or -@@snip [PersistencePluginDocSpec.scala](code/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-native-config } +@@snip [PersistencePluginDocSpec.scala]($code$/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-native-config } in your Akka configuration. The LevelDB Java port is for testing purposes only. 
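The two snippets referenced just above boil down to flipping the `native` flag of the LevelDB plugins. As a hedged sketch (the exact keys should be checked against the snipped sources; the shared-store variant in particular is an assumption here), a test configuration could be assembled like this:

```scala
import com.typesafe.config.ConfigFactory

// switch the local and the shared LevelDB journal to the Java port for tests
val levelDbJavaPortConfig = ConfigFactory.parseString("""
  akka.persistence.journal.leveldb.native = off
  akka.persistence.journal.leveldb-shared.store.native = off
  """)
```

Such a `Config` can then be passed to `ActorSystem("name", levelDbJavaPortConfig)` in the test setup.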
@@ -1102,18 +1102,18 @@ to the @ref:[reference configuration](../scala/general/configuration.md#config-a By default, a persistent actor or view will use the "default" journal and snapshot store plugins configured in the following sections of the `reference.conf` configuration resource: -@@snip [PersistenceMultiDocSpec.scala](code/docs/persistence/PersistenceMultiDocSpec.scala) { #default-config } +@@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #default-config } Note that in this case the actor or view overrides only the `persistenceId` method: -@@snip [PersistenceMultiDocSpec.scala](code/docs/persistence/PersistenceMultiDocSpec.scala) { #default-plugins } +@@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #default-plugins } When the persistent actor or view overrides the `journalPluginId` and `snapshotPluginId` methods, the actor or view will be serviced by these specific persistence plugins instead of the defaults: -@@snip [PersistenceMultiDocSpec.scala](code/docs/persistence/PersistenceMultiDocSpec.scala) { #override-plugins } +@@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #override-plugins } Note that `journalPluginId` and `snapshotPluginId` must refer to properly configured `reference.conf` plugin entries with a standard `class` property as well as settings which are specific for those plugins, i.e.: -@@snip [PersistenceMultiDocSpec.scala](code/docs/persistence/PersistenceMultiDocSpec.scala) { #override-config } \ No newline at end of file +@@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #override-config } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/remoting-artery.md b/akka-docs/src/main/paradox/scala/remoting-artery.md index dc2cf66a52..1fa12aab51 100644 --- a/akka-docs/src/main/paradox/scala/remoting-artery.md +++ b/akka-docs/src/main/paradox/scala/remoting-artery.md @@ -205,7 +205,7 @@ which in this sample corresponds to `sampleActorSystem@127.0.0.1:2553`. Once you have configured the properties above you would do the following in code: -@@snip [RemoteDeploymentDocSpec.scala](code/docs/remoting/RemoteDeploymentDocSpec.scala) { #sample-actor } +@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #sample-actor } The actor class `SampleActor` has to be available to the runtimes using it, i.e. the classloader of the actor systems has to have a JAR containing the class. @@ -241,15 +241,15 @@ precedence. 
With these imports: -@@snip [RemoteDeploymentDocSpec.scala](code/docs/remoting/RemoteDeploymentDocSpec.scala) { #import } +@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #import } and a remote address like this: -@@snip [RemoteDeploymentDocSpec.scala](code/docs/remoting/RemoteDeploymentDocSpec.scala) { #make-address-artery } +@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #make-address-artery } you can advise the system to create a child on that remote node like so: -@@snip [RemoteDeploymentDocSpec.scala](code/docs/remoting/RemoteDeploymentDocSpec.scala) { #deploy } +@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #deploy } ### Remote deployment whitelist @@ -264,7 +264,7 @@ The list of allowed classes has to be configured on the "remote" system, in othe others will be attempting to remote deploy Actors. That system, locally, knows best which Actors it should or should not allow others to remote deploy onto it. The full settings section may for example look like this: -@@snip [RemoteDeploymentWhitelistSpec.scala]../../../../../akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } +@@snip [RemoteDeploymentWhitelistSpec.scala]($akka$/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } Actor classes not included in the whitelist will not be allowed to be remote deployed onto this system. @@ -483,7 +483,7 @@ remained the same, we recommend reading the @ref:[Serialization](serialization.m Implementing an `akka.serialization.ByteBufferSerializer` works the same way as any other serializer, -@@snip [Serializer.scala]../../../../../akka-actor/src/main/scala/akka/serialization/Serializer.scala) { #ByteBufferSerializer } +@@snip [Serializer.scala]($akka$/akka-actor/src/main/scala/akka/serialization/Serializer.scala) { #ByteBufferSerializer } Implementing a serializer for Artery is therefore as simple as implementing this interface, and binding the serializer as usual (which is explained in @ref:[Serialization](serialization.md)). @@ -494,7 +494,7 @@ The array based methods will be used when `ByteBuffer` is not used, e.g. in Akka Note that the array based methods can be implemented by delegation like this: -@@snip [ByteBufferSerializerDocSpec.scala](code/docs/actor/ByteBufferSerializerDocSpec.scala) { #bytebufserializer-with-manifest } +@@snip [ByteBufferSerializerDocSpec.scala]($code$/scala/docs/actor/ByteBufferSerializerDocSpec.scala) { #bytebufserializer-with-manifest } ### Disabling the Java Serializer @@ -551,14 +551,14 @@ It is absolutely feasible to combine remoting with @ref:[Routing](routing.md). A pool of remote deployed routees can be configured as: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool-artery } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool-artery } This configuration setting will clone the actor defined in the `Props` of the `remotePool` 10 times and deploy it evenly distributed across the two given target nodes. 
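The same remote pool can also be put together programmatically. A rough sketch (the `Echo` routee, system names, hosts and ports are assumptions for illustration; Artery uses the plain `akka` protocol in its addresses, and remoting must be enabled in the configuration of the actor system):

```scala
import akka.actor.{ Actor, ActorSystem, Address, AddressFromURIString, Props }
import akka.remote.routing.RemoteRouterConfig
import akka.routing.RoundRobinPool

class Echo extends Actor {
  def receive = { case msg => sender() ! msg }
}

object RemotePoolExample extends App {
  val system = ActorSystem("sampleActorSystem")

  // the target nodes the routees are deployed onto
  val addresses = Seq(
    Address("akka", "remoteSys", "otherhost", 1234),
    AddressFromURIString("akka://otherSys@anotherhost:1234"))

  // clone the routee Props 10 times, distributed evenly over the given nodes
  val routerRemote = system.actorOf(
    RemoteRouterConfig(RoundRobinPool(10), addresses).props(Props[Echo]),
    "remotePool")
}
```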
A group of remote actors can be configured as: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group-artery } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group-artery } This configuration setting will send messages to the defined remote actor paths. It requires that you create the destination actors on the remote nodes with matching paths. @@ -761,7 +761,7 @@ There are lots of configuration properties that are related to remoting in Akka. Setting properties like the listening IP and port number programmatically is best done by using something like the following: -@@snip [RemoteDeploymentDocTest.java](../java/code/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic-artery } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic-artery } @@@ diff --git a/akka-docs/src/main/paradox/scala/remoting.md b/akka-docs/src/main/paradox/scala/remoting.md index 1bb5cc03ce..b5e341dd98 100644 --- a/akka-docs/src/main/paradox/scala/remoting.md +++ b/akka-docs/src/main/paradox/scala/remoting.md @@ -141,7 +141,7 @@ which in this sample corresponds to `sampleActorSystem@127.0.0.1:2553`. Once you have configured the properties above you would do the following in code: -@@snip [RemoteDeploymentDocSpec.scala](code/docs/remoting/RemoteDeploymentDocSpec.scala) { #sample-actor } +@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #sample-actor } The actor class `SampleActor` has to be available to the runtimes using it, i.e. the classloader of the actor systems has to have a JAR containing the class. @@ -181,15 +181,15 @@ precedence. With these imports: -@@snip [RemoteDeploymentDocSpec.scala](code/docs/remoting/RemoteDeploymentDocSpec.scala) { #import } +@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #import } and a remote address like this: -@@snip [RemoteDeploymentDocSpec.scala](code/docs/remoting/RemoteDeploymentDocSpec.scala) { #make-address } +@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #make-address } you can advise the system to create a child on that remote node like so: -@@snip [RemoteDeploymentDocSpec.scala](code/docs/remoting/RemoteDeploymentDocSpec.scala) { #deploy } +@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #deploy } ### Remote deployment whitelist @@ -205,7 +205,7 @@ The list of allowed classes has to be configured on the "remote" system, in othe others will be attempting to remote deploy Actors. That system, locally, knows best which Actors it should or should not allow others to remote deploy onto it. The full settings section may for example look like this: -@@snip [RemoteDeploymentWhitelistSpec.scala]../../../../../akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } +@@snip [RemoteDeploymentWhitelistSpec.scala]($akka$/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } Actor classes not included in the whitelist will not be allowed to be remote deployed onto this system. @@ -380,14 +380,14 @@ It is absolutely feasible to combine remoting with @ref:[Routing](routing.md). 
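For reference, the import, address and deployment steps shown earlier in this section typically combine into something like the following sketch (the `SampleActor` body and the concrete host and port are assumptions; classic remoting uses the `akka.tcp` protocol and must be enabled in configuration):

```scala
import akka.actor.{ Actor, ActorSystem, Address, Deploy, Props }
import akka.remote.RemoteScope

class SampleActor extends Actor {
  def receive = { case msg => sender() ! msg }
}

object RemoteDeployExample extends App {
  val system = ActorSystem("sampleActorSystem")

  // the remote node that should host the new actor
  val address = Address("akka.tcp", "sampleActorSystem", "127.0.0.1", 2553)

  // create the child actor remotely by attaching a RemoteScope deployment
  val sampleActor = system.actorOf(
    Props[SampleActor].withDeploy(Deploy(scope = RemoteScope(address))),
    "sampleActor")
}
```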
A pool of remote deployed routees can be configured as: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool } This configuration setting will clone the actor defined in the `Props` of the `remotePool` 10 times and deploy it evenly distributed across the two given target nodes. A group of remote actors can be configured as: -@@snip [RouterDocSpec.scala](../scala/code/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } This configuration setting will send messages to the defined remote actor paths. It requires that you create the destination actors on the remote nodes with matching paths. @@ -626,7 +626,7 @@ There are lots of configuration properties that are related to remoting in Akka. Setting properties like the listening IP and port number programmatically is best done by using something like the following: -@@snip [RemoteDeploymentDocTest.java](../java/code/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic } +@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic } @@@ diff --git a/akka-docs/src/main/paradox/scala/routing.md b/akka-docs/src/main/paradox/scala/routing.md index cd281b3fdc..0d3dbb9ae5 100644 --- a/akka-docs/src/main/paradox/scala/routing.md +++ b/akka-docs/src/main/paradox/scala/routing.md @@ -13,7 +13,7 @@ also possible to [create your own](#custom-router-scala). The following example illustrates how to use a `Router` and manage the routees from within an actor. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #router-in-actor } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #router-in-actor } We create a `Router` and specify that it should use `RoundRobinRoutingLogic` when routing the messages to the routees. @@ -81,14 +81,14 @@ few exceptions. These are documented in the [Specially Handled Messages](#router The following code and configuration snippets show how to create a [round-robin](#round-robin-router-scala) router that forwards messages to five `Worker` routees. The routees will be created as the router's children. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #round-robin-pool-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-1 } Here is the same example, but with the router configuration provided programmatically instead of from configuration. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #round-robin-pool-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-2 } #### Remote Deployed Routees @@ -98,20 +98,20 @@ fashion. In order to deploy routees remotely, wrap the router configuration in a `RemoteRouterConfig`, attaching the remote addresses of the nodes to deploy to. Remote deployment requires the `akka-remote` module to be included in the classpath. 
-@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #remoteRoutees } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #remoteRoutees } #### Senders By default, when a routee sends a message, it will @ref:[implicitly set itself as the sender ](actors.md#actors-tell-sender-scala). -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #reply-without-sender } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #reply-without-sender } However, it is often useful for routees to set the *router* as a sender. For example, you might want to set the router as the sender if you want to hide the details of the routees behind the router. The following code snippet shows how to set the parent router as sender. -@@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #reply-with-sender } +@@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #reply-with-sender } #### Supervision @@ -139,7 +139,7 @@ by specifying the strategy when defining the router. Setting the strategy is easily done: -@@snip [RoutingSpec.scala]../../../../../akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala) { #supervision } +@@snip [RoutingSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala) { #supervision } @@@ note @@ -160,25 +160,25 @@ to these paths. The example below shows how to create a router by providing it with the path strings of three routee actors. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #round-robin-group-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-group-1 } Here is the same example, but with the router configuration provided programmatically instead of from configuration. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #round-robin-group-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-group-2 } The routee actors are created externally from the router: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #create-workers } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #create-workers } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #create-worker-actors } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #create-worker-actors } The paths may contain protocol and address information for actors running on remote hosts. Remoting requires the `akka-remote` module to be included in the classpath. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group } ## Router usage @@ -188,7 +188,7 @@ The router actors in this section are created from within a top level actor name Note that deployment paths in the configuration starts with `/parent/` followed by the name of the router actor. 
-@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #create-parent } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #create-parent } ### RoundRobinPool and RoundRobinGroup @@ -197,23 +197,23 @@ Routes in a [round-robin](http://en.wikipedia.org/wiki/Round-robin) fashion to i RoundRobinPool defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-pool } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #round-robin-pool-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-1 } RoundRobinPool defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #round-robin-pool-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-pool-2 } RoundRobinGroup defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-round-robin-group } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #round-robin-group-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #round-robin-group-1 } RoundRobinGroup defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #paths #round-robin-group-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #round-robin-group-2 } ### RandomPool and RandomGroup @@ -221,23 +221,23 @@ This router type selects one of its routees randomly for each message. 
RandomPool defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-random-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-random-pool } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #random-pool-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #random-pool-1 } RandomPool defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #random-pool-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #random-pool-2 } RandomGroup defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-random-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-random-group } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #random-group-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #random-group-1 } RandomGroup defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #paths #random-group-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #random-group-2 } ### BalancingPool @@ -269,19 +269,19 @@ as described in [Specially Handled Messages](#router-special-messages-scala), BalancingPool defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-balancing-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #balancing-pool-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #balancing-pool-1 } BalancingPool defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #balancing-pool-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #balancing-pool-2 } Additional configuration for the balancing dispatcher, which is used by the pool, can be specified in the `pool-dispatcher` section of the router deployment configuration. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-balancing-pool2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool2 } The `BalancingPool` automatically uses a special `BalancingDispatcher` for its routees - disregarding any dispatcher that is set on the routee Props object. @@ -294,14 +294,14 @@ can be configured as explained in @ref:[Dispatchers](dispatchers.md). In situati routees are expected to perform blocking operations it may be useful to replace it with a `thread-pool-executor` hinting the number of allocated threads explicitly: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-balancing-pool3 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool3 } It is also possible to change the `mailbox` used by the balancing dispatcher for scenarios where the default unbounded mailbox is not well suited. An example of such a scenario could arise when there is a need to manage priority for each message.
You can then implement a priority mailbox and configure your dispatcher: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-balancing-pool4 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-balancing-pool4 } @@@ note @@ -328,13 +328,13 @@ since their mailbox size is unknown SmallestMailboxPool defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-smallest-mailbox-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-smallest-mailbox-pool } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #smallest-mailbox-pool-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #smallest-mailbox-pool-1 } SmallestMailboxPool defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #smallest-mailbox-pool-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #smallest-mailbox-pool-2 } There is no Group variant of the SmallestMailboxPool because the size of the mailbox and the internal dispatching state of the actor is not practically available from the paths @@ -346,23 +346,23 @@ A broadcast router forwards the message it receives to *all* its routees. BroadcastPool defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-broadcast-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-broadcast-pool } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #broadcast-pool-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcast-pool-1 } BroadcastPool defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #broadcast-pool-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcast-pool-2 } BroadcastGroup defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-broadcast-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-broadcast-group } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #broadcast-group-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcast-group-1 } BroadcastGroup defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #paths #broadcast-group-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #broadcast-group-2 } @@@ note @@ -383,23 +383,23 @@ It is expecting at least one reply within a configured duration, otherwise it wi ScatterGatherFirstCompletedPool defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-pool } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #scatter-gather-pool-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #scatter-gather-pool-1 } ScatterGatherFirstCompletedPool defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #scatter-gather-pool-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #scatter-gather-pool-2 } ScatterGatherFirstCompletedGroup defined in configuration: -@@snip 
[RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-scatter-gather-group } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #scatter-gather-group-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #scatter-gather-group-1 } ScatterGatherFirstCompletedGroup defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #paths #scatter-gather-group-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #scatter-gather-group-2 } ### TailChoppingPool and TailChoppingGroup @@ -415,23 +415,23 @@ This optimisation was described nicely in a blog post by Peter Bailis: TailChoppingPool defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-pool } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #tail-chopping-pool-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #tail-chopping-pool-1 } TailChoppingPool defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #tail-chopping-pool-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #tail-chopping-pool-2 } TailChoppingGroup defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-tail-chopping-group } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #tail-chopping-group-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #tail-chopping-group-1 } TailChoppingGroup defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #paths #tail-chopping-group-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #tail-chopping-group-2 } ### ConsistentHashingPool and ConsistentHashingGroup @@ -457,9 +457,9 @@ the same time for one router. The `hashMapping` is tried first. Code example: -@@snip [ConsistentHashingRouterDocSpec.scala](code/docs/routing/ConsistentHashingRouterDocSpec.scala) { #cache-actor } +@@snip [ConsistentHashingRouterDocSpec.scala]($code$/scala/docs/routing/ConsistentHashingRouterDocSpec.scala) { #cache-actor } -@@snip [ConsistentHashingRouterDocSpec.scala](code/docs/routing/ConsistentHashingRouterDocSpec.scala) { #consistent-hashing-router } +@@snip [ConsistentHashingRouterDocSpec.scala]($code$/scala/docs/routing/ConsistentHashingRouterDocSpec.scala) { #consistent-hashing-router } In the above example you see that the `Get` message implements `ConsistentHashable` itself, while the `Entry` message is wrapped in a `ConsistentHashableEnvelope`. The `Evict` @@ -467,23 +467,23 @@ message is handled by the `hashMapping` partial function. 
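The `hashMapping` mentioned in the last sentence is just a partial function from message to hash key. A sketch of how it could be declared and handed to the pool (the simplified `Cache` actor below is a stand-in for the cache actor of the example above):

```scala
import akka.actor.{ Actor, ActorSystem, Props }
import akka.routing.ConsistentHashingPool
import akka.routing.ConsistentHashingRouter.ConsistentHashMapping

final case class Evict(key: String)

// a stripped-down stand-in for the cache actor of the example
class Cache extends Actor {
  private var store = Map.empty[String, String]
  def receive = {
    case Evict(key) => store -= key
  }
}

object ConsistentHashingExample extends App {
  // route Evict by its key so that the same key always reaches the same routee
  def hashMapping: ConsistentHashMapping = {
    case Evict(key) => key
  }

  val system = ActorSystem("example")
  val cache = system.actorOf(
    ConsistentHashingPool(10, hashMapping = hashMapping).props(Props[Cache]),
    name = "cache")
}
```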
ConsistentHashingPool defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-pool } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #consistent-hashing-pool-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #consistent-hashing-pool-1 } ConsistentHashingPool defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #consistent-hashing-pool-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #consistent-hashing-pool-2 } ConsistentHashingGroup defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-group } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-consistent-hashing-group } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #consistent-hashing-group-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #consistent-hashing-group-1 } ConsistentHashingGroup defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #paths #consistent-hashing-group-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #paths #consistent-hashing-group-2 } `virtual-nodes-factor` is the number of virtual nodes per routee that is used in the consistent hash node ring to make the distribution more uniform. @@ -508,7 +508,7 @@ matter how that router would normally route its messages. The example below shows how you would use a `Broadcast` message to send a very important message to every routee of a router. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #broadcastDavyJonesWarning } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcastDavyJonesWarning } In this example the router receives the `Broadcast` message, extracts its payload (`"Watch out for Davy Jones' locker"`), and then sends the payload on to all of the router's @@ -528,7 +528,7 @@ A `PoisonPill` message has special handling for all actors, including for router receives a `PoisonPill` message, that actor will be stopped. See the @ref:[PoisonPill](actors.md#poison-pill-scala) documentation for details. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #poisonPill } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #poisonPill } For a router, which normally passes on messages to routees, it is important to realise that `PoisonPill` messages are processed by the router only. `PoisonPill` messages sent to a router @@ -546,7 +546,7 @@ router. Instead you should wrap a `PoisonPill` message inside a `Broadcast` mess routee will receive the `PoisonPill` message. Note that this will stop all routees, even if the routees aren't children of the router, i.e. even routees programmatically provided to the router. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #broadcastPoisonPill } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcastPoisonPill } With the code shown above, each routee will receive a `PoisonPill` message. Each routee will continue to process its messages as normal, eventually processing the `PoisonPill`. 
This will @@ -575,14 +575,14 @@ Routees that are children of the router will also be suspended, and will be affe supervision directive that is applied to the router. Routees that are not the routers children, i.e. those that were created externally to the router, will not be affected. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #kill } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #kill } As with the `PoisonPill` message, there is a distinction between killing a router, which indirectly kills its children (who happen to be routees), and killing routees directly (some of whom may not be children.) To kill routees directly the router should be sent a `Kill` message wrapped in a `Broadcast` message. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #broadcastKill } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #broadcastKill } ### Management Messages @@ -614,16 +614,16 @@ pressure is lower than certain threshold. Both thresholds are configurable. Pool with default resizer defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-resize-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-resize-pool } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #resize-pool-1 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #resize-pool-1 } Several more configuration options are available and described in `akka.actor.deployment.default.resizer` section of the reference configuration. Pool with resizer defined in code: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #resize-pool-2 } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #resize-pool-2 } *It is also worth pointing out that if you define the ``router`` in the configuration file then this value will be used instead of any programmatically sent parameters.* @@ -656,9 +656,9 @@ The memory usage is O(n) where n is the number of sizes you allow, i.e. upperBou Pool with `OptimalSizeExploringResizer` defined in configuration: -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-optimal-size-exploring-resize-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-optimal-size-exploring-resize-pool } -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #optimal-size-exploring-resize-pool } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #optimal-size-exploring-resize-pool } Several more configuration options are available and described in `akka.actor.deployment.default.optimal-size-exploring-resizer` section of the reference configuration. 
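As a rough illustration of what a resizable pool looks like when declared in code, here is a sketch using the default resizer discussed earlier (the `Worker` actor and the chosen bounds are arbitrary assumptions; the optimal-size-exploring variant is wired up the same way via the pool's resizer):

```scala
import akka.actor.{ Actor, ActorSystem, Props }
import akka.routing.{ DefaultResizer, RoundRobinPool }

class Worker extends Actor {
  def receive = { case _ => /* handle the message */ }
}

object ResizablePoolExample extends App {
  val system = ActorSystem("example")

  // let the pool grow and shrink between 2 and 15 routees depending on pressure
  val resizer = DefaultResizer(lowerBound = 2, upperBound = 15)
  val router = system.actorOf(
    RoundRobinPool(nrOfInstances = 5, resizer = Some(resizer)).props(Props[Worker]),
    "resizableRouter")
}
```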
@@ -712,7 +712,7 @@ The router created in this example is replicating each message to a few destinat Start with the routing logic: -@@snip [CustomRouterDocSpec.scala](code/docs/routing/CustomRouterDocSpec.scala) { #routing-logic } +@@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #routing-logic } `select` will be called for each message and in this example picks a few destinations by round-robin, by reusing the existing `RoundRobinRoutingLogic`, and wraps the result in a `SeveralRoutees` @@ -722,7 +722,7 @@ The implementation of the routing logic must be thread safe, since it might be u A unit test of the routing logic: -@@snip [CustomRouterDocSpec.scala](code/docs/routing/CustomRouterDocSpec.scala) { #unit-test-logic } +@@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #unit-test-logic } You could stop here and use the `RedundancyRoutingLogic` with an `akka.routing.Router` as described in [A Simple Router](#simple-router-scala). @@ -732,23 +732,23 @@ Let us continue and make this into a self contained, configurable, router actor. Create a class that extends `Pool`, `Group` or `CustomRouterConfig`. That class is a factory for the routing logic and holds the configuration for the router. Here we make it a `Group`. -@@snip [CustomRouterDocSpec.scala](code/docs/routing/CustomRouterDocSpec.scala) { #group } +@@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #group } This can be used exactly as the router actors provided by Akka. -@@snip [CustomRouterDocSpec.scala](code/docs/routing/CustomRouterDocSpec.scala) { #usage-1 } +@@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #usage-1 } Note that we added a constructor in `RedundancyGroup` that takes a `Config` parameter. That makes it possible to define it in configuration. -@@snip [CustomRouterDocSpec.scala](code/docs/routing/CustomRouterDocSpec.scala) { #config } +@@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #config } Note the fully qualified class name in the `router` property. The router class must extend `akka.routing.RouterConfig` (`Pool`, `Group` or `CustomRouterConfig`) and have a constructor with one `com.typesafe.config.Config` parameter. The deployment section of the configuration is passed to the constructor. -@@snip [CustomRouterDocSpec.scala](code/docs/routing/CustomRouterDocSpec.scala) { #usage-2 } +@@snip [CustomRouterDocSpec.scala]($code$/scala/docs/routing/CustomRouterDocSpec.scala) { #usage-2 } ## Configuring Dispatchers @@ -758,7 +758,7 @@ The dispatcher for created children of the pool will be taken from To make it easy to define the dispatcher of the routees of the pool you can define the dispatcher inline in the deployment section of the config. -@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #config-pool-dispatcher } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-pool-dispatcher } That is the only thing you need to do to enable a dedicated dispatcher for a pool. @@ -779,7 +779,7 @@ the actor system’s default dispatcher. All standard routers allow setting this property in their constructor or factory method; custom routers have to implement the method in a suitable way.
-@@snip [RouterDocSpec.scala](code/docs/routing/RouterDocSpec.scala) { #dispatchers } +@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #dispatchers } @@@ note diff --git a/akka-docs/src/main/paradox/scala/scheduler.md b/akka-docs/src/main/paradox/scala/scheduler.md index bc0b1f2a71..aee5cf960f 100644 --- a/akka-docs/src/main/paradox/scala/scheduler.md +++ b/akka-docs/src/main/paradox/scala/scheduler.md @@ -36,11 +36,11 @@ by the `akka.scheduler.tick-duration` configuration property. ## Some examples -@@snip [SchedulerDocSpec.scala](code/docs/actor/SchedulerDocSpec.scala) { #imports1 #schedule-one-off-message } +@@snip [SchedulerDocSpec.scala]($code$/scala/docs/actor/SchedulerDocSpec.scala) { #imports1 #schedule-one-off-message } -@@snip [SchedulerDocSpec.scala](code/docs/actor/SchedulerDocSpec.scala) { #schedule-one-off-thunk } +@@snip [SchedulerDocSpec.scala]($code$/scala/docs/actor/SchedulerDocSpec.scala) { #schedule-one-off-thunk } -@@snip [SchedulerDocSpec.scala](code/docs/actor/SchedulerDocSpec.scala) { #schedule-recurring } +@@snip [SchedulerDocSpec.scala]($code$/scala/docs/actor/SchedulerDocSpec.scala) { #schedule-recurring } @@@ warning @@ -55,7 +55,7 @@ necessary parameters) and then call the method when the message is received. ## From `akka.actor.ActorSystem` -@@snip [ActorSystem.scala]../../../../../akka-actor/src/main/scala/akka/actor/ActorSystem.scala) { #scheduler } +@@snip [ActorSystem.scala]($akka$/akka-actor/src/main/scala/akka/actor/ActorSystem.scala) { #scheduler } @@@ warning @@ -71,7 +71,7 @@ The actual scheduler implementation is loaded reflectively upon different one using the `akka.scheduler.implementation` configuration property. The referenced class must implement the following interface: -@@snip [Scheduler.scala]../../../../../akka-actor/src/main/scala/akka/actor/Scheduler.scala) { #scheduler } +@@snip [Scheduler.scala]($akka$/akka-actor/src/main/scala/akka/actor/Scheduler.scala) { #scheduler } ## The Cancellable interface @@ -87,4 +87,4 @@ scheduled task was canceled or will (eventually) have run. 
@@@ -@@snip [Scheduler.scala]../../../../../akka-actor/src/main/scala/akka/actor/Scheduler.scala) { #cancellable } \ No newline at end of file +@@snip [Scheduler.scala]($akka$/akka-actor/src/main/scala/akka/actor/Scheduler.scala) { #cancellable } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/serialization.md b/akka-docs/src/main/paradox/scala/serialization.md index d205e2b4fb..d47c72d117 100644 --- a/akka-docs/src/main/paradox/scala/serialization.md +++ b/akka-docs/src/main/paradox/scala/serialization.md @@ -12,12 +12,12 @@ For Akka to know which `Serializer` to use for what, you need edit your [Configu in the "akka.actor.serializers"-section you bind names to implementations of the `akka.serialization.Serializer` you wish to use, like this: -@@snip [SerializationDocSpec.scala](code/docs/serialization/SerializationDocSpec.scala) { #serialize-serializers-config } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-serializers-config } After you've bound names to different implementations of `Serializer` you need to wire which classes should be serialized using which `Serializer`, this is done in the "akka.actor.serialization-bindings"-section: -@@snip [SerializationDocSpec.scala](code/docs/serialization/SerializationDocSpec.scala) { #serialization-bindings-config } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialization-bindings-config } You only need to specify the name of an interface or abstract base class of the messages. In case of ambiguity, i.e. the message implements several of the @@ -53,11 +53,11 @@ akka.actor.serialization-bindings { Normally, messages sent between local actors (i.e. same JVM) do not undergo serialization. For testing, sometimes, it may be desirable to force serialization on all messages (both remote and local). 
If you want to do this in order to verify that your messages are serializable you can enable the following config option: -@@snip [SerializationDocSpec.scala](code/docs/serialization/SerializationDocSpec.scala) { #serialize-messages-config } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-messages-config } If you want to verify that your `Props` are serializable you can enable the following config option: -@@snip [SerializationDocSpec.scala](code/docs/serialization/SerializationDocSpec.scala) { #serialize-creators-config } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-creators-config } @@@ warning @@ -70,7 +70,7 @@ We recommend having these config options turned on **only** when you're running If you want to programmatically serialize/deserialize using Akka Serialization, here's some examples: -@@snip [SerializationDocSpec.scala](code/docs/serialization/SerializationDocSpec.scala) { #imports #programmatic } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #imports #programmatic } For more information, have a look at the `ScalaDoc` for `akka.serialization._` @@ -82,7 +82,7 @@ The first code snippet on this page contains a configuration file that reference A custom `Serializer` has to inherit from `akka.serialization.Serializer` and can be defined like the following: -@@snip [SerializationDocSpec.scala](code/docs/serialization/SerializationDocSpec.scala) { #imports #my-own-serializer } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #imports #my-own-serializer } The manifest is a type hint so that the same serializer can be used for different classes. The manifest parameter in `fromBinary` is the class of the object that @@ -111,7 +111,7 @@ class name if you used `includeManifest=true`, otherwise it will be the empty st This is how a `SerializerWithStringManifest` looks like: -@@snip [SerializationDocSpec.scala](code/docs/serialization/SerializationDocSpec.scala) { #my-own-serializer2 } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #my-own-serializer2 } You must also bind it to a name in your [Configuration]() and then list which classes that should be serialized using it. @@ -133,7 +133,7 @@ In the general case, the local address to be used depends on the type of remote address which shall be the recipient of the serialized information. Use `Serialization.serializedActorPath(actorRef)` like this: -@@snip [SerializationDocSpec.scala](code/docs/serialization/SerializationDocSpec.scala) { #imports #actorref-serializer } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #imports #actorref-serializer } This assumes that serialization happens in the context of sending a message through the remote transport. There are other uses of serialization, though, @@ -148,7 +148,7 @@ transport per se, which makes this question a bit more interesting. 
To find out the appropriate address to use when sending to `remoteAddr` you can use `ActorRefProvider.getExternalAddressFor(remoteAddr)` like this: -@@snip [SerializationDocSpec.scala](code/docs/serialization/SerializationDocSpec.scala) { #external-address } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #external-address } @@@ note @@ -174,7 +174,7 @@ lenient as Akka’s RemoteActorRefProvider). There is also a default remote address which is the one used by cluster support (and typical systems have just this one); you can get it like this: -@@snip [SerializationDocSpec.scala](code/docs/serialization/SerializationDocSpec.scala) { #external-address-default } +@@snip [SerializationDocSpec.scala]($code$/scala/docs/serialization/SerializationDocSpec.scala) { #external-address-default } ### Deep serialization of Actors diff --git a/akka-docs/src/main/paradox/scala/stream/stream-composition.md b/akka-docs/src/main/paradox/scala/stream/stream-composition.md index 0cd55f4373..2815f63367 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-composition.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-composition.md @@ -78,7 +78,7 @@ with the rest of the graph), but this demonstrates the uniform underlying model. If we try to build a code snippet that corresponds to the above diagram, our first try might look like this: -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #non-nested-flow } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #non-nested-flow } It is clear however that there is no nesting present in our first attempt, since the library cannot figure out where we intended to put composite module boundaries, it is our responsibility to do that. If we are using the @@ -87,7 +87,7 @@ methods `withAttributes()` or `named()` (where the latter is just a shorthand fo The following code demonstrates how to achieve the desired nesting: -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #nested-flow } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #nested-flow } Once we have hidden the internals of our components, they act like any other built-in component of similar shape. If we hide some of the internals of our composites, the result looks just like if any other predefine component has been @@ -102,7 +102,7 @@ used: If we look at usage of built-in components, and our custom components, there is no difference in usage as the code snippet below demonstrates. -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #reuse } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #reuse } ## Composing complex systems @@ -126,13 +126,13 @@ can be materialized) that encapsulates a non-trivial stream processing network. directed and non-directed cycles. The `runnable()` method of the `GraphDSL` object allows the creation of a general, closed, and runnable graph. For example the network on the diagram can be realized like this: -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #complex-graph } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #complex-graph } In the code above we used the implicit port numbering feature (to make the graph more readable and similar to the diagram) and we imported `Source` s, `Sink` s and `Flow` s explicitly. 
It is possible to refer to the ports explicitly, and it is not necessary to import our linear stages via `add()`, so another version might look like this: -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #complex-graph-alt } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #complex-graph-alt } | @@ -149,7 +149,7 @@ from the previous example, what remains is a partial graph: We can recreate a similar graph in code, using the DSL in a similar way than before: -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #partial-graph } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #partial-graph } The only new addition is the return value of the builder block, which is a `Shape`. All graphs (including `Source`, `BidiFlow`, etc) have a shape, which encodes the *typed* ports of the module. In our example @@ -168,7 +168,7 @@ it is a good practice to give names to modules to help debugging. Since our partial graph has the right shape, it can be already used in the simpler, linear DSL: -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #partial-use } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #partial-use } It is not possible to use it as a `Flow` yet, though (i.e. we cannot call `.filter()` on it), but `Flow` has a `fromGraph()` method that just adds the DSL to a `FlowShape`. There are similar methods on `Source`, @@ -184,7 +184,7 @@ To demonstrate this, we will create the following graph: The code version of the above closed graph might look like this: -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #partial-flow-dsl } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #partial-flow-dsl } @@@ note @@ -196,7 +196,7 @@ throw an exception if this is violated. We are still in debt of demonstrating that `RunnableGraph` is a component just like any other, which can be embedded in graphs. In the following snippet we embed one closed graph in another: -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #embed-closed } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #embed-closed } The type of the imported module indicates that the imported module has a `ClosedShape`, and so we are not able to wire it to anything else inside the enclosing closed graph. Nevertheless, this "island" is embedded properly, @@ -247,27 +247,27 @@ To implement the above, first, we create a composite `Source`, where the enclose materialized type of `Promise[[Option[Int]]`. By using the combiner function `Keep.left`, the resulting materialized type is of the nested module (indicated by the color *red* on the diagram): -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #mat-combine-1 } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #mat-combine-1 } Next, we create a composite `Flow` from two smaller components. 
Here, the second enclosed `Flow` has a materialized type of `Future[OutgoingConnection]`, and we propagate this to the parent by using `Keep.right` as the combiner function (indicated by the color *yellow* on the diagram): -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #mat-combine-2 } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #mat-combine-2 } As a third step, we create a composite `Sink`, using our `nestedFlow` as a building block. In this snippet, both the enclosed `Flow` and the folding `Sink` has a materialized value that is interesting for us, so we use `Keep.both` to get a `Pair` of them as the materialized type of `nestedSink` (indicated by the color *blue* on the diagram) -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #mat-combine-3 } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #mat-combine-3 } As the last example, we wire together `nestedSource` and `nestedSink` and we use a custom combiner function to create a yet another materialized type of the resulting `RunnableGraph`. This combiner function just ignores the `Future[Sink]` part, and wraps the other two values in a custom case class `MyClass` (indicated by color *purple* on the diagram): -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #mat-combine-4 } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #mat-combine-4 } @@@ note @@ -289,7 +289,7 @@ by nested modules, unless they override them with a custom value. The code below, a modification of an earlier example sets the `inputBuffer` attribute on certain modules, but not on others: -@@snip [CompositionDocSpec.scala](../code/docs/stream/CompositionDocSpec.scala) { #attributes-inheritance } +@@snip [CompositionDocSpec.scala]($code$/scala/docs/stream/CompositionDocSpec.scala) { #attributes-inheritance } The effect is, that each module inherits the `inputBuffer` attribute from its enclosing parent, unless it has the same attribute explicitly set. `nestedSource` gets the default attributes from the materializer itself. `nestedSink` diff --git a/akka-docs/src/main/paradox/scala/stream/stream-cookbook.md b/akka-docs/src/main/paradox/scala/stream/stream-cookbook.md index 57980ad4e6..4782bd0929 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-cookbook.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-cookbook.md @@ -25,12 +25,12 @@ general, more targeted recipes are available as separate sections (@ref:[Buffers The simplest solution is to simply use a `map` operation and use `println` to print the elements received to the console. While this recipe is rather simplistic, it is often suitable for a quick debug session. -@@snip [RecipeLoggingElements.scala](../code/docs/stream/cookbook/RecipeLoggingElements.scala) { #println-debug } +@@snip [RecipeLoggingElements.scala]($code$/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #println-debug } Another approach to logging is to use `log()` operation which allows configuring logging for elements flowing through the stream as well as completion and erroring. 
-@@snip [RecipeLoggingElements.scala](../code/docs/stream/cookbook/RecipeLoggingElements.scala) { #log-custom } +@@snip [RecipeLoggingElements.scala]($code$/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #log-custom } ### Flattening a stream of sequences @@ -41,7 +41,7 @@ The `mapConcat` operation can be used to implement a one-to-many transformation in the form of `In => immutable.Seq[Out]`. In this case we want to map a `Seq` of elements to the elements in the collection itself, so we can just call `mapConcat(identity)`. -@@snip [RecipeFlattenSeq.scala](../code/docs/stream/cookbook/RecipeFlattenSeq.scala) { #flattening-seqs } +@@snip [RecipeFlattenSeq.scala]($code$/scala/docs/stream/cookbook/RecipeFlattenSeq.scala) { #flattening-seqs } ### Draining a stream to a strict collection @@ -54,11 +54,11 @@ The function `limit` or `take` should always be used in conjunction in order to For example, this is best avoided: -@@snip [RecipeSeq.scala](../code/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-unsafe } +@@snip [RecipeSeq.scala]($code$/scala/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-unsafe } Rather, use `limit` or `take` to ensure that the resulting `Seq` will contain only up to `max` elements: -@@snip [RecipeSeq.scala](../code/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-safe } +@@snip [RecipeSeq.scala]($code$/scala/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-safe } ### Calculating the digest of a ByteString stream @@ -75,7 +75,7 @@ At this point we want to emit the digest value, but we cannot do it with `push` be no downstream demand. Instead we call `emit` which will temporarily replace the handlers, emit the provided value when demand comes in and then reset the stage state. It will then complete the stage. -@@snip [RecipeDigest.scala](../code/docs/stream/cookbook/RecipeDigest.scala) { #calculating-digest } +@@snip [RecipeDigest.scala]($code$/scala/docs/stream/cookbook/RecipeDigest.scala) { #calculating-digest } ### Parsing lines from a stream of ByteStrings @@ -86,7 +86,7 @@ needs to be parsed. The `Framing` helper object contains a convenience method to parse messages from a stream of `ByteString` s: -@@snip [RecipeParseLines.scala](../code/docs/stream/cookbook/RecipeParseLines.scala) { #parse-lines } +@@snip [RecipeParseLines.scala]($code$/scala/docs/stream/cookbook/RecipeParseLines.scala) { #parse-lines } ### Dealing with compressed data streams @@ -95,7 +95,7 @@ The `Framing` helper object contains a convenience method to parse messages from The `Compression` helper object contains convenience methods for decompressing data streams compressed with Gzip or Deflate. -@@snip [RecipeDecompress.scala](../code/docs/stream/cookbook/RecipeDecompress.scala) { #decompress-gzip } +@@snip [RecipeDecompress.scala]($code$/scala/docs/stream/cookbook/RecipeDecompress.scala) { #decompress-gzip } ### Implementing reduce-by-key @@ -124,7 +124,7 @@ any given time. If the `groupBy` operator encounters more keys than this number then the stream cannot continue without violating its resource bound, in this case `groupBy` will terminate with a failure. 
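As a rough, standalone sketch of the reduce-by-key idea (independent of the referenced recipe; the word list and the `maxSubstreams` bound of 1024 are arbitrary choices):

```scala
import akka.NotUsed
import akka.stream.scaladsl.Source

val words: Source[String, NotUsed] =
  Source(List("hello", "streams", "hello"))

val counts: Source[(String, Int), NotUsed] =
  words
    .groupBy(maxSubstreams = 1024, identity)      // one substream per distinct word
    .map(_ -> 1)
    .reduce((l, r) => (l._1, l._2 + r._2))        // sum the counts within a substream
    .mergeSubstreams
```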
-@@snip [RecipeReduceByKey.scala](../code/docs/stream/cookbook/RecipeReduceByKey.scala) { #word-count } +@@snip [RecipeReduceByKey.scala]($code$/scala/docs/stream/cookbook/RecipeReduceByKey.scala) { #word-count } By extracting the parts specific to *wordcount* into @@ -134,7 +134,7 @@ By extracting the parts specific to *wordcount* into we get a generalized version below: -@@snip [RecipeReduceByKey.scala](../code/docs/stream/cookbook/RecipeReduceByKey.scala) { #reduce-by-key-general } +@@snip [RecipeReduceByKey.scala]($code$/scala/docs/stream/cookbook/RecipeReduceByKey.scala) { #reduce-by-key-general } @@@ note @@ -157,7 +157,7 @@ will be emitted. This is achieved by using `mapConcat` * Then we take this new stream of message topic pairs (containing a separate pair for each topic a given message belongs to) and feed it into groupBy, using the topic as the group key. -@@snip [RecipeMultiGroupBy.scala](../code/docs/stream/cookbook/RecipeMultiGroupBy.scala) { #multi-groupby } +@@snip [RecipeMultiGroupBy.scala]($code$/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala) { #multi-groupby } ## Working with Graphs @@ -172,14 +172,14 @@ trigger signal arrives. This recipe solves the problem by simply zipping the stream of `Message` elements with the stream of `Trigger` signals. Since `Zip` produces pairs, we simply map the output stream selecting the first element of the pair. -@@snip [RecipeManualTrigger.scala](../code/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream } +@@snip [RecipeManualTrigger.scala]($code$/scala/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream } Alternatively, instead of using a `Zip`, and then using `map` to get the first element of the pairs, we can avoid creating the pairs in the first place by using `ZipWith` which takes a two argument function to produce the output element. If this function would return a pair of the two argument it would be exactly the behavior of `Zip` so `ZipWith` is a generalization of zipping. -@@snip [RecipeManualTrigger.scala](../code/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream-zipwith } +@@snip [RecipeManualTrigger.scala]($code$/scala/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream-zipwith } ### Balancing jobs to a fixed pool of workers @@ -197,7 +197,7 @@ we wire the outputs of these workers to a `Merge` element that will collect the To make the worker stages run in parallel we mark them as asynchronous with *async*. -@@snip [RecipeWorkerPool.scala](../code/docs/stream/cookbook/RecipeWorkerPool.scala) { #worker-pool } +@@snip [RecipeWorkerPool.scala]($code$/scala/docs/stream/cookbook/RecipeWorkerPool.scala) { #worker-pool } ## Working with rate @@ -216,7 +216,7 @@ the speed of the upstream unaffected by the downstream. When the upstream is faster, the reducing process of the `conflate` starts. Our reducer function simply takes the freshest element. This in a simple dropping operation. -@@snip [RecipeSimpleDrop.scala](../code/docs/stream/cookbook/RecipeSimpleDrop.scala) { #simple-drop } +@@snip [RecipeSimpleDrop.scala]($code$/scala/docs/stream/cookbook/RecipeSimpleDrop.scala) { #simple-drop } There is a more general version of `conflate` named `conflateWithSeed` that allows to express more complex aggregations, more similar to a `fold`. @@ -233,7 +233,7 @@ defining a dropping strategy instead of the default `Backpressure`. 
This allows between the different consumers (the buffer smooths out small rate variances), but also allows faster consumers to progress by dropping from the buffer of the slow consumers if necessary. -@@snip [RecipeDroppyBroadcast.scala](../code/docs/stream/cookbook/RecipeDroppyBroadcast.scala) { #droppy-bcast } +@@snip [RecipeDroppyBroadcast.scala]($code$/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala) { #droppy-bcast } ### Collecting missed ticks @@ -252,7 +252,7 @@ count of the missed ticks so far. As a result, we have a flow of `Int` where the number represents the missed ticks. A number 0 means that we were able to consume the tick fast enough (i.e. zero means: 1 non-missed tick + 0 missed ticks) -@@snip [RecipeMissedTicks.scala](../code/docs/stream/cookbook/RecipeMissedTicks.scala) { #missed-ticks } +@@snip [RecipeMissedTicks.scala]($code$/scala/docs/stream/cookbook/RecipeMissedTicks.scala) { #missed-ticks } ### Create a stream processor that repeats the last element seen @@ -266,7 +266,7 @@ to feed the downstream if no upstream element is ready yet. In the `onPush()` ha `currentValue` variable and immediately relieve the upstream by calling `pull()`. The downstream `onPull` handler is very similar, we immediately relieve the downstream by emitting `currentValue`. -@@snip [RecipeHold.scala](../code/docs/stream/cookbook/RecipeHold.scala) { #hold-version-1 } +@@snip [RecipeHold.scala]($code$/scala/docs/stream/cookbook/RecipeHold.scala) { #hold-version-1 } While it is relatively simple, the drawback of the first version is that it needs an arbitrary initial element which is not always possible to provide. Hence, we create a second version where the downstream might need to wait in one single @@ -279,7 +279,7 @@ version is that we check if we have received the first value and only emit if we first element comes in we must check if there possibly already was demand from downstream so that we in that case can push the element directly. -@@snip [RecipeHold.scala](../code/docs/stream/cookbook/RecipeHold.scala) { #hold-version-2 } +@@snip [RecipeHold.scala]($code$/scala/docs/stream/cookbook/RecipeHold.scala) { #hold-version-2 } ### Globally limiting the rate of a set of streams @@ -299,13 +299,13 @@ of the sender is added to a queue. Once the timer for replenishing the pending p message, we increment the pending permits counter and send a reply to each of the waiting senders. If there are more waiting senders than permits available we will stay in the `closed` state. -@@snip [RecipeGlobalRateLimit.scala](../code/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-actor } +@@snip [RecipeGlobalRateLimit.scala]($code$/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-actor } To create a Flow that uses this global limiter actor we use the `mapAsync` function with the combination of the `ask` pattern. We also define a timeout, so if a reply is not received during the configured maximum wait period the returned future from `ask` will fail, which will fail the corresponding stream as well. -@@snip [RecipeGlobalRateLimit.scala](../code/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-flow } +@@snip [RecipeGlobalRateLimit.scala]($code$/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-flow } @@@ note @@ -332,7 +332,7 @@ and an empty or nonempty remaining buffer. 
Both `onPush()` and `onPull()` calls `emitChunk()` the only difference is that the push handler also stores the incoming chunk by appending to the end of the buffer. -@@snip [RecipeByteStrings.scala](../code/docs/stream/cookbook/RecipeByteStrings.scala) { #bytestring-chunker } +@@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #bytestring-chunker } ### Limit the number of bytes passing through a stream of ByteStrings @@ -343,7 +343,7 @@ This recipe uses a `GraphStage` to implement the desired feature. In the only ha `onPush()` we just update a counter and see if it gets larger than `maximumBytes`. If a violation happens we signal failure, otherwise we forward the chunk we have received. -@@snip [RecipeByteStrings.scala](../code/docs/stream/cookbook/RecipeByteStrings.scala) { #bytes-limiter } +@@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #bytes-limiter } ### Compact ByteStrings in a stream of ByteStrings @@ -354,7 +354,7 @@ chain we want to have clean copies that are no longer referencing the original ` The recipe is a simple use of map, calling the `compact()` method of the `ByteString` elements. This does copying of the underlying arrays, so this should be the last element of a long chain if used. -@@snip [RecipeByteStrings.scala](../code/docs/stream/cookbook/RecipeByteStrings.scala) { #compacting-bytestrings } +@@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #compacting-bytestrings } ### Injecting keep-alive messages into a stream of ByteStrings @@ -363,4 +363,4 @@ but only if this does not interfere with normal traffic. There is a built-in operation that allows to do this directly: -@@snip [RecipeKeepAlive.scala](../code/docs/stream/cookbook/RecipeKeepAlive.scala) { #inject-keepalive } \ No newline at end of file +@@snip [RecipeKeepAlive.scala]($code$/scala/docs/stream/cookbook/RecipeKeepAlive.scala) { #inject-keepalive } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/stream/stream-customize.md b/akka-docs/src/main/paradox/scala/stream/stream-customize.md index 1a750109b0..d8311f9377 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-customize.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-customize.md @@ -25,7 +25,7 @@ As a first motivating example, we will build a new `Source` that will simply emi cancelled. To start, we need to define the "interface" of our stage, which is called *shape* in Akka Streams terminology (this is explained in more detail in the section @ref:[Modularity, Composition and Hierarchy](stream-composition.md)). This is how this looks like: -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #boilerplate-example } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #boilerplate-example } As you see, in itself the `GraphStage` only defines the ports of this stage and a shape that contains the ports. It also has, a currently unimplemented method called `createLogic`. If you recall, stages are reusable in multiple @@ -49,7 +49,7 @@ override `onPull()` which indicates that we are free to emit a single element. T to stop the stage, we don't need to override it. In the `onPull` callback we will simply emit the next number. 
This is how it looks like in the end: -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #custom-source-example } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #custom-source-example } Instances of the above `GraphStage` are subclasses of `Graph[SourceShape[Int],NotUsed]` which means that they are already usable in many situations, but do not provide the DSL methods we usually have for other @@ -57,7 +57,7 @@ that they are already usable in many situations, but do not provide the DSL meth `Source.fromGraph` (see @ref:[Modularity, Composition and Hierarchy](stream-composition.md) for more details about graphs and DSLs). Now we can use the source as any other built-in one: -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #simple-source-usage } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #simple-source-usage } Similarly, to create a custom `Sink` one can register a subclass `InHandler` with the stage `Inlet`. The `onPush()` callback is used to signal the handler a new element has been pushed to the stage, @@ -65,7 +65,7 @@ and can hence be grabbed and used. `onPush()` can be overridden to provide custo Please note, most Sinks would need to request upstream elements as soon as they are created: this can be done by calling `pull(inlet)` in the `preStart()` callback. -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #custom-sink-example } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #custom-sink-example } ### Port states, InHandler and OutHandler @@ -189,7 +189,7 @@ To illustrate these concepts we create a small `GraphStage` that implements the Map calls `push(out)` from the `onPush()` handler and it also calls `pull()` from the `onPull` handler resulting in the conceptual wiring above, and fully expressed in code below: -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #one-to-one } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #one-to-one } Map is a typical example of a one-to-one transformation of a stream where demand is passed along upstream elements passed on downstream. @@ -208,7 +208,7 @@ we return the “ball” to our upstream so that we get the new element. This is example by adding a conditional in the `onPush` handler and decide between a `pull(in)` or `push(out)` call (and of course not having a mapping `f` function). -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #many-to-one } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #many-to-one } To complete the picture we define a one-to-many transformation as the next step. We chose a straightforward example stage that emits every upstream element twice downstream. The conceptual wiring of this stage looks like this: @@ -223,7 +223,7 @@ This is a stage that has state: an option with the last element it has seen indi has duplicated this last element already or not. We must also make sure to emit the extra element if the upstream completes. 
-@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #one-to-many } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #one-to-many } In this case a pull from downstream might be consumed by the stage itself rather than passed along upstream as the stage might contain an element it wants to @@ -236,7 +236,7 @@ This example can be simplified by replacing the usage of a mutable state with ca `emitMultiple` which will replace the handlers, emit each of multiple elements and then reinstate the original handlers: -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #simpler-one-to-many } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #simpler-one-to-many } Finally, to demonstrate all of the stages above, we put them together into a processing chain, which conceptually would correspond to the following structure: @@ -249,7 +249,7 @@ which conceptually would correspond to the following structure: In code this is only a few lines, using the `via` use our custom stages in a stream: -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #graph-stage-chain } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #graph-stage-chain } If we attempt to draw the sequence of events, it shows that there is one "event token" in circulation in a potential chain of stages, just like our conceptual "railroad tracks" representation predicts. @@ -292,7 +292,7 @@ See @ref:[Using the SLF4J API directly](../logging.md#slf4j-directly-scala) for The stage then gets access to the `log` field which it can safely use from any `GraphStage` callbacks: -@@snip [GraphStageLoggingDocSpec.scala](../code/docs/stream/GraphStageLoggingDocSpec.scala) { #stage-with-logging } +@@snip [GraphStageLoggingDocSpec.scala]($code$/scala/docs/stream/GraphStageLoggingDocSpec.scala) { #stage-with-logging } @@@ note @@ -317,7 +317,7 @@ In this sample the stage toggles between open and closed, where open means no el stage starts out as closed but as soon as an element is pushed downstream the gate becomes open for a duration of time during which it will consume and drop upstream messages: -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #timed } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #timed } ### Using asynchronous side-channels @@ -335,7 +335,7 @@ Sharing the AsyncCallback from the constructor risks race conditions, therefore This example shows an asynchronous side channel graph stage that starts dropping elements when a future completes: -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #async-side-channel } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #async-side-channel } ### Integration with actors @@ -372,7 +372,7 @@ necessary (non-blocking) synchronization and visibility guarantees to this share In this sample the materialized value is a future containing the first element to go through the stream: -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #materialized } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #materialized } ### Using attributes to affect the behavior of a stage @@ -423,7 +423,7 @@ initialization. 
The buffer has demand for up to two elements without any downstr The following code example demonstrates a buffer class corresponding to the message sequence chart above. -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #detached } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #detached } ## Thread safety of custom processing stages @@ -474,11 +474,11 @@ extensions to `Source` and `Flow` see [this sketch by R. Kuhn](https://gist.gith A lot simpler is the task of just adding an extension method to `Source` as shown below: -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #extending-source } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #extending-source } The analog works for `Flow` as well: -@@snip [GraphStageDocSpec.scala](../code/docs/stream/GraphStageDocSpec.scala) { #extending-flow } +@@snip [GraphStageDocSpec.scala]($code$/scala/docs/stream/GraphStageDocSpec.scala) { #extending-flow } If you try to write this for `SubFlow`, though, you will run into the same issue as when trying to unify the two solutions above, only on a higher level (the type constructors needed for that unification would have rank diff --git a/akka-docs/src/main/paradox/scala/stream/stream-dynamic.md b/akka-docs/src/main/paradox/scala/stream/stream-dynamic.md index a46844d637..8f70004ef1 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-dynamic.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-dynamic.md @@ -7,7 +7,7 @@ A `KillSwitch` allows the completion of graphs of `FlowShape` from the outside. can be linked to a graph of `FlowShape` needing completion control. The `KillSwitch` trait allows to complete or fail the graph(s). -@@snip [KillSwitch.scala]../../../../../../akka-stream/src/main/scala/akka/stream/KillSwitch.scala) { #kill-switch } +@@snip [KillSwitch.scala]($akka$/akka-stream/src/main/scala/akka/stream/KillSwitch.scala) { #kill-switch } After the first call to either `shutdown` or `abort`, all subsequent calls to any of these methods will be ignored. Graph completion is performed by both @@ -25,11 +25,11 @@ below for usage examples. * **Shutdown** -@@snip [KillSwitchDocSpec.scala](../code/docs/stream/KillSwitchDocSpec.scala) { #unique-shutdown } +@@snip [KillSwitchDocSpec.scala]($code$/scala/docs/stream/KillSwitchDocSpec.scala) { #unique-shutdown } * **Abort** -@@snip [KillSwitchDocSpec.scala](../code/docs/stream/KillSwitchDocSpec.scala) { #unique-abort } +@@snip [KillSwitchDocSpec.scala]($code$/scala/docs/stream/KillSwitchDocSpec.scala) { #unique-abort } ### SharedKillSwitch @@ -40,11 +40,11 @@ Refer to the below for usage examples. * **Shutdown** -@@snip [KillSwitchDocSpec.scala](../code/docs/stream/KillSwitchDocSpec.scala) { #shared-shutdown } +@@snip [KillSwitchDocSpec.scala]($code$/scala/docs/stream/KillSwitchDocSpec.scala) { #shared-shutdown } * **Abort** -@@snip [KillSwitchDocSpec.scala](../code/docs/stream/KillSwitchDocSpec.scala) { #shared-abort } +@@snip [KillSwitchDocSpec.scala]($code$/scala/docs/stream/KillSwitchDocSpec.scala) { #shared-abort } @@@ note @@ -69,7 +69,7 @@ producers are backpressured. The hub itself comes as a `Source` to which the sin It is not possible to attach any producers until this `Source` has been materialized (started). This is ensured by the fact that we only get the corresponding `Sink` as a materialized value. 
Usage might look like this: -@@snip [HubsDocSpec.scala](../code/docs/stream/HubsDocSpec.scala) { #merge-hub } +@@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #merge-hub } This sequence, while might look odd at first, ensures proper startup order. Once we get the `Sink`, we can use it as many times as wanted. Everything that is fed to it will be delivered to the consumer we attached @@ -82,7 +82,7 @@ rate of the producer will be automatically adapted to the slowest consumer. In t to which the single producer must be attached first. Consumers can only be attached once the `Sink` has been materialized (i.e. the producer has been started). One example of using the `BroadcastHub`: -@@snip [HubsDocSpec.scala](../code/docs/stream/HubsDocSpec.scala) { #broadcast-hub } +@@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #broadcast-hub } The resulting `Source` can be materialized any number of times, each materialization effectively attaching a new subscriber. If there are no subscribers attached to this hub then it will not drop any elements but instead @@ -102,13 +102,13 @@ First, we connect a `MergeHub` and a `BroadcastHub` together to form a publish-s we materialize this small stream, we get back a pair of `Source` and `Sink` that together define the publish and subscribe sides of our channel. -@@snip [HubsDocSpec.scala](../code/docs/stream/HubsDocSpec.scala) { #pub-sub-1 } +@@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-1 } We now use a few tricks to add more features. First of all, we attach a `Sink.ignore` at the broadcast side of the channel to keep it drained when there are no subscribers. If this behavior is not the desired one this line can be simply dropped. -@@snip [HubsDocSpec.scala](../code/docs/stream/HubsDocSpec.scala) { #pub-sub-2 } +@@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-2 } We now wrap the `Sink` and `Source` in a `Flow` using `Flow.fromSinkAndSource`. This bundles up the two sides of the channel into one and forces users of it to always define a publisher and subscriber side @@ -118,10 +118,10 @@ same time. Finally, we add `backpressureTimeout` on the consumer side to ensure that subscribers that block the channel for more than 3 seconds are forcefully removed (and their stream failed). -@@snip [HubsDocSpec.scala](../code/docs/stream/HubsDocSpec.scala) { #pub-sub-3 } +@@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-3 } The resulting Flow now has a type of `Flow[String, String, UniqueKillSwitch]` representing a publish-subscribe channel which can be used any number of times to attach new producers or consumers. In addition, it materializes to a `UniqueKillSwitch` (see [UniqueKillSwitch](#unique-kill-switch-scala)) that can be used to deregister a single user externally: -@@snip [HubsDocSpec.scala](../code/docs/stream/HubsDocSpec.scala) { #pub-sub-4 } \ No newline at end of file +@@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-4 } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/stream/stream-error.md b/akka-docs/src/main/paradox/scala/stream/stream-error.md index 30a259837b..cf94e8ae56 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-error.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-error.md @@ -24,11 +24,11 @@ performed by creating a new instance of the stage. By default the stopping strategy is used for all exceptions, i.e. 
the stream will be completed with failure when an exception is thrown. -@@snip [FlowErrorDocSpec.scala](../code/docs/stream/FlowErrorDocSpec.scala) { #stop } +@@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #stop } The default supervision strategy for a stream can be defined on the settings of the materializer. -@@snip [FlowErrorDocSpec.scala](../code/docs/stream/FlowErrorDocSpec.scala) { #resume } +@@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #resume } Here you can see that all `ArithmeticException` will resume the processing, i.e. the elements that cause the division by zero are effectively dropped. @@ -42,12 +42,12 @@ cycles, as explained in @ref:[Graph cycles, liveness and deadlocks](stream-graph The supervision strategy can also be defined for all operators of a flow. -@@snip [FlowErrorDocSpec.scala](../code/docs/stream/FlowErrorDocSpec.scala) { #resume-section } +@@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #resume-section } `Restart` works in a similar way as `Resume` with the addition that accumulated state, if any, of the failing processing stage will be reset. -@@snip [FlowErrorDocSpec.scala](../code/docs/stream/FlowErrorDocSpec.scala) { #restart-section } +@@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #restart-section } ## Errors from mapAsync @@ -58,11 +58,11 @@ discard those that cannot be found. We start with the tweet stream of authors: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #tweet-authors } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #tweet-authors } Assume that we can lookup their email address using: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #email-address-lookup2 } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #email-address-lookup2 } The `Future` is completed with `Failure` if the email is not found. @@ -70,7 +70,7 @@ Transforming the stream of authors to a stream of email addresses by using the ` service can be done with `mapAsync` and we use `Supervision.resumingDecider` to drop unknown email addresses: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #email-addresses-mapAsync-supervision } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #email-addresses-mapAsync-supervision } If we would not use `Resume` the default stopping strategy would complete the stream with failure on the first `Future` that was completed with `Failure`. \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/stream/stream-flows-and-basics.md b/akka-docs/src/main/paradox/scala/stream/stream-flows-and-basics.md index 8fc5c9076d..8313e92065 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-flows-and-basics.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-flows-and-basics.md @@ -77,7 +77,7 @@ starting up Actors). Thanks to Flows being simply a description of the processin thread-safe, and freely shareable*, which means that it is for example safe to share and send them between actors, to have one actor prepare the work, and then have it be materialized at some completely different place in the code. 
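A minimal sketch of materializing in separate steps, independent of the referenced snippet (the system name and element values are illustrative):

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Keep, RunnableGraph, Sink, Source }
import scala.concurrent.Future

implicit val system = ActorSystem("docs-example")
implicit val materializer = ActorMaterializer()

val source = Source(1 to 10)
val sink = Sink.fold[Int, Int](0)(_ + _)

// connecting a Source to a Sink only produces a blueprint; nothing runs yet
val runnable: RunnableGraph[Future[Int]] = source.toMat(sink)(Keep.right)

// materialization starts the stream and hands back the materialized value
val sum: Future[Int] = runnable.run()
```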
-@@snip [FlowDocSpec.scala](../code/docs/stream/FlowDocSpec.scala) { #materialization-in-steps } +@@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #materialization-in-steps } After running (materializing) the `RunnableGraph[T]` we get back the materialized value of type T. Every stream processing stage can produce a materialized value, and it is the responsibility of the user to combine them to a new type. @@ -91,12 +91,12 @@ there is a convenience method called `runWith()` available for `Sink`, `Source` a supplied `Source` (in order to run a `Sink`), a `Sink` (in order to run a `Source`) or both a `Source` and a `Sink` (in order to run a `Flow`, since it has neither attached yet). -@@snip [FlowDocSpec.scala](../code/docs/stream/FlowDocSpec.scala) { #materialization-runWith } +@@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #materialization-runWith } It is worth pointing out that since processing stages are *immutable*, connecting them returns a new processing stage, instead of modifying the existing instance, so while constructing long flows, remember to assign the new value to a variable or run it: -@@snip [FlowDocSpec.scala](../code/docs/stream/FlowDocSpec.scala) { #source-immutable } +@@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #source-immutable } @@@ note @@ -116,18 +116,18 @@ In the example below we create two running materialized instance of the stream t variable, and both materializations give us a different `Future` from the map even though we used the same `sink` to refer to the future: -@@snip [FlowDocSpec.scala](../code/docs/stream/FlowDocSpec.scala) { #stream-reuse } +@@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #stream-reuse } ### Defining sources, sinks and flows The objects `Source` and `Sink` define various ways to create sources and sinks of elements. The following examples show some of the most useful constructs (refer to the API documentation for more details): -@@snip [FlowDocSpec.scala](../code/docs/stream/FlowDocSpec.scala) { #source-sink } +@@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #source-sink } There are various ways to wire up different parts of a stream, the following examples show some of the available options: -@@snip [FlowDocSpec.scala](../code/docs/stream/FlowDocSpec.scala) { #flow-connecting } +@@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #flow-connecting } ### Illegal stream elements @@ -242,7 +242,7 @@ To allow for parallel processing you will have to insert asynchronous boundaries graphs by way of adding `Attributes.asyncBoundary` using the method `async` on `Source`, `Sink` and `Flow` to pieces that shall communicate with the rest of the graph in an asynchronous fashion. -@@snip [FlowDocSpec.scala](../code/docs/stream/FlowDocSpec.scala) { #flow-async } +@@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #flow-async } In this example we create two regions within the flow which will be executed in one Actor each—assuming that adding and multiplying integers is an extremely costly operation this will lead to a performance gain since two CPUs can @@ -283,7 +283,7 @@ to somehow express how these values should be composed to a final value when we many combinator methods have variants that take an additional argument, a function, that will be used to combine the resulting values. Some examples of using these combiners are illustrated in the example below. 
-@@snip [FlowDocSpec.scala](../code/docs/stream/FlowDocSpec.scala) { #flow-mat-combine } +@@snip [FlowDocSpec.scala]($code$/scala/docs/stream/FlowDocSpec.scala) { #flow-mat-combine } @@@ note diff --git a/akka-docs/src/main/paradox/scala/stream/stream-graphs.md b/akka-docs/src/main/paradox/scala/stream/stream-graphs.md index e4983cbf5b..2ba3e593d8 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-graphs.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-graphs.md @@ -48,7 +48,7 @@ and each circle corresponds to either a `Junction` or a `Source` or `Sink` if it or ending a `Flow`. Junctions must always be created with defined type parameters, as otherwise the `Nothing` type will be inferred. -@@snip [GraphDSLDocSpec.scala](../code/docs/stream/GraphDSLDocSpec.scala) { #simple-graph-dsl } +@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #simple-graph-dsl } @@@ note @@ -79,7 +79,7 @@ In the example below we prepare a graph that consists of two parallel streams, in which we re-use the same instance of `Flow`, yet it will properly be materialized as two connections between the corresponding Sources and Sinks: -@@snip [GraphDSLDocSpec.scala](../code/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-reusing-a-flow } +@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-reusing-a-flow } ## Constructing and combining Partial Graphs @@ -100,7 +100,7 @@ Let's imagine we want to provide users with a specialized element that given 3 i the greatest int value of each zipped triple. We'll want to expose 3 input ports (unconnected sources) and one output port (unconnected sink). -@@snip [StreamPartialGraphDSLDocSpec.scala](../code/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #simple-partial-graph-dsl } +@@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #simple-partial-graph-dsl } As you can see, first we construct the partial graph that contains all the zipping and comparing of stream elements. This partial graph will have three inputs and one output, wherefore we use the `UniformFanInShape`. @@ -140,12 +140,12 @@ from the function passed in . The single outlet must be provided to the `SourceS Refer to the example below, in which we create a Source that zips together two numbers, to see this graph construction in action: -@@snip [StreamPartialGraphDSLDocSpec.scala](../code/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-from-partial-graph-dsl } +@@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-from-partial-graph-dsl } Similarly the same can be done for a `Sink[T]`, using `SinkShape.of` in which case the provided value must be an `Inlet[T]`. For defining a `Flow[T]` we need to expose both an inlet and an outlet: -@@snip [StreamPartialGraphDSLDocSpec.scala](../code/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #flow-from-partial-graph-dsl } +@@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #flow-from-partial-graph-dsl } ## Combining Sources and Sinks with simplified API @@ -153,11 +153,11 @@ There is a simplified API you can use to combine sources and sinks with junction `Merge[In]` and `Concat[A]` without the need for using the Graph DSL. The combine method takes care of constructing the necessary graph underneath. 
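For illustration, a minimal standalone sketch of `Source.combine` (the sources and element values are arbitrary; running the result requires an implicit materializer in scope):

```scala
import akka.NotUsed
import akka.stream.scaladsl.{ Merge, Source }

val sourceOne = Source(1 to 3)
val sourceTwo = Source(10 to 12)

// fan-in without the Graph DSL; the strategy function receives the number of inputs
val merged: Source[Int, NotUsed] = Source.combine(sourceOne, sourceTwo)(Merge(_))
```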
In following example we combine two sources into one (fan-in): -@@snip [StreamPartialGraphDSLDocSpec.scala](../code/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-combine } +@@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-combine } The same can be done for a `Sink[T]` but in this case it will be fan-out: -@@snip [StreamPartialGraphDSLDocSpec.scala](../code/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #sink-combine } +@@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #sink-combine } ## Building reusable Graph components @@ -173,7 +173,7 @@ where jobs of higher priority can be sent. Altogether, our junction will have two input ports of type `I` (for the normal and priority jobs) and an output port of type `O`. To represent this interface, we need to define a custom `Shape`. The following lines show how to do that. -@@snip [GraphDSLDocSpec.scala](../code/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape } +@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape } ## Predefined shapes @@ -192,20 +192,20 @@ with multiple input (or output) ports of different types. Since our shape has two input ports and one output port, we can just use the `FanInShape` DSL to define our custom shape: -@@snip [GraphDSLDocSpec.scala](../code/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape2 } +@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape2 } Now that we have a `Shape` we can wire up a Graph that represents our worker pool. First, we will merge incoming normal and priority jobs using `MergePreferred`, then we will send the jobs to a `Balance` junction which will fan-out to a configurable number of workers (flows), finally we merge all these results together and send them out through our only output port. This is expressed by the following code: -@@snip [GraphDSLDocSpec.scala](../code/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-create } +@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-create } All we need to do now is to use our custom junction in a graph. The following code simulates some simple workers and jobs using plain strings and prints out the results. Actually we used *two* instances of our worker pool junction using `add()` twice. -@@snip [GraphDSLDocSpec.scala](../code/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-use } +@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-use } ## Bidirectional Flows @@ -220,19 +220,19 @@ this purpose exists the special type `BidiFlow` which is a graph that has exactly two open inlets and two open outlets. 
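As a small standalone illustration (not the codec from the referenced snippet), a 1:1 `BidiFlow` built from two plain functions might look like this:

```scala
import akka.NotUsed
import akka.stream.scaladsl.BidiFlow
import akka.util.ByteString

// outgoing messages are encoded to bytes, incoming bytes are decoded back to Strings
val codec: BidiFlow[String, ByteString, ByteString, String, NotUsed] =
  BidiFlow.fromFunctions(
    (msg: String) => ByteString(msg),
    (bytes: ByteString) => bytes.utf8String)
```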
The corresponding shape is called `BidiShape` and is defined like this: -@@snip [Shape.scala]../../../../../../akka-stream/src/main/scala/akka/stream/Shape.scala) { #bidi-shape } +@@snip [Shape.scala]($akka$/akka-stream/src/main/scala/akka/stream/Shape.scala) { #bidi-shape } A bidirectional flow is defined just like a unidirectional `Flow` as demonstrated for the codec mentioned above: -@@snip [BidiFlowDocSpec.scala](../code/docs/stream/BidiFlowDocSpec.scala) { #codec } +@@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #codec } The first version resembles the partial graph constructor, while for the simple case of a functional 1:1 transformation there is a concise convenience method as shown on the last line. The implementation of the two functions is not difficult either: -@@snip [BidiFlowDocSpec.scala](../code/docs/stream/BidiFlowDocSpec.scala) { #codec-impl } +@@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #codec-impl } In this way you could easily integrate any other serialization library that turns an object into a sequence of bytes. @@ -242,11 +242,11 @@ a framing protocol means that any received chunk of bytes may correspond to zero or more messages. This is best implemented using a `GraphStage` (see also @ref:[Custom processing with GraphStage](stream-customize.md#graphstage-scala)). -@@snip [BidiFlowDocSpec.scala](../code/docs/stream/BidiFlowDocSpec.scala) { #framing } +@@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #framing } With these implementations we can build a protocol stack and test it: -@@snip [BidiFlowDocSpec.scala](../code/docs/stream/BidiFlowDocSpec.scala) { #compose } +@@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #compose } This example demonstrates how `BidiFlow` subgraphs can be hooked together and also turned around with the `.reversed` method. The test @@ -262,12 +262,12 @@ can be used in the graph as an ordinary source or outlet, and which will eventua If the materialized value is needed at more than one place, it is possible to call `materializedValue` any number of times to acquire the necessary number of outlets. -@@snip [GraphDSLDocSpec.scala](../code/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue } +@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue } Be careful not to introduce a cycle where the materialized value actually contributes to the materialized value. The following example demonstrates a case where the materialized `Future` of a fold is fed back to the fold itself. -@@snip [GraphDSLDocSpec.scala](../code/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue-cycle } +@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue-cycle } ## Graph cycles, liveness and deadlocks @@ -291,7 +291,7 @@ see there are cases where this is very helpful. @@@ -@@snip [GraphCyclesSpec.scala](../code/docs/stream/GraphCyclesSpec.scala) { #deadlocked } +@@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #deadlocked } Running this we observe that after a few numbers have been printed, no more elements are logged to the console - all processing stops after some time. After some investigation we observe that: @@ -309,7 +309,7 @@ If we modify our feedback loop by replacing the `Merge` junction with a `MergePr before trying the other lower priority input ports. 
Since we feed back through the preferred port it is always guaranteed that the elements in the cycles can flow. -@@snip [GraphCyclesSpec.scala](../code/docs/stream/GraphCyclesSpec.scala) { #unfair } +@@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #unfair } If we run the example we see that the same sequence of numbers are printed over and over again, but the processing does not stop. Hence, we avoided the deadlock, but `source` is still @@ -327,7 +327,7 @@ be balanced (as many elements are removed as many are injected) then there would To make our cycle both live (not deadlocking) and fair we can introduce a dropping element on the feedback arc. In this case we chose the `buffer()` operation giving it a dropping strategy `OverflowStrategy.dropHead`. -@@snip [GraphCyclesSpec.scala](../code/docs/stream/GraphCyclesSpec.scala) { #dropping } +@@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #dropping } If we run this example we see that @@ -346,7 +346,7 @@ the beginning instead. To achieve this we modify our first graph by replacing th Since `ZipWith` takes one element from `source` *and* from the feedback arc to inject one element into the cycle, we maintain the balance of elements. -@@snip [GraphCyclesSpec.scala](../code/docs/stream/GraphCyclesSpec.scala) { #zipping-dead } +@@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #zipping-dead } Still, when we try to run the example it turns out that no element is printed at all! After some investigation we realize that: @@ -358,7 +358,7 @@ These two conditions are a typical "chicken-and-egg" problem. The solution is to element into the cycle that is independent from `source`. We do this by using a `Concat` junction on the backwards arc that injects a single element using `Source.single`. -@@snip [GraphCyclesSpec.scala](../code/docs/stream/GraphCyclesSpec.scala) { #zipping-live } +@@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #zipping-live } When we run the above example we see that processing starts and never stops. The important takeaway from this example is that balanced cycles often need an initial "kick-off" element to be injected into the cycle. \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/stream/stream-integrations.md b/akka-docs/src/main/paradox/scala/stream/stream-integrations.md index ac636693c8..e6a42eeb1c 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-integrations.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-integrations.md @@ -15,7 +15,7 @@ use `ask` in `mapAsync`. The back-pressure of the stream is maintained by the `Future` of the `ask` and the mailbox of the actor will not be filled with more messages than the given `parallelism` of the `mapAsync` stage. -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #mapAsync-ask } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #mapAsync-ask } Note that the messages received in the actor will be in the same order as the stream elements, i.e. the `parallelism` does not change the ordering @@ -28,7 +28,7 @@ The actor must reply to the `sender()` for each message from the stream. That reply will complete the `Future` of the `ask` and it will be the element that is emitted downstreams from `mapAsync`. 
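A minimal standalone sketch of this pattern (the `Translator` actor and the element values are illustrative, not the referenced snippet):

```scala
import akka.actor.{ Actor, ActorSystem, Props }
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import akka.util.Timeout
import scala.concurrent.duration._

// The actor must reply to sender() for every element; that reply completes the
// Future returned by `ask` and becomes the element emitted by mapAsync.
class Translator extends Actor {
  def receive = {
    case word: String => sender() ! word.toUpperCase
  }
}

implicit val system = ActorSystem("docs-example")
implicit val materializer = ActorMaterializer()
implicit val askTimeout = Timeout(5.seconds)

val translator = system.actorOf(Props[Translator])

Source(List("hello", "streams"))
  .mapAsync(parallelism = 4)(elem => (translator ? elem).mapTo[String])
  .runWith(Sink.foreach(println))
```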
-@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #ask-actor } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #ask-actor } The stream can be completed with failure by sending `akka.actor.Status.Failure` as reply from the actor. @@ -113,24 +113,24 @@ performed with `mapAsync` or `mapAsyncUnordered`. For example, sending emails to the authors of selected tweets using an external email service: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #email-server-send } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #email-server-send } We start with the tweet stream of authors: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #tweet-authors } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #tweet-authors } Assume that we can lookup their email address using: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #email-address-lookup } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #email-address-lookup } Transforming the stream of authors to a stream of email addresses by using the `lookupEmail` service can be done with `mapAsync`: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #email-addresses-mapAsync } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #email-addresses-mapAsync } Finally, sending the emails: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #send-emails } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #send-emails } `mapAsync` is applying the given function that is calling out to the external service to each of the elements as they pass through this processing step. The function returns a `Future` @@ -152,23 +152,23 @@ result stream onwards for further processing or storage. Note that `mapAsync` preserves the order of the stream elements. In this example the order is not important and then we can use the more efficient `mapAsyncUnordered`: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #external-service-mapAsyncUnordered } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #external-service-mapAsyncUnordered } In the above example the services conveniently returned a `Future` of the result. If that is not the case you need to wrap the call in a `Future`. If the service call involves blocking you must also make sure that you run it on a dedicated execution context, to avoid starvation and disturbance of other tasks in the system. -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #blocking-mapAsync } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #blocking-mapAsync } The configuration of the `"blocking-dispatcher"` may look something like: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #blocking-dispatcher-config } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #blocking-dispatcher-config } An alternative for blocking calls is to perform them in a `map` operation, still using a dedicated dispatcher for that operation. 
-@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #blocking-map } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #blocking-map } However, that is not exactly the same as `mapAsync`, since the `mapAsync` may run several calls concurrently, but `map` performs them one at a time. @@ -176,7 +176,7 @@ several calls concurrently, but `map` performs them one at a time. For a service that is exposed as an actor, or if an actor is used as a gateway in front of an external service, you can use `ask`: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #save-tweets } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #save-tweets } Note that if the `ask` is not completed within the given timeout the stream is completed with failure. If that is not desired outcome you can use `recover` on the `ask` `Future`. @@ -204,14 +204,14 @@ successive calls as long as there is downstream demand of several elements. Here is a fictive service that we can use to illustrate these aspects. -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-service } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-service } Elements starting with a lower case character are simulated to take longer time to process. Here is how we can use it with `mapAsync`: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-mapAsync } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-mapAsync } The output may look like this: @@ -268,7 +268,7 @@ calls are limited by the buffer size (4) of the `ActorMaterializerSettings`. Here is how we can use the same service with `mapAsyncUnordered`: -@@snip [IntegrationDocSpec.scala](../code/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-mapAsyncUnordered } +@@snip [IntegrationDocSpec.scala]($code$/scala/docs/stream/IntegrationDocSpec.scala) { #sometimes-slow-mapAsyncUnordered } The output may look like this: @@ -338,19 +338,19 @@ An incomplete list of other implementations: The two most important interfaces in Reactive Streams are the `Publisher` and `Subscriber`. 
-@@snip [ReactiveStreamsDocSpec.scala](../code/docs/stream/ReactiveStreamsDocSpec.scala) { #imports } +@@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #imports } Let us assume that a library provides a publisher of tweets: -@@snip [ReactiveStreamsDocSpec.scala](../code/docs/stream/ReactiveStreamsDocSpec.scala) { #tweets-publisher } +@@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #tweets-publisher } and another library knows how to store author handles in a database: -@@snip [ReactiveStreamsDocSpec.scala](../code/docs/stream/ReactiveStreamsDocSpec.scala) { #author-storage-subscriber } +@@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #author-storage-subscriber } Using an Akka Streams `Flow` we can transform the stream and connect those: -@@snip [ReactiveStreamsDocSpec.scala](../code/docs/stream/ReactiveStreamsDocSpec.scala) { #authors #connect-all } +@@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #authors #connect-all } The `Publisher` is used as an input `Source` to the flow and the `Subscriber` is used as an output `Sink`. @@ -359,23 +359,23 @@ A `Flow` can also be also converted to a `RunnableGraph[Processor[In, Out]]` whi materializes to a `Processor` when `run()` is called. `run()` itself can be called multiple times, resulting in a new `Processor` instance each time. -@@snip [ReactiveStreamsDocSpec.scala](../code/docs/stream/ReactiveStreamsDocSpec.scala) { #flow-publisher-subscriber } +@@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #flow-publisher-subscriber } A publisher can be connected to a subscriber with the `subscribe` method. It is also possible to expose a `Source` as a `Publisher` by using the Publisher-`Sink`: -@@snip [ReactiveStreamsDocSpec.scala](../code/docs/stream/ReactiveStreamsDocSpec.scala) { #source-publisher } +@@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #source-publisher } A publisher that is created with `Sink.asPublisher(fanout = false)` supports only a single subscription. Additional subscription attempts will be rejected with an `IllegalStateException`. A publisher that supports multiple subscribers using fan-out/broadcasting is created as follows: -@@snip [ReactiveStreamsDocSpec.scala](../code/docs/stream/ReactiveStreamsDocSpec.scala) { #author-alert-subscriber #author-storage-subscriber } +@@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #author-alert-subscriber #author-storage-subscriber } -@@snip [ReactiveStreamsDocSpec.scala](../code/docs/stream/ReactiveStreamsDocSpec.scala) { #source-fanoutPublisher } +@@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #source-fanoutPublisher } The input buffer size of the stage controls how far apart the slowest subscriber can be from the fastest subscriber before slowing down the stream. @@ -383,12 +383,12 @@ before slowing down the stream. 
To make the picture complete, it is also possible to expose a `Sink` as a `Subscriber` by using the Subscriber-`Source`: -@@snip [ReactiveStreamsDocSpec.scala](../code/docs/stream/ReactiveStreamsDocSpec.scala) { #sink-subscriber } +@@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #sink-subscriber } It is also possible to use re-wrap `Processor` instances as a `Flow` by passing a factory function that will create the `Processor` instances: -@@snip [ReactiveStreamsDocSpec.scala](../code/docs/stream/ReactiveStreamsDocSpec.scala) { #use-processor } +@@snip [ReactiveStreamsDocSpec.scala]($code$/scala/docs/stream/ReactiveStreamsDocSpec.scala) { #use-processor } Please note that a factory is necessary to achieve reusability of the resulting `Flow`. @@ -436,7 +436,7 @@ stream publisher that keeps track of the subscription life cycle and requested e Here is an example of such an actor. It dispatches incoming jobs to the attached subscriber: -@@snip [ActorPublisherDocSpec.scala](../code/docs/stream/ActorPublisherDocSpec.scala) { #job-manager } +@@snip [ActorPublisherDocSpec.scala]($code$/scala/docs/stream/ActorPublisherDocSpec.scala) { #job-manager } You send elements to the stream by calling `onNext`. You are allowed to send as many elements as have been requested by the stream subscriber. This amount can be inquired with @@ -468,7 +468,7 @@ More detailed information can be found in the API documentation. This is how it can be used as input `Source` to a `Flow`: -@@snip [ActorPublisherDocSpec.scala](../code/docs/stream/ActorPublisherDocSpec.scala) { #actor-publisher-usage } +@@snip [ActorPublisherDocSpec.scala]($code$/scala/docs/stream/ActorPublisherDocSpec.scala) { #actor-publisher-usage } A publisher that is created with `Sink.asPublisher` supports a specified number of subscribers. Additional subscription attempts will be rejected with an `IllegalStateException`. @@ -493,7 +493,7 @@ messages from the stream. It can also receive other, non-stream messages, in the Here is an example of such an actor. It dispatches incoming jobs to child worker actors: -@@snip [ActorSubscriberDocSpec.scala](../code/docs/stream/ActorSubscriberDocSpec.scala) { #worker-pool } +@@snip [ActorSubscriberDocSpec.scala]($code$/scala/docs/stream/ActorSubscriberDocSpec.scala) { #worker-pool } Subclass must define the `RequestStrategy` to control stream back pressure. After each incoming message the `ActorSubscriber` will automatically invoke @@ -511,4 +511,4 @@ More detailed information can be found in the API documentation. This is how it can be used as output `Sink` to a `Flow`: -@@snip [ActorSubscriberDocSpec.scala](../code/docs/stream/ActorSubscriberDocSpec.scala) { #actor-subscriber-usage } \ No newline at end of file +@@snip [ActorSubscriberDocSpec.scala]($code$/scala/docs/stream/ActorSubscriberDocSpec.scala) { #actor-subscriber-usage } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/stream/stream-io.md b/akka-docs/src/main/paradox/scala/stream/stream-io.md index 175206b290..5654ac1fec 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-io.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-io.md @@ -12,7 +12,7 @@ as the library does it transparently for you. 
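The `Source`/`Sink` to `Publisher`/`Subscriber` conversions referenced in the hunks above boil down to `Sink.asPublisher` and `Source.asSubscriber`. A minimal, self-contained sketch, using plain `Int` elements instead of the tweet model of `ReactiveStreamsDocSpec.scala`, might look like this:

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import org.reactivestreams.{ Publisher, Subscriber }

object ReactiveStreamsInteropSketch extends App {
  implicit val system = ActorSystem("interop")
  implicit val materializer = ActorMaterializer()

  // Expose a Source as a Reactive Streams Publisher (single subscriber only) ...
  val numbers: Publisher[Int] =
    Source(1 to 10).runWith(Sink.asPublisher(fanout = false))

  // ... and expose a Sink as a Reactive Streams Subscriber.
  val printer: Subscriber[Int] =
    Source.asSubscriber[Int].to(Sink.foreach[Int](n => println(n))).run()

  // Any Reactive Streams Publisher can then be connected to any Subscriber.
  numbers.subscribe(printer)
}
```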
In order to implement a simple EchoServer we `bind` to a given address, which returns a `Source[IncomingConnection, Future[ServerBinding]]`, which will emit an `IncomingConnection` element for each new connection that the Server should handle: -@@snip [StreamTcpDocSpec.scala](../code/docs/stream/io/StreamTcpDocSpec.scala) { #echo-server-simple-bind } +@@snip [StreamTcpDocSpec.scala]($code$/scala/docs/stream/io/StreamTcpDocSpec.scala) { #echo-server-simple-bind } ![tcp-stream-bind.png](../../images/tcp-stream-bind.png) @@ -23,7 +23,7 @@ helper Flow to chunk the inputs up into actual lines of text. The last boolean argument indicates that we require an explicit line ending even for the last message before the connection is closed. In this example we simply add exclamation marks to each incoming text message and push it through the flow: -@@snip [StreamTcpDocSpec.scala](../code/docs/stream/io/StreamTcpDocSpec.scala) { #echo-server-simple-handle } +@@snip [StreamTcpDocSpec.scala]($code$/scala/docs/stream/io/StreamTcpDocSpec.scala) { #echo-server-simple-handle } ![tcp-stream-run.png](../../images/tcp-stream-run.png) @@ -49,7 +49,7 @@ Let's say we know a server has exposed a simple command line interface over TCP, and would like to interact with it using Akka Streams over TCP. To open an outgoing connection socket we use the `outgoingConnection` method: -@@snip [StreamTcpDocSpec.scala](../code/docs/stream/io/StreamTcpDocSpec.scala) { #repl-client } +@@snip [StreamTcpDocSpec.scala]($code$/scala/docs/stream/io/StreamTcpDocSpec.scala) { #repl-client } The `repl` flow we use to handle the server interaction first prints the servers response, then awaits on input from the command line (this blocking call is used here just for the sake of simplicity) and converts it to a @@ -84,7 +84,7 @@ Thankfully in most situations finding the right spot to start the conversation i to the protocol we are trying to implement using Streams. In chat-like applications, which our examples resemble, it makes sense to make the Server initiate the conversation by emitting a "hello" message: -@@snip [StreamTcpDocSpec.scala](../code/docs/stream/io/StreamTcpDocSpec.scala) { #welcome-banner-chat-server } +@@snip [StreamTcpDocSpec.scala]($code$/scala/docs/stream/io/StreamTcpDocSpec.scala) { #welcome-banner-chat-server } To emit the initial message we merge a `Source` with a single element, after the command processing but before the framing and transformation to `ByteString` s this way we do not have to repeat such logic. @@ -101,7 +101,7 @@ on files. Streaming data from a file is as easy as creating a *FileIO.fromPath* given a target path, and an optional `chunkSize` which determines the buffer size determined as one "element" in such stream: -@@snip [StreamFileDocSpec.scala](../code/docs/stream/io/StreamFileDocSpec.scala) { #file-source } +@@snip [StreamFileDocSpec.scala]($code$/scala/docs/stream/io/StreamFileDocSpec.scala) { #file-source } Please note that these processing stages are backed by Actors and by default are configured to run on a pre-configured threadpool-backed dispatcher dedicated for File IO. 
This is very important as it isolates the blocking file IO operations from the rest @@ -109,4 +109,4 @@ of the ActorSystem allowing each dispatcher to be utilised in the most efficient dispatcher for file IO operations globally, you can do so by changing the `akka.stream.blocking-io-dispatcher`, or for a specific stage by specifying a custom Dispatcher in code, like this: -@@snip [StreamFileDocSpec.scala](../code/docs/stream/io/StreamFileDocSpec.scala) { #custom-dispatcher-code } \ No newline at end of file +@@snip [StreamFileDocSpec.scala]($code$/scala/docs/stream/io/StreamFileDocSpec.scala) { #custom-dispatcher-code } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/stream/stream-parallelism.md b/akka-docs/src/main/paradox/scala/stream/stream-parallelism.md index 2631c1cc1a..010de241c6 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-parallelism.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-parallelism.md @@ -23,7 +23,7 @@ are two pancakes being cooked at the same time, one being cooked on its first si completion. This is how this setup would look like implemented as a stream: -@@snip [FlowParallelismDocSpec.scala](../code/docs/stream/FlowParallelismDocSpec.scala) { #pipelining } +@@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #pipelining } The two `map` stages in sequence (encapsulated in the "frying pan" flows) will be executed in a pipelined way, basically doing the same as Roland with his frying pans: @@ -55,7 +55,7 @@ the results on a shared plate. Whenever a pan becomes empty, he takes the next s In essence he parallelizes the same process over multiple pans. This is how this setup will look like if implemented using streams: -@@snip [FlowParallelismDocSpec.scala](../code/docs/stream/FlowParallelismDocSpec.scala) { #parallelism } +@@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #parallelism } The benefit of parallelizing is that it is easy to scale. In the pancake example it is easy to add a third frying pan with Patrik's method, but Roland cannot add a third frying pan, @@ -77,7 +77,7 @@ First, let's look at how we can parallelize pipelined processing stages. In the will employ two chefs, each working using Roland's pipelining method, but we use the two chefs in parallel, just like Patrik used the two frying pans. This is how it looks like if expressed as streams: -@@snip [FlowParallelismDocSpec.scala](../code/docs/stream/FlowParallelismDocSpec.scala) { #parallel-pipeline } +@@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #parallel-pipeline } The above pattern works well if there are many independent jobs that do not depend on the results of each other, but the jobs themselves need multiple processing steps where each step builds on the result of @@ -95,7 +95,7 @@ plate. This is again straightforward to implement with the streams API: -@@snip [FlowParallelismDocSpec.scala](../code/docs/stream/FlowParallelismDocSpec.scala) { #pipelined-parallel } +@@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #pipelined-parallel } This usage pattern is less common but might be usable if a certain step in the pipeline might take wildly different times to finish different jobs. 
The reason is that there are more balance-merge steps in this pattern diff --git a/akka-docs/src/main/paradox/scala/stream/stream-quickstart.md b/akka-docs/src/main/paradox/scala/stream/stream-quickstart.md index bd8e9171c3..4d869a6c5f 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-quickstart.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-quickstart.md @@ -8,19 +8,19 @@ choice as described in @ref:[Using a build tool](../../scala/intro/getting-start A stream usually begins at a source, so this is also how we start an Akka Stream. Before we create one, we import the full complement of streaming tools: -@@snip [QuickStartDocSpec.scala](../code/docs/stream/QuickStartDocSpec.scala) { #stream-imports } +@@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #stream-imports } If you want to execute the code samples while you read through the quick start guide, you will also need the following imports: -@@snip [QuickStartDocSpec.scala](../code/docs/stream/QuickStartDocSpec.scala) { #other-imports } +@@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #other-imports } And an object to hold your code, for example: -@@snip [QuickStartDocSpec.scala](../code/docs/stream/QuickStartDocSpec.scala) { #main-app } +@@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #main-app } Now we will start with a rather simple source, emitting the integers 1 to 100: -@@snip [QuickStartDocSpec.scala](../code/docs/stream/QuickStartDocSpec.scala) { #create-source } +@@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #create-source } The `Source` type is parameterized with two types: the first one is the type of element that this source emits and the second one may signal that @@ -33,7 +33,7 @@ Having created this source means that we have a description of how to emit the first 100 natural numbers, but this source is not yet active. In order to get those numbers out we have to run it: -@@snip [QuickStartDocSpec.scala](../code/docs/stream/QuickStartDocSpec.scala) { #run-source } +@@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #run-source } This line will complement the source with a consumer function—in this example we simply print out the numbers to the console—and pass this little stream @@ -45,13 +45,13 @@ When running this source in a `scala.App` you might notice it does not terminate, because the `ActorSystem` is never terminated. Luckily `runForeach` returns a `Future[Done]` which resolves when the stream finishes: -@@snip [QuickStartDocSpec.scala](../code/docs/stream/QuickStartDocSpec.scala) { #run-source-and-terminate } +@@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #run-source-and-terminate } You may wonder where the Actor gets created that runs the stream, and you are probably also asking yourself what this `materializer` means. In order to get this value we first need to create an Actor system: -@@snip [QuickStartDocSpec.scala](../code/docs/stream/QuickStartDocSpec.scala) { #create-materializer } +@@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #create-materializer } There are other ways to create a materializer, e.g. from an `ActorContext` when using streams from within Actors. The @@ -66,7 +66,7 @@ description of what you want to run, and like an architect’s blueprint it can be reused, incorporated into a larger design. 
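Since the `QuickStartDocSpec.scala` snippets are only linked here, a compact sketch of the program the quickstart builds up to this point may help; everything below follows the surrounding prose, with names chosen for illustration only:

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.{ Done, NotUsed }

import scala.concurrent.Future

object QuickstartSketch extends App {
  implicit val system = ActorSystem("QuickStart")
  implicit val materializer = ActorMaterializer()

  // A description of a stream emitting the integers 1 to 100; NotUsed signals
  // that running it produces no auxiliary (materialized) value of interest.
  val source: Source[Int, NotUsed] = Source(1 to 100)

  // Running the blueprint attaches a consumer and materializes a Future[Done]
  // that completes when the stream finishes, so the ActorSystem can be shut down.
  val done: Future[Done] = source.runForeach(i => println(i))

  implicit val ec = system.dispatcher
  done.onComplete(_ => system.terminate())
}
```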
We may choose to transform the source of integers and write it to a file instead: -@@snip [QuickStartDocSpec.scala](../code/docs/stream/QuickStartDocSpec.scala) { #transform-source } +@@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #transform-source } First we use the `scan` combinator to run a computation over the whole stream: starting with the number 1 (`BigInt(1)`) we multiple by each of @@ -93,7 +93,7 @@ language for writing these streams always flows from left to right (just like plain English), we need a starting point that is like a source but with an “open” input. In Akka Streams this is called a `Flow`: -@@snip [QuickStartDocSpec.scala](../code/docs/stream/QuickStartDocSpec.scala) { #transform-sink } +@@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #transform-sink } Starting from a flow of strings we convert each to `ByteString` and then feed to the already known file-writing `Sink`. The resulting blueprint @@ -109,7 +109,7 @@ We can use the new and shiny `Sink` we just created by attaching it to our `factorials` source—after a small adaptation to turn the numbers into strings: -@@snip [QuickStartDocSpec.scala](../code/docs/stream/QuickStartDocSpec.scala) { #use-transformed-sink } +@@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #use-transformed-sink } ## Time-Based Processing @@ -121,7 +121,7 @@ number emitted by the `factorials` source is the factorial of zero, the second is the factorial of one, and so on. We combine these two by forming strings like `"3! = 6"`. -@@snip [QuickStartDocSpec.scala](../code/docs/stream/QuickStartDocSpec.scala) { #add-streams } +@@snip [QuickStartDocSpec.scala]($code$/scala/docs/stream/QuickStartDocSpec.scala) { #add-streams } All operations so far have been time-independent and could have been performed in the same fashion on strict collections of elements. The next line @@ -162,7 +162,7 @@ allow to control what should happen in such scenarios. Here's the data model we'll be working with throughout the quickstart examples: -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #model } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #model } @@@ note @@ -180,7 +180,7 @@ like for example finding all twitter handles of users who tweet about `#akka`. In order to prepare our environment by creating an `ActorSystem` and `ActorMaterializer`, which will be responsible for materializing and running the streams we are about to create: -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #materializer-setup } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #materializer-setup } The `ActorMaterializer` can optionally take `ActorMaterializerSettings` which can be used to define materialization properties, such as default buffer sizes (see also @ref:[Buffers for asynchronous stages](stream-rate.md#async-stream-buffers-scala)), the dispatcher to @@ -188,7 +188,7 @@ be used by the pipeline etc. These can be overridden with `withAttributes` on `F Let's assume we have a stream of tweets readily available. 
In Akka this is expressed as a `Source[Out, M]`: -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweet-source } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweet-source } Streams always start flowing from a `Source[Out,M1]` then can continue through `Flow[In,Out,M2]` elements or more advanced graph elements to finally be consumed by a `Sink[In,M3]` (ignore the type parameters `M1`, `M2` @@ -199,7 +199,7 @@ The operations should look familiar to anyone who has used the Scala Collections however they operate on streams and not collections of data (which is a very important distinction, as some operations only make sense in streaming and vice versa): -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-filter-map } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-filter-map } Finally in order to @ref:[materialize](stream-flows-and-basics.md#stream-materialization-scala) and run the stream computation we need to attach the Flow to a `Sink` that will get the Flow running. The simplest way to do this is to call @@ -207,18 +207,18 @@ the Flow to a `Sink` that will get the Flow running. The simplest way to do this the `Sink` companion object. For now let's simply print each author: -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-foreachsink-println } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-foreachsink-println } or by using the shorthand version (which are defined only for the most popular Sinks such as `Sink.fold` and `Sink.foreach`): -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-foreach-println } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #authors-foreach-println } Materializing and running a stream always requires a `Materializer` to be in implicit scope (or passed in explicitly, like this: `.run(materializer)`). The complete snippet looks like this: -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #first-sample } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #first-sample } ## Flattening sequences in streams @@ -227,7 +227,7 @@ we might want to map from one element to a number of elements and receive a "fla works on Scala Collections. In order to get a flattened stream of hashtags from our stream of tweets we can use the `mapConcat` combinator: -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #hashtags-mapConcat } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #hashtags-mapConcat } @@@ note @@ -257,7 +257,7 @@ at the expense of not reading as familiarly as collection transformations. 
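As a rough illustration of the flattening step described above, a sketch using a deliberately simplified stand-in for the quickstart's data model (not the one defined in `TwitterStreamQuickstartDocSpec.scala`) could be:

```scala
import akka.NotUsed
import akka.stream.scaladsl.Source

object MapConcatSketch {
  // Simplified stand-ins for the quickstart's data model.
  final case class Hashtag(name: String)
  final case class Tweet(author: String, body: String) {
    def hashtags: Set[Hashtag] =
      body.split(" ").collect { case t if t.startsWith("#") => Hashtag(t) }.toSet
  }

  val tweets: Source[Tweet, NotUsed] = Source(
    List(
      Tweet("user1", "#akka rocks"),
      Tweet("user2", "#akka #streams everywhere")))

  // mapConcat maps each element to an immutable.Iterable and flattens the
  // results into the stream, much like flatMap on Scala collections.
  val hashtags: Source[Hashtag, NotUsed] = tweets.mapConcat(_.hashtags.toList)
}
```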
Graphs are constructed using `GraphDSL` like this: -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #graph-dsl-broadcast } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #graph-dsl-broadcast } As you can see, inside the `GraphDSL` we use an implicit graph builder `b` to mutably construct the graph using the `~>` "edge operator" (also read as "connect" or "via" or "to"). The operator is provided implicitly @@ -289,7 +289,7 @@ in either `OutOfMemoryError` s or other severe degradations of service responsiv and must be handled explicitly. For example, if we are only interested in the "*most recent tweets, with a buffer of 10 elements*" this can be expressed using the `buffer` element: -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-slow-consumption-dropHead } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-slow-consumption-dropHead } The `buffer` element takes an explicit and required `OverflowStrategy`, which defines how the buffer should react when it receives another element while it is full. Strategies provided include dropping the oldest element (`dropHead`), @@ -307,7 +307,7 @@ but in general it is possible to deal with finite streams and come up with a nic First, let's write such an element counter using `Sink.fold` and see how the types look like: -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-fold-count } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-fold-count } First we prepare a reusable `Flow` that will change each incoming tweet into an integer of value `1`. We'll use this in order to combine those with a `Sink.fold` that will sum all `Int` elements of the stream and make its result available as @@ -333,13 +333,13 @@ and materialized multiple times, because it is just the "blueprint" of the strea for example one that consumes a live stream of tweets within a minute, the materialized values for those two materializations will be different, as illustrated by this example: -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-runnable-flow-materialized-twice } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-runnable-flow-materialized-twice } Many elements in Akka Streams provide materialized values which can be used for obtaining either results of computation or steering these elements which will be discussed in detail in @ref:[Stream Materialization](stream-flows-and-basics.md#stream-materialization-scala). 
Summing up this section, now we know what happens behind the scenes when we run this one-liner, which is equivalent to the multi line version above: -@@snip [TwitterStreamQuickstartDocSpec.scala](../code/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-fold-count-oneline } +@@snip [TwitterStreamQuickstartDocSpec.scala]($code$/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala) { #tweets-fold-count-oneline } @@@ note diff --git a/akka-docs/src/main/paradox/scala/stream/stream-rate.md b/akka-docs/src/main/paradox/scala/stream/stream-rate.md index b142f16089..ddf8c36873 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-rate.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-rate.md @@ -12,7 +12,7 @@ To run a stage asynchronously it has to be marked explicitly as such using the ` asynchronously means that a stage, after handing out an element to its downstream consumer is able to immediately process the next message. To demonstrate what we mean by this, let's take a look at the following example: -@@snip [StreamBuffersRateSpec.scala](../code/docs/stream/StreamBuffersRateSpec.scala) { #pipelining } +@@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #pipelining } Running the above example, one of the possible outputs looks like this: @@ -61,16 +61,16 @@ akka.stream.materializer.max-input-buffer-size = 16 Alternatively they can be set by passing a `ActorMaterializerSettings` to the materializer: -@@snip [StreamBuffersRateSpec.scala](../code/docs/stream/StreamBuffersRateSpec.scala) { #materializer-buffer } +@@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #materializer-buffer } If the buffer size needs to be set for segments of a `Flow` only, it is possible by defining a separate `Flow` with these attributes: -@@snip [StreamBuffersRateSpec.scala](../code/docs/stream/StreamBuffersRateSpec.scala) { #section-buffer } +@@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #section-buffer } Here is an example of a code that demonstrate some of the issues caused by internal buffers: -@@snip [StreamBuffersRateSpec.scala](../code/docs/stream/StreamBuffersRateSpec.scala) { #buffering-abstraction-leak } +@@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #buffering-abstraction-leak } Running the above example one would expect the number *3* to be printed in every 3 seconds (the `conflateWithSeed` step here is configured so that it counts the number of elements received before the downstream `ZipWith` consumes @@ -94,7 +94,7 @@ pipeline of an application. The example below will ensure that 1000 jobs (but not more) are dequeued from an external (imaginary) system and stored locally in memory - relieving the external system: -@@snip [StreamBuffersRateSpec.scala](../code/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-backpressure } +@@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-backpressure } The next example will also queue up 1000 jobs locally, but if there are more jobs waiting in the imaginary external systems, it makes space for the new element by @@ -102,12 +102,12 @@ dropping one element from the *tail* of the buffer. Dropping from the tail is a it must be noted that this will drop the *youngest* waiting job. 
If some "fairness" is desired in the sense that we want to be nice to jobs that has been waiting for long, then this option can be useful. -@@snip [StreamBuffersRateSpec.scala](../code/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-droptail } +@@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-droptail } Instead of dropping the youngest element from the tail of the buffer a new element can be dropped without enqueueing it to the buffer at all. -@@snip [StreamBuffersRateSpec.scala](../code/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-dropnew } +@@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-dropnew } Here is another example with a queue of 1000 jobs, but it makes space for the new element by dropping one element from the *head* of the buffer. This is the *oldest* @@ -116,13 +116,13 @@ resent if not processed in a certain period. The oldest element will be retransmitted soon, (in fact a retransmitted duplicate might be already in the queue!) so it makes sense to drop it first. -@@snip [StreamBuffersRateSpec.scala](../code/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-drophead } +@@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-drophead } Compared to the dropping strategies above, dropBuffer drops all the 1000 jobs it has enqueued once the buffer gets full. This aggressive strategy is useful when dropping jobs is preferred to delaying jobs. -@@snip [StreamBuffersRateSpec.scala](../code/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-dropbuffer } +@@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-dropbuffer } If our imaginary external job provider is a client using our API, we might want to enforce that the client cannot have more than 1000 queued jobs @@ -130,7 +130,7 @@ otherwise we consider it flooding and terminate the connection. This is easily achievable by the error strategy which simply fails the stream once the buffer gets full. -@@snip [StreamBuffersRateSpec.scala](../code/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-fail } +@@snip [StreamBuffersRateSpec.scala]($code$/scala/docs/stream/StreamBuffersRateSpec.scala) { #explicit-buffers-fail } ## Rate transformation @@ -142,7 +142,7 @@ useful to combine elements from a producer until a demand signal comes from a co Below is an example snippet that summarizes fast stream of elements to a standart deviation, mean and count of elements that have arrived while the stats have been calculated. -@@snip [RateTransformationDocSpec.scala](../code/docs/stream/RateTransformationDocSpec.scala) { #conflate-summarize } +@@snip [RateTransformationDocSpec.scala]($code$/scala/docs/stream/RateTransformationDocSpec.scala) { #conflate-summarize } This example demonstrates that such flow's rate is decoupled. The element rate at the start of the flow can be much higher that the element rate at the end of the flow. @@ -151,7 +151,7 @@ Another possible use of `conflate` is to not consider all elements for summary w Example below demonstrates how `conflate` can be used to implement random drop of elements when consumer is not able to keep up with the producer. 
-@@snip [RateTransformationDocSpec.scala](../code/docs/stream/RateTransformationDocSpec.scala) { #conflate-sample } +@@snip [RateTransformationDocSpec.scala]($code$/scala/docs/stream/RateTransformationDocSpec.scala) { #conflate-sample } ### Understanding expand @@ -161,12 +161,12 @@ Expand allows to extrapolate a value to be sent as an element to a consumer. As a simple use of `expand` here is a flow that sends the same element to consumer when producer does not send any new elements. -@@snip [RateTransformationDocSpec.scala](../code/docs/stream/RateTransformationDocSpec.scala) { #expand-last } +@@snip [RateTransformationDocSpec.scala]($code$/scala/docs/stream/RateTransformationDocSpec.scala) { #expand-last } Expand also allows to keep some state between demand requests from the downstream. Leveraging this, here is a flow that tracks and reports a drift between fast consumer and slow producer. -@@snip [RateTransformationDocSpec.scala](../code/docs/stream/RateTransformationDocSpec.scala) { #expand-drift } +@@snip [RateTransformationDocSpec.scala]($code$/scala/docs/stream/RateTransformationDocSpec.scala) { #expand-drift } Note that all of the elements coming from upstream will go through `expand` at least once. This means that the output of this flow is going to report a drift of zero if producer is fast enough, or a larger drift otherwise. \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/stream/stream-testkit.md b/akka-docs/src/main/paradox/scala/stream/stream-testkit.md index 72e9a23faa..c5d039b005 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-testkit.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-testkit.md @@ -20,20 +20,20 @@ elements from a predefined collection, running a constructed test flow and asserting on the results that sink produced. Here is an example of a test for a sink: -@@snip [StreamTestKitDocSpec.scala](../code/docs/stream/StreamTestKitDocSpec.scala) { #strict-collection } +@@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #strict-collection } The same strategy can be applied for sources as well. In the next example we have a source that produces an infinite stream of elements. Such source can be tested by asserting that first arbitrary number of elements hold some condition. Here the `take` combinator and `Sink.seq` are very useful. -@@snip [StreamTestKitDocSpec.scala](../code/docs/stream/StreamTestKitDocSpec.scala) { #grouped-infinite } +@@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #grouped-infinite } When testing a flow we need to attach a source and a sink. As both stream ends are under our control, we can choose sources that tests various edge cases of the flow and sinks that ease assertions. -@@snip [StreamTestKitDocSpec.scala](../code/docs/stream/StreamTestKitDocSpec.scala) { #folded-stream } +@@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #folded-stream } ## TestKit @@ -45,7 +45,7 @@ One of the more straightforward tests would be to materialize stream to a `Future` and then use `pipe` pattern to pipe the result of that future to the probe. -@@snip [StreamTestKitDocSpec.scala](../code/docs/stream/StreamTestKitDocSpec.scala) { #pipeto-testprobe } +@@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #pipeto-testprobe } Instead of materializing to a future, we can use a `Sink.actorRef` that sends all incoming elements to the given `ActorRef`. 
Now we can use @@ -53,13 +53,13 @@ assertion methods on `TestProbe` and expect elements one by one as they arrive. We can also assert stream completion by expecting for `onCompleteMessage` which was given to `Sink.actorRef`. -@@snip [StreamTestKitDocSpec.scala](../code/docs/stream/StreamTestKitDocSpec.scala) { #sink-actorref } +@@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #sink-actorref } Similarly to `Sink.actorRef` that provides control over received elements, we can use `Source.actorRef` and have full control over elements to be sent. -@@snip [StreamTestKitDocSpec.scala](../code/docs/stream/StreamTestKitDocSpec.scala) { #source-actorref } +@@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #source-actorref } ## Streams TestKit @@ -78,20 +78,20 @@ Be sure to add the module `akka-stream-testkit` to your dependencies. A sink returned by `TestSink.probe` allows manual control over demand and assertions over elements coming downstream. -@@snip [StreamTestKitDocSpec.scala](../code/docs/stream/StreamTestKitDocSpec.scala) { #test-sink-probe } +@@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #test-sink-probe } A source returned by `TestSource.probe` can be used for asserting demand or controlling when stream is completed or ended with an error. -@@snip [StreamTestKitDocSpec.scala](../code/docs/stream/StreamTestKitDocSpec.scala) { #test-source-probe } +@@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #test-source-probe } You can also inject exceptions and test sink behaviour on error conditions. -@@snip [StreamTestKitDocSpec.scala](../code/docs/stream/StreamTestKitDocSpec.scala) { #injecting-failure } +@@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #injecting-failure } Test source and sink can be used together in combination when testing flows. -@@snip [StreamTestKitDocSpec.scala](../code/docs/stream/StreamTestKitDocSpec.scala) { #test-source-and-sink } +@@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #test-source-and-sink } ## Fuzzing Mode diff --git a/akka-docs/src/main/paradox/scala/testing.md b/akka-docs/src/main/paradox/scala/testing.md index 2f2dcf5468..ff8cd9f1a7 100644 --- a/akka-docs/src/main/paradox/scala/testing.md +++ b/akka-docs/src/main/paradox/scala/testing.md @@ -79,7 +79,7 @@ Having access to the actual `Actor` object allows application of all traditional unit testing techniques on the contained methods. Obtaining a reference is done like this: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-actor-ref } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-actor-ref } Since `TestActorRef` is generic in the actor type it returns the underlying actor with its proper static type. From this point on you may bring @@ -92,7 +92,7 @@ If your actor under test is a `FSM`, you may use the special `TestFSMRef` which offers all features of a normal `TestActorRef` and in addition allows access to the internal state: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-fsm-ref } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-fsm-ref } Due to a limitation in Scala’s type inference, there is only the factory method shown above, so you will probably write code like `TestFSMRef(new MyFSM)` @@ -119,7 +119,7 @@ usual. 
This trick is made possible by the `CallingThreadDispatcher` described below (see [CallingThreadDispatcher](#callingthreaddispatcher)); this dispatcher is set implicitly for any actor instantiated into a `TestActorRef`. -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-behavior } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-behavior } As the `TestActorRef` is a subclass of `LocalActorRef` with a few special extras, also aspects like supervision and restarting work properly, but @@ -149,7 +149,7 @@ any thrown exceptions, then there is another mode available for you: just use the `receive` method on `TestActorRef`, which will be forwarded to the underlying actor: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-expecting-exceptions } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-expecting-exceptions } ### Use Cases @@ -185,7 +185,7 @@ single procedure drives the test. The `TestKit` class contains a collection of tools which makes this common task easy. -@@snip [PlainWordSpec.scala](code/docs/testkit/PlainWordSpec.scala) { #plain-spec } +@@snip [PlainWordSpec.scala]($code$/scala/docs/testkit/PlainWordSpec.scala) { #plain-spec } The `TestKit` contains an actor named `testActor` which is the entry point for messages to be examined with the various `expectMsg...` @@ -332,7 +332,7 @@ handler with the `TestEventListener` and using an `EventFilter` allows assertions on log messages, including those which are generated by exceptions: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #event-filter } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #event-filter } If a number of occurrences is specific—as demonstrated above—then `intercept` will block until that number of matching messages have been received or the @@ -381,7 +381,7 @@ It should be noted that if the last message-receiving assertion of the block is latencies. This means that while individual contained assertions still use the maximum time bound, the overall block may take arbitrarily longer in this case. -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-within } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-within } @@@ note @@ -405,14 +405,14 @@ internally scaled by a factor taken from the [Configuration](), You can scale other durations with the same factor by using the implicit conversion in `akka.testkit` package object to add dilated function to `Duration`. -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #duration-dilation } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #duration-dilation } ### Resolving Conflicts with Implicit ActorRef If you want the sender of messages inside your TestKit-based tests to be the `testActor` simply mix in `ImplicitSender` into your test. -@@snip [PlainWordSpec.scala](code/docs/testkit/PlainWordSpec.scala) { #implicit-sender } +@@snip [PlainWordSpec.scala]($code$/scala/docs/testkit/PlainWordSpec.scala) { #implicit-sender } ### Using Multiple Probe Actors @@ -424,11 +424,11 @@ message flows. To make this more powerful and convenient, there is a concrete implementation called `TestProbe`. 
The functionality is best explained using a small example: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #imports-test-probe } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #imports-test-probe } -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #my-double-echo } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #my-double-echo } -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-probe } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe } Here a the system under test is simulated by `MyDoubleEcho`, which is supposed to mirror its input to two outputs. Attaching two test probes enables @@ -441,12 +441,12 @@ the test setup. If you have many test probes, you can name them to get meaningful actor names in test logs and assertions: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-probe-with-custom-name } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-with-custom-name } Probes may also be equipped with custom assertions to make your test code even more concise and clear: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-special-probe } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-special-probe } You have complete flexibility here in mixing and matching the `TestKit` facilities with your own checks and choosing an intuitive name for it. In real @@ -468,14 +468,14 @@ means that it is dangerous to try watching e.g. `TestActorRef` from a A `TestProbe` can register itself for DeathWatch of any other actor: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-probe-watch } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-watch } #### Replying to Messages Received by Probes The probes keep track of the communications channel for replies, if possible, so they can also reply: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-probe-reply } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-reply } #### Forwarding Messages Received by Probes @@ -485,9 +485,9 @@ sent to a `TestProbe` `probe` instead, you can make assertions concerning volume and timing of the message flow while still keeping the network functioning: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-probe-forward-actors } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-forward-actors } -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-probe-forward } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-probe-forward } The `dest` actor will receive the same message invocation as if no test probe had intervened. @@ -501,7 +501,7 @@ keep a test running and verify traces later you can also install an This code can be used to forward messages, e.g. in a chain `A --> Probe --> B`, as long as a certain protocol is obeyed. 
-@@snip [TestProbeSpec.scala]../../../../../akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala) { #autopilot } +@@snip [TestProbeSpec.scala]($akka$/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala) { #autopilot } The `run` method must return the auto-pilot for the next message, which may be `KeepRunning` to retain the current one or `NoAutoPilot` @@ -515,7 +515,7 @@ described [above](#testkit-within) is local to each probe. Hence, probes do not react to each other's deadlines or to the deadline set in an enclosing `TestKit` instance: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-within-probe } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-within-probe } Here, the `expectMsg` call will use the default timeout. @@ -536,14 +536,14 @@ Conversely, a parent's binding to its child can be lessened as follows: For example, the structure of the code you want to test may follow this pattern: -@@snip [ParentChildSpec.scala](code/docs/testkit/ParentChildSpec.scala) { #test-example } +@@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #test-example } #### Introduce child to its parent The first option is to avoid use of the `context.parent` function and create a child with a custom parent by passing an explicit reference to its parent instead. -@@snip [ParentChildSpec.scala](code/docs/testkit/ParentChildSpec.scala) { #test-dependentchild } +@@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #test-dependentchild } #### Create the child using TestProbe @@ -551,7 +551,7 @@ The `TestProbe` class can in fact create actors that will run with the test prob This will cause any messages the child actor sends to *context.parent* to end up in the test probe. -@@snip [ParentChildSpec.scala](code/docs/testkit/ParentChildSpec.scala) { #test-TestProbe-parent } +@@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #test-TestProbe-parent } #### Using a fabricated parent @@ -559,22 +559,22 @@ If you prefer to avoid modifying the parent or child constructor you can create a fabricated parent in your test. This, however, does not enable you to test the parent actor in isolation. -@@snip [ParentChildSpec.scala](code/docs/testkit/ParentChildSpec.scala) { #test-fabricated-parent } +@@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #test-fabricated-parent } #### Externalize child making from the parent Alternatively, you can tell the parent how to create its child. There are two ways to do this: by giving it a `Props` object or by giving it a function which takes care of creating the child actor: -@@snip [ParentChildSpec.scala](code/docs/testkit/ParentChildSpec.scala) { #test-dependentparent } +@@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #test-dependentparent } Creating the `Props` is straightforward and the function may look like this in your test code: -@@snip [ParentChildSpec.scala](code/docs/testkit/ParentChildSpec.scala) { #child-maker-test } +@@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #child-maker-test } And like this in your application code: -@@snip [ParentChildSpec.scala](code/docs/testkit/ParentChildSpec.scala) { #child-maker-prod } +@@snip [ParentChildSpec.scala]($code$/scala/docs/testkit/ParentChildSpec.scala) { #child-maker-prod } Which of these methods is the best depends on what is most important to test. 
The most generic option is to create the parent actor by passing it a function that is @@ -594,7 +594,7 @@ so long as all intervening actors run on this dispatcher. Just set the dispatcher as you normally would: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #calling-thread-dispatcher } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #calling-thread-dispatcher } ### How it works @@ -729,7 +729,7 @@ options: `akka.actor.debug.receive` — which enables the `loggable` statement to be applied to an actor’s `receive` function: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #logging-receive } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #logging-receive } If the aforementioned setting is not given in the [Configuration](), this method will pass through the given `Receive` function unmodified, meaning that @@ -783,7 +783,7 @@ support. If for some reason it is a problem to inherit from `TestKit` due to it being a concrete class instead of a trait, there’s `TestKitBase`: -@@snip [TestkitDocSpec.scala](code/docs/testkit/TestkitDocSpec.scala) { #test-kit-base } +@@snip [TestkitDocSpec.scala]($code$/scala/docs/testkit/TestkitDocSpec.scala) { #test-kit-base } The `implicit lazy val system` must be declared exactly like that (you can of course pass arguments to the actor system factory as needed) because trait diff --git a/akka-docs/src/main/paradox/scala/testkit-example.md b/akka-docs/src/main/paradox/scala/testkit-example.md index f59baf753c..6a2e72c710 100644 --- a/akka-docs/src/main/paradox/scala/testkit-example.md +++ b/akka-docs/src/main/paradox/scala/testkit-example.md @@ -2,4 +2,4 @@ Ray Roestenburg's example code from [his blog](http://roestenburg.agilesquad.com/2011/02/unit-testing-akka-actors-with-testkit_12.html) adapted to work with Akka 2.x. -@@snip [TestKitUsageSpec.scala](code/docs/testkit/TestKitUsageSpec.scala) { #testkit-usage } \ No newline at end of file +@@snip [TestKitUsageSpec.scala]($code$/scala/docs/testkit/TestKitUsageSpec.scala) { #testkit-usage } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/typed-actors.md b/akka-docs/src/main/paradox/scala/typed-actors.md index 037541dce9..f6ff7bee1c 100644 --- a/akka-docs/src/main/paradox/scala/typed-actors.md +++ b/akka-docs/src/main/paradox/scala/typed-actors.md @@ -46,7 +46,7 @@ They have their niche, use them sparingly. Before we create our first Typed Actor we should first go through the tools that we have at our disposal, it's located in `akka.actor.TypedActor`. 
-@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-extension-tools } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-extension-tools } @@@ warning @@ -63,34 +63,34 @@ To create a Typed Actor you need to have one or more interfaces, and one impleme The following imports are assumed: -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #imports } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #imports } Our example interface: -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } Our example implementation of that interface: -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } The most trivial way of creating a Typed Actor instance of our `Squarer`: -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create1 } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create1 } First type is the type of the proxy, the second type is the type of the implementation. If you need to call a specific constructor you do it like this: -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create2 } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create2 } Since you supply a `Props`, you can specify which dispatcher to use, what the default timeout should be used and more. Now, our `Squarer` doesn't have any methods, so we'd better add those. -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } Alright, now we've got some methods we can call, but we need to implement those in SquarerImpl. -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } Excellent, now we have an interface and an implementation of that interface, and we know how to create a Typed Actor from that, so let's look at calling these methods. @@ -115,25 +115,25 @@ we *strongly* recommend that parameters passed are immutable. ### One-way message send -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-oneway } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-oneway } As simple as that! The method will be executed on another thread; asynchronously. ### Request-reply message send -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-option } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-option } This will block for as long as the timeout that was set in the Props of the Typed Actor, if needed. It will return `None` if a timeout occurs. 
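How a call is dispatched is decided entirely by the declared return type of the interface method, which can be summarised with a small, purely illustrative interface (the actual `Squarer` used by the snippets lives in `TypedActorDocSpec.scala`):

```scala
import scala.concurrent.Future

// Illustrative only: each return type selects a different message-send semantics.
trait Arithmetic {
  def fireAndForget(i: Int): Unit          // one-way send, returns immediately
  def blockingOption(i: Int): Option[Int]  // request-reply, blocks up to the timeout, None on timeout
  def blockingStrict(i: Int): Int          // request-reply, blocks, TimeoutException on timeout
  def asyncReply(i: Int): Future[Int]      // request-reply, non-blocking Future
}
```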
-@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-strict } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-strict } This will block for as long as the timeout that was set in the Props of the Typed Actor, if needed. It will throw a `java.util.concurrent.TimeoutException` if a timeout occurs. ### Request-reply-with-future message send -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-future } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-future } This call is asynchronous, and the Future returned can be used for asynchronous composition. @@ -141,11 +141,11 @@ This call is asynchronous, and the Future returned can be used for asynchronous Since Akka's Typed Actors are backed by Akka Actors they must be stopped when they aren't needed anymore. -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-stop } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-stop } This asynchronously stops the Typed Actor associated with the specified proxy ASAP. -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-poisonpill } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-poisonpill } This asynchronously stops the Typed Actor associated with the specified proxy after it's done with all calls that were made prior to this call. @@ -155,7 +155,7 @@ after it's done with all calls that were made prior to this call. Since you can obtain a contextual Typed Actor Extension by passing in an `ActorContext` you can create child Typed Actors by invoking `typedActorOf(..)` on that: -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-hierarchy } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-hierarchy } You can also create a child Typed Actor in regular Akka Actors by giving the `ActorContext` as an input parameter to TypedActor.get(…). @@ -202,15 +202,15 @@ The ActorRef needs to accept `MethodCall` messages. Since `TypedActors` are backed by `Akka Actors`, you can use `typedActorOf` to proxy `ActorRefs` potentially residing on remote nodes. -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-remote } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-remote } ## Supercharging Here's an example on how you can use traits to mix in behavior in your Typed Actors. -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-supercharge } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-supercharge } -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-actor-supercharge-usage } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-supercharge-usage } ## Typed Router pattern @@ -220,10 +220,10 @@ which can implement a specific routing logic, such as `smallest-mailbox` or `con Routers are not provided directly for typed actors, but it is really easy to leverage an untyped router and use a typed proxy in front of it. 
To showcase this let's create typed actors that assign themselves some random `id`, so we know that in fact, the router has sent the message to different actors: -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-router-types } +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-router-types } In order to round robin among a few instances of such actors, you can simply create a plain untyped router, and then facade it with a `TypedActor` like shown in the example below. This works because typed actors of course communicate using the same mechanisms as normal actors, and methods calls on them get transformed into message sends of `MethodCall` messages. -@@snip [TypedActorDocSpec.scala](code/docs/actor/TypedActorDocSpec.scala) { #typed-router } \ No newline at end of file +@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-router } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/typed.md b/akka-docs/src/main/paradox/scala/typed.md index 3418d5ad94..4bf433abd1 100644 --- a/akka-docs/src/main/paradox/scala/typed.md +++ b/akka-docs/src/main/paradox/scala/typed.md @@ -13,12 +13,12 @@ As discussed in @ref:[Actor Systems](../scala/general/actor-systems.md) (and fol sending messages between independent units of computation, but how does that look like? In all of the following these imports are assumed: -@@snip [IntroSpec.scala](code/docs/akka/typed/IntroSpec.scala) { #imports } +@@snip [IntroSpec.scala]($code$/scala/docs/akka/typed/IntroSpec.scala) { #imports } With these in place we can define our first Actor, and of course it will say hello! -@@snip [IntroSpec.scala](code/docs/akka/typed/IntroSpec.scala) { #hello-world-actor } +@@snip [IntroSpec.scala]($code$/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-actor } This small piece of code defines two message types, one for commanding the Actor to greet someone and one that the Actor will use to confirm that it has @@ -52,7 +52,7 @@ wrapped scope—the `HelloWorld` object. Now we want to try out this Actor, so we must start an ActorSystem to host it: -@@snip [IntroSpec.scala](code/docs/akka/typed/IntroSpec.scala) { #hello-world } +@@snip [IntroSpec.scala]($code$/scala/docs/akka/typed/IntroSpec.scala) { #hello-world } After importing the Actor’s protocol definition we start an Actor system from the defined behavior. @@ -144,7 +144,7 @@ a message that contains their screen name and then they can post messages. The chat room Actor will disseminate all posted messages to all currently connected client Actors. The protocol definition could look like the following: -@@snip [IntroSpec.scala](code/docs/akka/typed/IntroSpec.scala) { #chatroom-protocol } +@@snip [IntroSpec.scala]($code$/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-protocol } Initially the client Actors only get access to an `ActorRef[GetSession]` which allows them to make the first step. Once a client’s session has been @@ -161,7 +161,7 @@ full protocol that can involve multiple Actors and that can evolve over multiple steps. 
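For readers following along without the included sources, a condensed sketch of such a multi-step protocol could look like this (the names mirror the ones discussed above, but the authoritative definitions are in `IntroSpec.scala`):

```scala
import akka.typed.ActorRef

// Step 1: initially a client can only ask for a session.
final case class GetSession(screenName: String, replyTo: ActorRef[SessionEvent])

// Step 2: the chat room replies with a narrower capability …
sealed trait SessionEvent
final case class SessionGranted(handle: ActorRef[PostMessage]) extends SessionEvent
final case class MessagePosted(screenName: String, message: String) extends SessionEvent

// … which only permits posting messages into the room.
final case class PostMessage(message: String)
```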
The implementation of the chat room protocol would be as simple as the following: -@@snip [IntroSpec.scala](code/docs/akka/typed/IntroSpec.scala) { #chatroom-behavior } +@@snip [IntroSpec.scala]($code$/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-behavior } The core of this behavior is stateful, the chat room itself does not change into something else when sessions are established, but we introduce a variable @@ -202,7 +202,7 @@ problematic, so passing an `ActorRef[PostSessionMessage]` where In order to see this chat room in action we need to write a client Actor that can use it: -@@snip [IntroSpec.scala](code/docs/akka/typed/IntroSpec.scala) { #chatroom-gabbler } +@@snip [IntroSpec.scala]($code$/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-gabbler } From this behavior we can create an Actor that will accept a chat room session, post a message, wait to see it published, and then terminate. The last step @@ -227,7 +227,7 @@ want—it complicates its logic) or the gabbler from the chat room (which is nonsensical) or we start both of them from a third Actor—our only sensible choice: -@@snip [IntroSpec.scala](code/docs/akka/typed/IntroSpec.scala) { #chatroom-main } +@@snip [IntroSpec.scala]($code$/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-main } In good tradition we call the `main` Actor what it is, it directly corresponds to the `main` method in a traditional Java application. This diff --git a/akka-docs/src/main/paradox/scala/code/docs/CompileOnlySpec.scala b/akka-docs/src/main/scala/docs/CompileOnlySpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/CompileOnlySpec.scala rename to akka-docs/src/main/scala/docs/CompileOnlySpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/actor/ActorDocSpec.scala b/akka-docs/src/main/scala/docs/actor/ActorDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/actor/ActorDocSpec.scala rename to akka-docs/src/main/scala/docs/actor/ActorDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/actor/ByteBufferSerializerDocSpec.scala b/akka-docs/src/main/scala/docs/actor/ByteBufferSerializerDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/actor/ByteBufferSerializerDocSpec.scala rename to akka-docs/src/main/scala/docs/actor/ByteBufferSerializerDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/actor/FSMDocSpec.scala b/akka-docs/src/main/scala/docs/actor/FSMDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/actor/FSMDocSpec.scala rename to akka-docs/src/main/scala/docs/actor/FSMDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/actor/FaultHandlingDocSample.scala b/akka-docs/src/main/scala/docs/actor/FaultHandlingDocSample.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/actor/FaultHandlingDocSample.scala rename to akka-docs/src/main/scala/docs/actor/FaultHandlingDocSample.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/src/main/scala/docs/actor/FaultHandlingDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/actor/FaultHandlingDocSpec.scala rename to akka-docs/src/main/scala/docs/actor/FaultHandlingDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/actor/InitializationDocSpec.scala b/akka-docs/src/main/scala/docs/actor/InitializationDocSpec.scala similarity index 100% rename from 
akka-docs/src/main/paradox/scala/code/docs/actor/InitializationDocSpec.scala rename to akka-docs/src/main/scala/docs/actor/InitializationDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/actor/PropsEdgeCaseSpec.scala b/akka-docs/src/main/scala/docs/actor/PropsEdgeCaseSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/actor/PropsEdgeCaseSpec.scala rename to akka-docs/src/main/scala/docs/actor/PropsEdgeCaseSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/actor/SchedulerDocSpec.scala b/akka-docs/src/main/scala/docs/actor/SchedulerDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/actor/SchedulerDocSpec.scala rename to akka-docs/src/main/scala/docs/actor/SchedulerDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/actor/SharedMutableStateDocSpec.scala b/akka-docs/src/main/scala/docs/actor/SharedMutableStateDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/actor/SharedMutableStateDocSpec.scala rename to akka-docs/src/main/scala/docs/actor/SharedMutableStateDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/actor/TypedActorDocSpec.scala b/akka-docs/src/main/scala/docs/actor/TypedActorDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/actor/TypedActorDocSpec.scala rename to akka-docs/src/main/scala/docs/actor/TypedActorDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/actor/UnnestedReceives.scala b/akka-docs/src/main/scala/docs/actor/UnnestedReceives.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/actor/UnnestedReceives.scala rename to akka-docs/src/main/scala/docs/actor/UnnestedReceives.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/agent/AgentDocSpec.scala b/akka-docs/src/main/scala/docs/agent/AgentDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/agent/AgentDocSpec.scala rename to akka-docs/src/main/scala/docs/agent/AgentDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/akka/typed/IntroSpec.scala b/akka-docs/src/main/scala/docs/akka/typed/IntroSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/akka/typed/IntroSpec.scala rename to akka-docs/src/main/scala/docs/akka/typed/IntroSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/camel/Consumers.scala b/akka-docs/src/main/scala/docs/camel/Consumers.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/camel/Consumers.scala rename to akka-docs/src/main/scala/docs/camel/Consumers.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/camel/CustomRoute.scala b/akka-docs/src/main/scala/docs/camel/CustomRoute.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/camel/CustomRoute.scala rename to akka-docs/src/main/scala/docs/camel/CustomRoute.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/camel/Introduction.scala b/akka-docs/src/main/scala/docs/camel/Introduction.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/camel/Introduction.scala rename to akka-docs/src/main/scala/docs/camel/Introduction.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/camel/Producers.scala b/akka-docs/src/main/scala/docs/camel/Producers.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/camel/Producers.scala rename 
to akka-docs/src/main/scala/docs/camel/Producers.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/camel/PublishSubscribe.scala b/akka-docs/src/main/scala/docs/camel/PublishSubscribe.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/camel/PublishSubscribe.scala rename to akka-docs/src/main/scala/docs/camel/PublishSubscribe.scala diff --git a/akka-docs/src/main/paradox/scala/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala b/akka-docs/src/main/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala rename to akka-docs/src/main/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/cluster/ClusterDocSpec.scala b/akka-docs/src/main/scala/docs/cluster/ClusterDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/cluster/ClusterDocSpec.scala rename to akka-docs/src/main/scala/docs/cluster/ClusterDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/cluster/FactorialBackend.scala b/akka-docs/src/main/scala/docs/cluster/FactorialBackend.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/cluster/FactorialBackend.scala rename to akka-docs/src/main/scala/docs/cluster/FactorialBackend.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/cluster/FactorialFrontend.scala b/akka-docs/src/main/scala/docs/cluster/FactorialFrontend.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/cluster/FactorialFrontend.scala rename to akka-docs/src/main/scala/docs/cluster/FactorialFrontend.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/cluster/MetricsListener.scala b/akka-docs/src/main/scala/docs/cluster/MetricsListener.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/cluster/MetricsListener.scala rename to akka-docs/src/main/scala/docs/cluster/MetricsListener.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/cluster/SimpleClusterListener.scala b/akka-docs/src/main/scala/docs/cluster/SimpleClusterListener.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/cluster/SimpleClusterListener.scala rename to akka-docs/src/main/scala/docs/cluster/SimpleClusterListener.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/cluster/SimpleClusterListener2.scala b/akka-docs/src/main/scala/docs/cluster/SimpleClusterListener2.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/cluster/SimpleClusterListener2.scala rename to akka-docs/src/main/scala/docs/cluster/SimpleClusterListener2.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/cluster/TransformationBackend.scala b/akka-docs/src/main/scala/docs/cluster/TransformationBackend.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/cluster/TransformationBackend.scala rename to akka-docs/src/main/scala/docs/cluster/TransformationBackend.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/cluster/TransformationFrontend.scala b/akka-docs/src/main/scala/docs/cluster/TransformationFrontend.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/cluster/TransformationFrontend.scala rename to akka-docs/src/main/scala/docs/cluster/TransformationFrontend.scala diff --git 
a/akka-docs/src/main/paradox/scala/code/docs/cluster/TransformationMessages.scala b/akka-docs/src/main/scala/docs/cluster/TransformationMessages.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/cluster/TransformationMessages.scala rename to akka-docs/src/main/scala/docs/cluster/TransformationMessages.scala diff --git a/akka-docs/src/main/paradox/scala/general/code/docs/config/ConfigDocSpec.scala b/akka-docs/src/main/scala/docs/config/ConfigDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/general/code/docs/config/ConfigDocSpec.scala rename to akka-docs/src/main/scala/docs/config/ConfigDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/ddata/DistributedDataDocSpec.scala b/akka-docs/src/main/scala/docs/ddata/DistributedDataDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/ddata/DistributedDataDocSpec.scala rename to akka-docs/src/main/scala/docs/ddata/DistributedDataDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/ddata/ShoppingCart.scala b/akka-docs/src/main/scala/docs/ddata/ShoppingCart.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/ddata/ShoppingCart.scala rename to akka-docs/src/main/scala/docs/ddata/ShoppingCart.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/ddata/TwoPhaseSet.scala b/akka-docs/src/main/scala/docs/ddata/TwoPhaseSet.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/ddata/TwoPhaseSet.scala rename to akka-docs/src/main/scala/docs/ddata/TwoPhaseSet.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala b/akka-docs/src/main/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala rename to akka-docs/src/main/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala b/akka-docs/src/main/scala/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala rename to akka-docs/src/main/scala/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/src/main/scala/docs/dispatcher/DispatcherDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/dispatcher/DispatcherDocSpec.scala rename to akka-docs/src/main/scala/docs/dispatcher/DispatcherDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/dispatcher/MyUnboundedMailbox.scala b/akka-docs/src/main/scala/docs/dispatcher/MyUnboundedMailbox.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/dispatcher/MyUnboundedMailbox.scala rename to akka-docs/src/main/scala/docs/dispatcher/MyUnboundedMailbox.scala diff --git a/akka-docs/src/main/paradox/scala/common/code/docs/duration/Sample.scala b/akka-docs/src/main/scala/docs/duration/Sample.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/common/code/docs/duration/Sample.scala rename to akka-docs/src/main/scala/docs/duration/Sample.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/event/EventBusDocSpec.scala b/akka-docs/src/main/scala/docs/event/EventBusDocSpec.scala similarity index 100% rename 
from akka-docs/src/main/paradox/scala/code/docs/event/EventBusDocSpec.scala rename to akka-docs/src/main/scala/docs/event/EventBusDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/event/LoggingDocSpec.scala b/akka-docs/src/main/scala/docs/event/LoggingDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/event/LoggingDocSpec.scala rename to akka-docs/src/main/scala/docs/event/LoggingDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/extension/ExtensionDocSpec.scala b/akka-docs/src/main/scala/docs/extension/ExtensionDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/extension/ExtensionDocSpec.scala rename to akka-docs/src/main/scala/docs/extension/ExtensionDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/extension/SettingsExtensionDocSpec.scala b/akka-docs/src/main/scala/docs/extension/SettingsExtensionDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/extension/SettingsExtensionDocSpec.scala rename to akka-docs/src/main/scala/docs/extension/SettingsExtensionDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/additional/code/docs/faq/Faq.scala b/akka-docs/src/main/scala/docs/faq/Faq.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/additional/code/docs/faq/Faq.scala rename to akka-docs/src/main/scala/docs/faq/Faq.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/future/FutureDocSpec.scala b/akka-docs/src/main/scala/docs/future/FutureDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/future/FutureDocSpec.scala rename to akka-docs/src/main/scala/docs/future/FutureDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/io/EchoServer.scala b/akka-docs/src/main/scala/docs/io/EchoServer.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/io/EchoServer.scala rename to akka-docs/src/main/scala/docs/io/EchoServer.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/io/IODocSpec.scala b/akka-docs/src/main/scala/docs/io/IODocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/io/IODocSpec.scala rename to akka-docs/src/main/scala/docs/io/IODocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/io/ReadBackPressure.scala b/akka-docs/src/main/scala/docs/io/ReadBackPressure.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/io/ReadBackPressure.scala rename to akka-docs/src/main/scala/docs/io/ReadBackPressure.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/io/ScalaUdpMulticast.scala b/akka-docs/src/main/scala/docs/io/ScalaUdpMulticast.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/io/ScalaUdpMulticast.scala rename to akka-docs/src/main/scala/docs/io/ScalaUdpMulticast.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/io/ScalaUdpMulticastSpec.scala b/akka-docs/src/main/scala/docs/io/ScalaUdpMulticastSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/io/ScalaUdpMulticastSpec.scala rename to akka-docs/src/main/scala/docs/io/ScalaUdpMulticastSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/io/UdpDocSpec.scala b/akka-docs/src/main/scala/docs/io/UdpDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/io/UdpDocSpec.scala rename to 
akka-docs/src/main/scala/docs/io/UdpDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/pattern/BackoffSupervisorDocSpec.scala b/akka-docs/src/main/scala/docs/pattern/BackoffSupervisorDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/pattern/BackoffSupervisorDocSpec.scala rename to akka-docs/src/main/scala/docs/pattern/BackoffSupervisorDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/pattern/SchedulerPatternSpec.scala b/akka-docs/src/main/scala/docs/pattern/SchedulerPatternSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/pattern/SchedulerPatternSpec.scala rename to akka-docs/src/main/scala/docs/pattern/SchedulerPatternSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/persistence/PersistenceDocSpec.scala b/akka-docs/src/main/scala/docs/persistence/PersistenceDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/persistence/PersistenceDocSpec.scala rename to akka-docs/src/main/scala/docs/persistence/PersistenceDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/persistence/PersistenceEventAdapterDocSpec.scala b/akka-docs/src/main/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/persistence/PersistenceEventAdapterDocSpec.scala rename to akka-docs/src/main/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/persistence/PersistenceMultiDocSpec.scala b/akka-docs/src/main/scala/docs/persistence/PersistenceMultiDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/persistence/PersistenceMultiDocSpec.scala rename to akka-docs/src/main/scala/docs/persistence/PersistenceMultiDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/persistence/PersistencePluginDocSpec.scala b/akka-docs/src/main/scala/docs/persistence/PersistencePluginDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/persistence/PersistencePluginDocSpec.scala rename to akka-docs/src/main/scala/docs/persistence/PersistencePluginDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala b/akka-docs/src/main/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala rename to akka-docs/src/main/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/persistence/PersistenceSerializerDocSpec.scala b/akka-docs/src/main/scala/docs/persistence/PersistenceSerializerDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/persistence/PersistenceSerializerDocSpec.scala rename to akka-docs/src/main/scala/docs/persistence/PersistenceSerializerDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/persistence/PersistentActorExample.scala b/akka-docs/src/main/scala/docs/persistence/PersistentActorExample.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/persistence/PersistentActorExample.scala rename to akka-docs/src/main/scala/docs/persistence/PersistentActorExample.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala 
b/akka-docs/src/main/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala rename to akka-docs/src/main/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/persistence/query/MyEventsByTagPublisher.scala b/akka-docs/src/main/scala/docs/persistence/query/MyEventsByTagPublisher.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/persistence/query/MyEventsByTagPublisher.scala rename to akka-docs/src/main/scala/docs/persistence/query/MyEventsByTagPublisher.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/persistence/query/PersistenceQueryDocSpec.scala b/akka-docs/src/main/scala/docs/persistence/query/PersistenceQueryDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/persistence/query/PersistenceQueryDocSpec.scala rename to akka-docs/src/main/scala/docs/persistence/query/PersistenceQueryDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala b/akka-docs/src/main/scala/docs/remoting/RemoteDeploymentDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala rename to akka-docs/src/main/scala/docs/remoting/RemoteDeploymentDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/routing/ConsistentHashingRouterDocSpec.scala b/akka-docs/src/main/scala/docs/routing/ConsistentHashingRouterDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/routing/ConsistentHashingRouterDocSpec.scala rename to akka-docs/src/main/scala/docs/routing/ConsistentHashingRouterDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/routing/CustomRouterDocSpec.scala b/akka-docs/src/main/scala/docs/routing/CustomRouterDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/routing/CustomRouterDocSpec.scala rename to akka-docs/src/main/scala/docs/routing/CustomRouterDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/routing/RouterDocSpec.scala b/akka-docs/src/main/scala/docs/routing/RouterDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/routing/RouterDocSpec.scala rename to akka-docs/src/main/scala/docs/routing/RouterDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/serialization/SerializationDocSpec.scala b/akka-docs/src/main/scala/docs/serialization/SerializationDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/serialization/SerializationDocSpec.scala rename to akka-docs/src/main/scala/docs/serialization/SerializationDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/ActorPublisherDocSpec.scala b/akka-docs/src/main/scala/docs/stream/ActorPublisherDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/ActorPublisherDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/ActorPublisherDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/ActorSubscriberDocSpec.scala b/akka-docs/src/main/scala/docs/stream/ActorSubscriberDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/ActorSubscriberDocSpec.scala rename to 
akka-docs/src/main/scala/docs/stream/ActorSubscriberDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/BidiFlowDocSpec.scala b/akka-docs/src/main/scala/docs/stream/BidiFlowDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/BidiFlowDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/BidiFlowDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/CompositionDocSpec.scala b/akka-docs/src/main/scala/docs/stream/CompositionDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/CompositionDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/CompositionDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/FlowDocSpec.scala b/akka-docs/src/main/scala/docs/stream/FlowDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/FlowDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/FlowDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/FlowErrorDocSpec.scala b/akka-docs/src/main/scala/docs/stream/FlowErrorDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/FlowErrorDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/FlowErrorDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/FlowParallelismDocSpec.scala b/akka-docs/src/main/scala/docs/stream/FlowParallelismDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/FlowParallelismDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/FlowParallelismDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/GraphCyclesSpec.scala b/akka-docs/src/main/scala/docs/stream/GraphCyclesSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/GraphCyclesSpec.scala rename to akka-docs/src/main/scala/docs/stream/GraphCyclesSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/GraphDSLDocSpec.scala b/akka-docs/src/main/scala/docs/stream/GraphDSLDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/GraphDSLDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/GraphDSLDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/GraphStageDocSpec.scala b/akka-docs/src/main/scala/docs/stream/GraphStageDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/GraphStageDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/GraphStageDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/GraphStageLoggingDocSpec.scala b/akka-docs/src/main/scala/docs/stream/GraphStageLoggingDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/GraphStageLoggingDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/GraphStageLoggingDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/HubsDocSpec.scala b/akka-docs/src/main/scala/docs/stream/HubsDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/HubsDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/HubsDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/IntegrationDocSpec.scala b/akka-docs/src/main/scala/docs/stream/IntegrationDocSpec.scala similarity index 100% rename from 
akka-docs/src/main/paradox/scala/code/docs/stream/IntegrationDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/IntegrationDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/KillSwitchDocSpec.scala b/akka-docs/src/main/scala/docs/stream/KillSwitchDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/KillSwitchDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/KillSwitchDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/MigrationsScala.scala b/akka-docs/src/main/scala/docs/stream/MigrationsScala.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/MigrationsScala.scala rename to akka-docs/src/main/scala/docs/stream/MigrationsScala.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/QuickStartDocSpec.scala b/akka-docs/src/main/scala/docs/stream/QuickStartDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/QuickStartDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/QuickStartDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/RateTransformationDocSpec.scala b/akka-docs/src/main/scala/docs/stream/RateTransformationDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/RateTransformationDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/RateTransformationDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/ReactiveStreamsDocSpec.scala b/akka-docs/src/main/scala/docs/stream/ReactiveStreamsDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/ReactiveStreamsDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/ReactiveStreamsDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/StreamBuffersRateSpec.scala b/akka-docs/src/main/scala/docs/stream/StreamBuffersRateSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/StreamBuffersRateSpec.scala rename to akka-docs/src/main/scala/docs/stream/StreamBuffersRateSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/StreamPartialGraphDSLDocSpec.scala b/akka-docs/src/main/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/StreamPartialGraphDSLDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/StreamTestKitDocSpec.scala b/akka-docs/src/main/scala/docs/stream/StreamTestKitDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/StreamTestKitDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/StreamTestKitDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/TwitterStreamQuickstartDocSpec.scala b/akka-docs/src/main/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/TwitterStreamQuickstartDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeByteStrings.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeByteStrings.scala similarity index 100% rename from 
akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeByteStrings.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeByteStrings.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeCollectingMetrics.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeCollectingMetrics.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeCollectingMetrics.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeCollectingMetrics.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeDecompress.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeDecompress.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeDecompress.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeDecompress.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeDigest.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeDigest.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeDigest.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeDigest.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeDroppyBroadcast.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeDroppyBroadcast.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeFlattenSeq.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeFlattenSeq.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeFlattenSeq.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeFlattenSeq.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeGlobalRateLimit.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeGlobalRateLimit.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeHold.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeHold.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeHold.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeHold.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeKeepAlive.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeKeepAlive.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeKeepAlive.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeKeepAlive.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeLoggingElements.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeLoggingElements.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeLoggingElements.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeLoggingElements.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeManualTrigger.scala 
b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeManualTrigger.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeManualTrigger.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeManualTrigger.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeMissedTicks.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeMissedTicks.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeMissedTicks.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeMissedTicks.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeMultiGroupBy.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeMultiGroupBy.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeParseLines.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeParseLines.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeParseLines.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeParseLines.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeReduceByKey.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeReduceByKey.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeReduceByKey.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeReduceByKey.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeSeq.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeSeq.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeSeq.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeSeq.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeSimpleDrop.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeSimpleDrop.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeSimpleDrop.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeSimpleDrop.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeSpec.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeSpec.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeWorkerPool.scala b/akka-docs/src/main/scala/docs/stream/cookbook/RecipeWorkerPool.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/cookbook/RecipeWorkerPool.scala rename to akka-docs/src/main/scala/docs/stream/cookbook/RecipeWorkerPool.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/stream/io/StreamFileDocSpec.scala b/akka-docs/src/main/scala/docs/stream/io/StreamFileDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/io/StreamFileDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/io/StreamFileDocSpec.scala diff --git 
a/akka-docs/src/main/paradox/scala/code/docs/stream/io/StreamTcpDocSpec.scala b/akka-docs/src/main/scala/docs/stream/io/StreamTcpDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/stream/io/StreamTcpDocSpec.scala rename to akka-docs/src/main/scala/docs/stream/io/StreamTcpDocSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/testkit/ParentChildSpec.scala b/akka-docs/src/main/scala/docs/testkit/ParentChildSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/testkit/ParentChildSpec.scala rename to akka-docs/src/main/scala/docs/testkit/ParentChildSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/testkit/PlainWordSpec.scala b/akka-docs/src/main/scala/docs/testkit/PlainWordSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/testkit/PlainWordSpec.scala rename to akka-docs/src/main/scala/docs/testkit/PlainWordSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/testkit/TestKitUsageSpec.scala b/akka-docs/src/main/scala/docs/testkit/TestKitUsageSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/testkit/TestKitUsageSpec.scala rename to akka-docs/src/main/scala/docs/testkit/TestKitUsageSpec.scala diff --git a/akka-docs/src/main/paradox/scala/code/docs/testkit/TestkitDocSpec.scala b/akka-docs/src/main/scala/docs/testkit/TestkitDocSpec.scala similarity index 100% rename from akka-docs/src/main/paradox/scala/code/docs/testkit/TestkitDocSpec.scala rename to akka-docs/src/main/scala/docs/testkit/TestkitDocSpec.scala