diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 74ff8ba90b..463d9146a7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -62,7 +62,7 @@ The steps are exactly the same for everyone involved in the project (be it core 1. [Fork the project](https://github.com/akka/akka#fork-destination-box) on GitHub. You'll need to create a feature-branch for your work on your fork, as this way you'll be able to submit a pull request against the mainline Akka. 1. Create a branch on your fork and work on the feature. For example: `git checkout -b wip-custom-headers-akka-http` - Please make sure to follow the general quality guidelines (specified below) when developing your patch. - - Please write additional tests covering your feature and adjust existing ones if needed before submitting your pull request. The `validatePullRequest` sbt task ([explained below](#validatePullRequest)) may come in handy to verify your changes are correct. + - Please write additional tests covering your feature and adjust existing ones if needed before submitting your pull request. The `validatePullRequest` sbt task ([explained below](#the-validatepullrequest-task)) may come in handy to verify your changes are correct. 1. Once your feature is complete, prepare the commit following our [Creating Commits And Writing Commit Messages](#creating-commits-and-writing-commit-messages). For example, a good commit message would be: `Adding compression support for Manifests #22222` (note the reference to the ticket it aimed to resolve). 1. If it's a new feature, or a change of behaviour, document it on the [akka-docs](https://github.com/akka/akka/tree/master/akka-docs), remember, an undocumented feature is not a feature. If the feature was touching Scala or Java DSL, make sure to document it in both the Java and Scala documentation (usually in a file of the same name, but under `/scala/` instead of `/java/` etc). 1. Now it's finally time to [submit the pull request](https://help.github.com/articles/using-pull-requests)! @@ -181,7 +181,7 @@ an error like this: [error] filter with: ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.foldAsync") ``` -In such situations it's good to consult with a core team member if the violation can be safely ignored (by adding the above snippet to `project/MiMa.scala`), or if it would indeed break binary compatibility. +In such situations it's good to consult with a core team member if the violation can be safely ignored (by adding the above snippet to `/src/main/mima-filters/.backwards.excludes`), or if it would indeed break binary compatibility. Situations when it may be fine to ignore a MiMa issued warning include: @@ -233,6 +233,15 @@ akka-docs/paradox The generated html documentation is in `akka-docs/target/paradox/site/main/index.html`. +### Java- or Scala-specific documentation + +For new documentation chapters, we recommend adding a page to the `scala` tree documenting both Java and Scala, using [tabs](http://developer.lightbend.com/docs/paradox/latest/features/snippet-inclusion.html) for code snippets and [groups]( http://developer.lightbend.com/docs/paradox/latest/features/groups.html) for other Java- or Scala-specific segments or sections. +An example of such a 'merged' page is `akka-docs/src/main/paradox/scala/actors.md`. + +Add a symlink to the `java` tree to make the page available there as well. 
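For illustration, a rough sketch of what such a merged page might contain: tabbed code snippets plus a Scala-only group. The file paths and snippet labels below are hypothetical; see the Paradox documentation linked above for the authoritative directive syntax.

```
## Sending messages

Scala
:   @@snip [ActorDocSpec.scala](code/docs/actor/ActorDocSpec.scala) { #tell }

Java
:   @@snip [ActorDocTest.java](code/jdocs/actor/ActorDocTest.java) { #tell }

@@@ div { .group-scala }

This paragraph is rendered only when the reader has selected Scala.

@@@
```

The symlink in the `java` tree then points at this merged page, so both language trees serve the same content.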
+ +Consolidation of existing pages is tracked in [issue #23052](https://github.com/akka/akka/issues/23052) + ### Note for paradox on Windows On Windows, you need special care to generate html documentation with paradox. diff --git a/README.md b/README.md index b304435eda..875ae13b15 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ Akka is here to change that. Using the Actor Model we raise the abstraction level and provide a better platform to build correct concurrent and scalable applications. This model is a perfect match for the principles laid out in the [Reactive Manifesto](http://www.reactivemanifesto.org/). -For resilience we adopt the "Let it crash" model which the telecom industry has used with great success to build applications that self-heal and systems that never stop. +For resilience, we adopt the "Let it crash" model which the telecom industry has used with great success to build applications that self-heal and systems that never stop. Actors also provide the abstraction for transparent distribution and the basis for truly scalable and fault-tolerant applications. diff --git a/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala new file mode 100644 index 0000000000..42e95a7233 --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala @@ -0,0 +1,266 @@ +/** + * Copyright (C) 2017 Lightbend Inc. + */ +package akka.actor + +import java.util.concurrent.atomic.AtomicInteger + +import scala.concurrent.duration._ +import scala.util.control.NoStackTrace +import akka.testkit._ +import scala.concurrent.Await + +object TimerSpec { + sealed trait Command + case class Tick(n: Int) extends Command + case object Bump extends Command + case class SlowThenBump(latch: TestLatch) extends Command + with NoSerializationVerificationNeeded + case object End extends Command + case class Throw(e: Throwable) extends Command + case object Cancel extends Command + case class SlowThenThrow(latch: TestLatch, e: Throwable) extends Command + with NoSerializationVerificationNeeded + + sealed trait Event + case class Tock(n: Int) extends Event + case class GotPostStop(timerActive: Boolean) extends Event + case class GotPreRestart(timerActive: Boolean) extends Event + + class Exc extends RuntimeException("simulated exc") with NoStackTrace + + def target(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () ⇒ Int): Props = + Props(new Target(monitor, interval, repeat, initial)) + + class Target(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () ⇒ Int) extends Actor with Timers { + private var bumpCount = initial() + + if (repeat) + timers.startPeriodicTimer("T", Tick(bumpCount), interval) + else + timers.startSingleTimer("T", Tick(bumpCount), interval) + + override def preRestart(reason: Throwable, message: Option[Any]): Unit = { + monitor ! GotPreRestart(timers.isTimerActive("T")) + // don't call super.preRestart to avoid postStop + } + + override def postStop(): Unit = { + monitor ! GotPostStop(timers.isTimerActive("T")) + } + + def bump(): Unit = { + bumpCount += 1 + timers.startPeriodicTimer("T", Tick(bumpCount), interval) + } + + override def receive = { + case Tick(n) ⇒ + monitor ! 
Tock(n) + case Bump ⇒ + bump() + case SlowThenBump(latch) ⇒ + Await.ready(latch, 10.seconds) + bump() + case End ⇒ + context.stop(self) + case Cancel ⇒ + timers.cancel("T") + case Throw(e) ⇒ + throw e + case SlowThenThrow(latch, e) ⇒ + Await.ready(latch, 10.seconds) + throw e + } + } + + def fsmTarget(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () ⇒ Int): Props = + Props(new FsmTarget(monitor, interval, repeat, initial)) + + object TheState + + class FsmTarget(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () ⇒ Int) extends FSM[TheState.type, Int] { + + private var restarting = false + + override def preRestart(reason: Throwable, message: Option[Any]): Unit = { + restarting = true + super.preRestart(reason, message) + monitor ! GotPreRestart(isTimerActive("T")) + } + + override def postStop(): Unit = { + super.postStop() + if (!restarting) + monitor ! GotPostStop(isTimerActive("T")) + } + + def bump(bumpCount: Int): State = { + setTimer("T", Tick(bumpCount + 1), interval, repeat) + stay using (bumpCount + 1) + } + + { + val i = initial() + startWith(TheState, i) + setTimer("T", Tick(i), interval, repeat) + } + + when(TheState) { + case Event(Tick(n), _) ⇒ + monitor ! Tock(n) + stay + case Event(Bump, bumpCount) ⇒ + bump(bumpCount) + case Event(SlowThenBump(latch), bumpCount) ⇒ + Await.ready(latch, 10.seconds) + bump(bumpCount) + case Event(End, _) ⇒ + stop() + case Event(Cancel, _) ⇒ + cancelTimer("T") + stay + case Event(Throw(e), _) ⇒ + throw e + case Event(SlowThenThrow(latch, e), _) ⇒ + Await.ready(latch, 10.seconds) + throw e + } + + initialize() + } + +} + +class TimerSpec extends AbstractTimerSpec { + override def testName: String = "Timers" + override def target(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () ⇒ Int = () ⇒ 1): Props = + TimerSpec.target(monitor, interval, repeat, initial) +} + +class FsmTimerSpec extends AbstractTimerSpec { + override def testName: String = "FSM Timers" + override def target(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () ⇒ Int = () ⇒ 1): Props = + TimerSpec.fsmTarget(monitor, interval, repeat, initial) +} + +abstract class AbstractTimerSpec extends AkkaSpec { + import TimerSpec._ + + val interval = 1.second + val dilatedInterval = interval.dilated + + def target(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () ⇒ Int = () ⇒ 1): Props + + def testName: String + + testName must { + "schedule non-repeated ticks" taggedAs TimingTest in { + val probe = TestProbe() + val ref = system.actorOf(target(probe.ref, 10.millis, repeat = false)) + + probe.expectMsg(Tock(1)) + probe.expectNoMsg(100.millis) + + ref ! End + probe.expectMsg(GotPostStop(false)) + } + + "schedule repeated ticks" taggedAs TimingTest in { + val probe = TestProbe() + val ref = system.actorOf(target(probe.ref, dilatedInterval, repeat = true)) + probe.within((interval * 4) - 100.millis) { + probe.expectMsg(Tock(1)) + probe.expectMsg(Tock(1)) + probe.expectMsg(Tock(1)) + } + + ref ! End + probe.expectMsg(GotPostStop(false)) + } + + "replace timer" taggedAs TimingTest in { + val probe = TestProbe() + val ref = system.actorOf(target(probe.ref, dilatedInterval, repeat = true)) + probe.expectMsg(Tock(1)) + val latch = new TestLatch(1) + // next Tock(1) enqueued in mailboxed, but should be discarded because of new timer + ref ! SlowThenBump(latch) + probe.expectNoMsg(interval + 100.millis) + latch.countDown() + probe.expectMsg(Tock(2)) + + ref ! 
End + probe.expectMsg(GotPostStop(false)) + } + + "cancel timer" taggedAs TimingTest in { + val probe = TestProbe() + val ref = system.actorOf(target(probe.ref, dilatedInterval, repeat = true)) + probe.expectMsg(Tock(1)) + ref ! Cancel + probe.expectNoMsg(dilatedInterval + 100.millis) + + ref ! End + probe.expectMsg(GotPostStop(false)) + } + + "cancel timers when restarted" taggedAs TimingTest in { + val probe = TestProbe() + val ref = system.actorOf(target(probe.ref, dilatedInterval, repeat = true)) + ref ! Throw(new Exc) + probe.expectMsg(GotPreRestart(false)) + + ref ! End + probe.expectMsg(GotPostStop(false)) + } + + "discard timers from old incarnation after restart, alt 1" taggedAs TimingTest in { + val probe = TestProbe() + val startCounter = new AtomicInteger(0) + val ref = system.actorOf(target(probe.ref, dilatedInterval, repeat = true, + initial = () ⇒ startCounter.incrementAndGet())) + probe.expectMsg(Tock(1)) + + val latch = new TestLatch(1) + // next Tock(1) is enqueued in mailbox, but should be discarded by new incarnation + ref ! SlowThenThrow(latch, new Exc) + probe.expectNoMsg(interval + 100.millis) + latch.countDown() + probe.expectMsg(GotPreRestart(false)) + probe.expectNoMsg(interval / 2) + probe.expectMsg(Tock(2)) // this is from the startCounter increment + + ref ! End + probe.expectMsg(GotPostStop(false)) + } + + "discard timers from old incarnation after restart, alt 2" taggedAs TimingTest in { + val probe = TestProbe() + val ref = system.actorOf(target(probe.ref, dilatedInterval, repeat = true)) + probe.expectMsg(Tock(1)) + // change state so that we see that the restart starts over again + ref ! Bump + + probe.expectMsg(Tock(2)) + + val latch = new TestLatch(1) + // next Tock(2) is enqueued in mailbox, but should be discarded by new incarnation + ref ! SlowThenThrow(latch, new Exc) + probe.expectNoMsg(interval + 100.millis) + latch.countDown() + probe.expectMsg(GotPreRestart(false)) + probe.expectMsg(Tock(1)) + + ref ! End + probe.expectMsg(GotPostStop(false)) + } + + "cancel timers when stopped" in { + val probe = TestProbe() + val ref = system.actorOf(target(probe.ref, dilatedInterval, repeat = true)) + ref ! 
End + probe.expectMsg(GotPostStop(false)) + } + } +} diff --git a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala index 9a5838ffa2..6d00e9a85e 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala @@ -139,18 +139,18 @@ class SerializeSpec extends AkkaSpec(SerializationTests.serializeConf) { val ser = SerializationExtension(system) import ser._ - val addr = Address("120", "Monroe Street", "Santa Clara", "95050") + val address = Address("120", "Monroe Street", "Santa Clara", "95050") val person = Person("debasish ghosh", 25, Address("120", "Monroe Street", "Santa Clara", "95050")) "Serialization" must { "have correct bindings" in { - ser.bindings.collectFirst { case (c, s) if c == addr.getClass ⇒ s.getClass } should ===(Some(classOf[JavaSerializer])) + ser.bindings.collectFirst { case (c, s) if c == address.getClass ⇒ s.getClass } should ===(Some(classOf[JavaSerializer])) ser.bindings.collectFirst { case (c, s) if c == classOf[PlainMessage] ⇒ s.getClass } should ===(Some(classOf[NoopSerializer])) } "serialize Address" in { - assert(deserialize(serialize(addr).get, classOf[Address]).get === addr) + assert(deserialize(serialize(address).get, classOf[Address]).get === address) } "serialize Person" in { diff --git a/akka-actor-tests/src/test/scala/akka/util/ImmutableIntMapSpec.scala b/akka-actor-tests/src/test/scala/akka/util/ImmutableIntMapSpec.scala new file mode 100644 index 0000000000..298b9f2d81 --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/util/ImmutableIntMapSpec.scala @@ -0,0 +1,151 @@ +/** + * Copyright (C) 2016-2017 Lightbend Inc. + */ +package akka.util +import org.scalatest.Matchers +import org.scalatest.WordSpec + +import scala.util.Random + +class ImmutableIntMapSpec extends WordSpec with Matchers { + + "ImmutableIntMap" must { + + "have no entries when empty" in { + val empty = ImmutableIntMap.empty + empty.size should be(0) + empty.keysIterator.toList should be(Nil) + } + + "add and get entries" in { + val m1 = ImmutableIntMap.empty.updated(10, 10) + m1.keysIterator.toList should be(List(10)) + m1.keysIterator.map(m1.get).toList should be(List(10)) + + val m2 = m1.updated(20, 20) + m2.keysIterator.toList should be(List(10, 20)) + m2.keysIterator.map(m2.get).toList should be(List(10, 20)) + + val m3 = m1.updated(5, 5) + m3.keysIterator.toList should be(List(5, 10)) + m3.keysIterator.map(m3.get).toList should be(List(5, 10)) + + val m4 = m2.updated(5, 5) + m4.keysIterator.toList should be(List(5, 10, 20)) + m4.keysIterator.map(m4.get).toList should be(List(5, 10, 20)) + + val m5 = m4.updated(15, 15) + m5.keysIterator.toList should be(List(5, 10, 15, 20)) + m5.keysIterator.map(m5.get).toList should be(List(5, 10, 15, 20)) + } + + "replace entries" in { + val m1 = ImmutableIntMap.empty.updated(10, 10).updated(10, 11) + m1.keysIterator.map(m1.get).toList should be(List(11)) + + val m2 = m1.updated(20, 20).updated(30, 30) + .updated(20, 21).updated(30, 31) + m2.keysIterator.map(m2.get).toList should be(List(11, 21, 31)) + } + + "update if absent" in { + val m1 = ImmutableIntMap.empty.updated(10, 10).updated(20, 11) + m1.updateIfAbsent(10, 15) should be(ImmutableIntMap.empty.updated(10, 10).updated(20, 11)) + m1.updateIfAbsent(30, 12) should be(ImmutableIntMap.empty.updated(10, 10).updated(20, 11).updated(30, 12)) + } + + "have toString" in { + ImmutableIntMap.empty.toString 
should be("ImmutableIntMap()") + ImmutableIntMap.empty.updated(10, 10).toString should be("ImmutableIntMap(10 -> 10)") + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).toString should be( + "ImmutableIntMap(10 -> 10, 20 -> 20)") + } + + "have equals and hashCode" in { + ImmutableIntMap.empty.updated(10, 10) should be(ImmutableIntMap.empty.updated(10, 10)) + ImmutableIntMap.empty.updated(10, 10).hashCode should be( + ImmutableIntMap.empty.updated(10, 10).hashCode) + + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30) should be( + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30)) + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30).hashCode should be( + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30).hashCode) + + ImmutableIntMap.empty.updated(10, 10).updated(20, 20) should not be ImmutableIntMap.empty.updated(10, 10) + + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30) should not be + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 31) + + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30) should not be + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(31, 30) + + ImmutableIntMap.empty should be(ImmutableIntMap.empty) + ImmutableIntMap.empty.hashCode should be(ImmutableIntMap.empty.hashCode) + } + + "remove entries" in { + val m1 = ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30) + + val m2 = m1.remove(10) + m2.keysIterator.map(m2.get).toList should be(List(20, 30)) + + val m3 = m1.remove(20) + m3.keysIterator.map(m3.get).toList should be(List(10, 30)) + + val m4 = m1.remove(30) + m4.keysIterator.map(m4.get).toList should be(List(10, 20)) + + m1.remove(5) should be(m1) + + m1.remove(10).remove(20).remove(30) should be(ImmutableIntMap.empty) + } + + "get None when entry doesn't exist" in { + val m1 = ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30) + m1.get(5) should be(Int.MinValue) + m1.get(15) should be(Int.MinValue) + m1.get(25) should be(Int.MinValue) + m1.get(35) should be(Int.MinValue) + } + + "contain keys" in { + val m1 = ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30) + m1.contains(10) should be(true) + m1.contains(20) should be(true) + m1.contains(30) should be(true) + m1.contains(5) should be(false) + m1.contains(25) should be(false) + } + + "have correct behavior for random operations" in { + val seed = System.nanoTime() + val rnd = new Random(seed) + + var longMap = ImmutableIntMap.empty + var reference = Map.empty[Long, Int] + + def verify(): Unit = { + val m = longMap.keysIterator.map(key ⇒ key → longMap.get(key)).toMap + + m should be(reference) + } + + (1 to 1000).foreach { i ⇒ + withClue(s"seed=$seed, iteration=$i") { + val key = rnd.nextInt(100) + val value = rnd.nextPrintableChar() + rnd.nextInt(3) match { + case 0 | 1 ⇒ + longMap = longMap.updated(key, value) + reference = reference.updated(key, value) + case 2 ⇒ + longMap = longMap.remove(key) + reference = reference - key + } + verify() + } + } + } + + } +} diff --git a/akka-actor/src/main/mima-filters/2.4.1.backwards.excludes b/akka-actor/src/main/mima-filters/2.4.1.backwards.excludes new file mode 100644 index 0000000000..4df97f190e --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.4.1.backwards.excludes @@ -0,0 +1,12 @@ +# #19281 BackoffSupervisor updates +ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.BackoffSupervisor.akka$pattern$BackoffSupervisor$$child_=") 
+ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.BackoffSupervisor.akka$pattern$BackoffSupervisor$$restartCount") +ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.BackoffSupervisor.akka$pattern$BackoffSupervisor$$restartCount_=") +ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.BackoffSupervisor.akka$pattern$BackoffSupervisor$$child") + +# #19487 +ProblemFilters.exclude[Problem]("akka.actor.dungeon.Children*") + +# #19440 +ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.PipeToSupport.pipeCompletionStage") +ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.FutureTimeoutSupport.afterCompletionStage") diff --git a/akka-actor/src/main/mima-filters/2.4.10.backwards.excludes b/akka-actor/src/main/mima-filters/2.4.10.backwards.excludes new file mode 100644 index 0000000000..b085d0a45f --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.4.10.backwards.excludes @@ -0,0 +1,2 @@ +# #21131 new implementation for Akka Typed +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.DeathWatch.isWatching") diff --git a/akka-actor/src/main/mima-filters/2.4.11.backwards.excludes b/akka-actor/src/main/mima-filters/2.4.11.backwards.excludes new file mode 100644 index 0000000000..436d16bac2 --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.4.11.backwards.excludes @@ -0,0 +1,4 @@ +# MarkerLoggingAdapter introduced (all internal classes) +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.actor.LocalActorRefProvider.log") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.actor.VirtualPathContainer.log") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.actor.VirtualPathContainer.this") diff --git a/akka-actor/src/main/mima-filters/2.4.12.backwards.excludes b/akka-actor/src/main/mima-filters/2.4.12.backwards.excludes new file mode 100644 index 0000000000..21040c0420 --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.4.12.backwards.excludes @@ -0,0 +1,2 @@ +# #21775 - overrode ByteString.stringPrefix and made it final +ProblemFilters.exclude[FinalMethodProblem]("akka.util.ByteString.stringPrefix") diff --git a/akka-actor/src/main/mima-filters/2.4.14.backwards.excludes b/akka-actor/src/main/mima-filters/2.4.14.backwards.excludes new file mode 100644 index 0000000000..e8cebb6035 --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.4.14.backwards.excludes @@ -0,0 +1,2 @@ +# #21894 Programmatic configuration of the ActorSystem +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystemImpl.this") diff --git a/akka-actor/src/main/mima-filters/2.4.2.backwards.excludes b/akka-actor/src/main/mima-filters/2.4.2.backwards.excludes new file mode 100644 index 0000000000..94d32d3451 --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.4.2.backwards.excludes @@ -0,0 +1,3 @@ +# #15947 catch mailbox creation failures +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.RepointableActorRef.point") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.Dispatch.initWithFailure") diff --git a/akka-actor/src/main/mima-filters/2.4.8.backwards.excludes b/akka-actor/src/main/mima-filters/2.4.8.backwards.excludes new file mode 100644 index 0000000000..607f83cc61 --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.4.8.backwards.excludes @@ -0,0 +1,7 @@ +# #20994 adding new decode method, since we're on JDK7+ now +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.util.ByteString.decodeString") + +# #19872 double wildcard for actor deployment config 
+ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.actor.Deployer.lookup") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.util.WildcardTree.apply") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.util.WildcardTree.find") diff --git a/akka-actor/src/main/mima-filters/2.4.9.backwards.excludes b/akka-actor/src/main/mima-filters/2.4.9.backwards.excludes new file mode 100644 index 0000000000..b1de2fc850 --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.4.9.backwards.excludes @@ -0,0 +1,2 @@ +# #21273 minor cleanup of WildcardIndex +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.util.WildcardIndex.empty") diff --git a/akka-actor/src/main/mima-filters/2.4.x.backwards.excludes b/akka-actor/src/main/mima-filters/2.4.x.backwards.excludes new file mode 100644 index 0000000000..41a731ba90 --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.4.x.backwards.excludes @@ -0,0 +1,79 @@ +# #18262 embed FJP, Mailbox extends ForkJoinTask +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator#ForkJoinExecutorServiceFactory.threadFactory") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator#ForkJoinExecutorServiceFactory.this") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator#ForkJoinExecutorServiceFactory.this") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator.validate") +ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask") +ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.MonitorableThreadFactory") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.MonitorableThreadFactory.newThread") +ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinPool") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator#AkkaForkJoinPool.this") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator#AkkaForkJoinPool.this") +ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.Mailbox") +ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.BalancingDispatcher$SharingMailbox") +ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.MonitorableThreadFactory$AkkaForkJoinWorkerThread") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.MonitorableThreadFactory#AkkaForkJoinWorkerThread.this") + +# #22295 Improve Circuit breaker +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.pattern.CircuitBreaker#State.callThrough") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.pattern.CircuitBreaker#State.invoke") + +# #21717 Improvements to AbstractActor API +ProblemFilters.exclude[Problem]("akka.japi.pf.ReceiveBuilder*") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.AbstractActor.receive") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.AbstractActor.createReceive") +ProblemFilters.exclude[MissingClassProblem]("akka.actor.AbstractActorContext") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.actor.AbstractActor.getContext") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.actor.AbstractActor.emptyBehavior") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.Children.findChild") +ProblemFilters.exclude[MissingTypesProblem]("akka.actor.ActorCell") 
+ProblemFilters.exclude[MissingTypesProblem]("akka.routing.RoutedActorCell") +ProblemFilters.exclude[MissingTypesProblem]("akka.routing.ResizablePoolCell") + +# #21423 remove deprecated ActorSystem termination methods (in 2.5.x) +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystemImpl.shutdown") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystemImpl.isTerminated") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystemImpl.awaitTermination") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystemImpl.awaitTermination") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystem.shutdown") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystem.isTerminated") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystem.awaitTermination") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystem.awaitTermination") + +# #21423 remove deprecated ActorPath.ElementRegex +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorPath.ElementRegex") + +# #21423 remove some deprecated event bus classes +ProblemFilters.exclude[MissingClassProblem]("akka.event.ActorClassification") +ProblemFilters.exclude[MissingClassProblem]("akka.event.EventStream$") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.event.EventStream.this") +ProblemFilters.exclude[MissingClassProblem]("akka.event.japi.ActorEventBus") + +# #21423 remove deprecated util.Crypt +ProblemFilters.exclude[MissingClassProblem]("akka.util.Crypt") +ProblemFilters.exclude[MissingClassProblem]("akka.util.Crypt$") + +# #21423 removal of deprecated serializer constructors (in 2.5.x) +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.serialization.JavaSerializer.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.serialization.ByteArraySerializer.this") + +# #21423 removal of deprecated constructor in PromiseActorRef +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.PromiseActorRef.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.PromiseActorRef.apply") + +# #21423 remove deprecated methods in routing +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.Pool.nrOfInstances") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.Group.paths") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.PoolBase.nrOfInstances") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.GroupBase.paths") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.GroupBase.getPaths") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.FromConfig.nrOfInstances") + +# #22105 Akka Typed process DSL +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorCell.addFunctionRef") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.dungeon.Children.addFunctionRef") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.Children.addFunctionRef") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.Children.addFunctionRef$default$2") + +# #22208 remove extension key +ProblemFilters.exclude[MissingClassProblem]("akka.event.Logging$Extension$") diff --git a/akka-actor/src/main/mima-filters/2.5.1.backwards.excludes b/akka-actor/src/main/mima-filters/2.5.1.backwards.excludes new file mode 100644 index 0000000000..ccb34d2ddc --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.5.1.backwards.excludes @@ -0,0 +1,19 
@@ +# #22794 watchWith +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.ActorContext.watchWith") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.DeathWatch.watchWith") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.DeathWatch.akka$actor$dungeon$DeathWatch$$watching") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.DeathWatch.akka$actor$dungeon$DeathWatch$$watching_=") + +# #22881 Make sure connections are aborted correctly on Windows +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.io.ChannelRegistration.cancel") + +# #21213 Feature request: Let BackoffSupervisor reply to messages when its child is stopped +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffSupervisor.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffOptionsImpl.copy") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffOptionsImpl.this") +ProblemFilters.exclude[MissingTypesProblem]("akka.pattern.BackoffOptionsImpl$") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffOptionsImpl.apply") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffOnRestartSupervisor.this") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.pattern.HandleBackoff.replyWhileStopped") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.pattern.BackoffOptions.withReplyWhileStopped") + diff --git a/akka-actor/src/main/mima-filters/2.5.2.backwards.excludes b/akka-actor/src/main/mima-filters/2.5.2.backwards.excludes new file mode 100644 index 0000000000..119e2be8c2 --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.5.2.backwards.excludes @@ -0,0 +1,2 @@ +# #22881 Make sure connections are aborted correctly on Windows +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.io.ChannelRegistration.cancel") diff --git a/akka-actor/src/main/mima-filters/2.5.3.backwards.excludes b/akka-actor/src/main/mima-filters/2.5.3.backwards.excludes new file mode 100644 index 0000000000..812673c70b --- /dev/null +++ b/akka-actor/src/main/mima-filters/2.5.3.backwards.excludes @@ -0,0 +1,4 @@ +# #15733 Timers +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.FSM#Timer.copy") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.FSM#Timer.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.FSM#Timer.apply") diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index 6d4039c416..a5ce7cd7e3 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -335,6 +335,7 @@ akka { # - "default-executor" requires a "default-executor" section # - "fork-join-executor" requires a "fork-join-executor" section # - "thread-pool-executor" requires a "thread-pool-executor" section + # - "affinity-pool-executor" requires an "affinity-pool-executor" section # - A FQCN of a class extending ExecutorServiceConfigurator executor = "default-executor" @@ -350,6 +351,78 @@ akka { fallback = "fork-join-executor" } + # This will be used if you have set "executor = "affinity-pool-executor"" + # Underlying thread pool implementation is akka.dispatch.affinity.AffinityPool. + # This executor is classified as "ApiMayChange". 
+ affinity-pool-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 4 + + # The parallelism factor is used to determine thread pool size using the + # following formula: ceil(available processors * factor). Resulting size + # is then bounded by the parallelism-min and parallelism-max values. + parallelism-factor = 0.8 + + # Max number of threads to cap factor-based parallelism number to. + parallelism-max = 64 + + # Each worker in the pool uses a separate bounded MPSC queue. This value + # indicates the upper bound of the queue. Whenever an attempt to enqueue + # a task is made and the queue does not have capacity to accomodate + # the task, the rejection handler created by the rejection handler specified + # in "rejection-handler" is invoked. + task-queue-size = 512 + + # FQCN of the Rejection handler used in the pool. + # Must have an empty public constructor and must + # implement akka.actor.affinity.RejectionHandlerFactory. + rejection-handler = "akka.dispatch.affinity.ThrowOnOverflowRejectionHandler" + + # Level of CPU time used, on a scale between 1 and 10, during backoff/idle. + # The tradeoff is that to have low latency more CPU time must be used to be + # able to react quickly on incoming messages or send as fast as possible after + # backoff backpressure. + # Level 1 strongly prefer low CPU consumption over low latency. + # Level 10 strongly prefer low latency over low CPU consumption. + idle-cpu-level = 5 + + # FQCN of the akka.dispatch.affinity.QueueSelectorFactory. + # The Class of the FQCN must have a public constructor with a + # (com.typesafe.config.Config) parameter. + # A QueueSelectorFactory create instances of akka.dispatch.affinity.QueueSelector, + # that is responsible for determining which task queue a Runnable should be enqueued in. + queue-selector = "akka.dispatch.affinity.FairDistributionHashCache" + + # When using the "akka.dispatch.affinity.FairDistributionHashCache" queue selector + # internally the AffinityPool uses two methods to determine which task + # queue to allocate a Runnable to: + # - map based - maintains a round robin counter and a map of Runnable + # hashcodes to queues that they have been associated with. This ensures + # maximum fairness in terms of work distribution, meaning that each worker + # will get approximately equal amount of mailboxes to execute. This is suitable + # in cases where we have a small number of actors that will be scheduled on + # the pool and we want to ensure the maximum possible utilization of the + # available threads. + # - hash based - the task - queue in which the runnable should go is determined + # by using an uniformly distributed int to int hash function which uses the + # hash code of the Runnable as an input. This is preferred in situations where we + # have enough number of distinct actors to ensure statistically uniform + # distribution of work across threads or we are ready to sacrifice the + # former for the added benefit of avoiding map look-ups. + fair-work-distribution { + # The value serves as a threshold which determines the point at which the + # pool switches from the first to the second work distribution schemes. + # For example, if the value is set to 128, the pool can observe up to + # 128 unique actors and schedule their mailboxes using the map based + # approach. Once this number is reached the pool switches to hash based + # task distribution mode. 
If the value is set to 0, the map based + # work distribution approach is disabled and only the hash based is + # used irrespective of the number of unique actors. Valid range is + # 0 to 2048 (inclusive) + threshold = 128 + } + } + # This will be used if you have set "executor = "fork-join-executor"" # Underlying thread pool implementation is akka.dispatch.forkjoin.ForkJoinPool fork-join-executor { diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 85c7f4132b..fab26a2217 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -272,6 +272,7 @@ class ActorInterruptedException private[akka] (cause: Throwable) extends AkkaExc */ @SerialVersionUID(1L) final case class UnhandledMessage(@BeanProperty message: Any, @BeanProperty sender: ActorRef, @BeanProperty recipient: ActorRef) + extends NoSerializationVerificationNeeded /** * Classes for passing status back to the sender. diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index 30ee513de2..d17bcc9a73 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -54,8 +54,8 @@ object ActorPath { * Parse string as actor path; throws java.net.MalformedURLException if unable to do so. */ def fromString(s: String): ActorPath = s match { - case ActorPathExtractor(addr, elems) ⇒ RootActorPath(addr) / elems - case _ ⇒ throw new MalformedURLException("cannot parse as ActorPath: " + s) + case ActorPathExtractor(address, elems) ⇒ RootActorPath(address) / elems + case _ ⇒ throw new MalformedURLException("cannot parse as ActorPath: " + s) } private final val ValidSymbols = """-_.*$+:@&=,!~';""" @@ -367,10 +367,10 @@ final class ChildActorPath private[akka] (val parent: ActorPath, val name: Strin appendUidFragment(sb).toString } - private def addressStringLengthDiff(addr: Address): Int = { + private def addressStringLengthDiff(address: Address): Int = { val r = root if (r.address.host.isDefined) 0 - else (addr.toString.length - r.address.toString.length) + else (address.toString.length - r.address.toString.length) } /** diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 9d9d9e9126..3d77a186a6 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -159,7 +159,7 @@ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable /** * This trait represents the Scala Actor API - * There are implicit conversions in ../actor/Implicits.scala + * There are implicit conversions in package.scala * from ActorRef -> ScalaActorRef and back */ trait ScalaActorRef { ref: ActorRef ⇒ diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 3f909b031d..9390b2d5c3 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -9,6 +9,7 @@ import scala.collection.mutable import akka.routing.{ Deafen, Listen, Listeners } import scala.concurrent.duration.FiniteDuration import scala.concurrent.duration._ +import akka.annotation.InternalApi object FSM { @@ -87,8 +88,9 @@ object FSM { /** * INTERNAL API */ - // FIXME: what about the cancellable? 
- private[akka] final case class Timer(name: String, msg: Any, repeat: Boolean, generation: Int)(context: ActorContext) + @InternalApi + private[akka] final case class Timer(name: String, msg: Any, repeat: Boolean, generation: Int, + owner: AnyRef)(context: ActorContext) extends NoSerializationVerificationNeeded { private var ref: Option[Cancellable] = _ private val scheduler = context.system.scheduler @@ -419,7 +421,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging { if (timers contains name) { timers(name).cancel } - val timer = Timer(name, msg, repeat, timerGen.next)(context) + val timer = Timer(name, msg, repeat, timerGen.next, this)(context) timer.schedule(self, timeout) timers(name) = timer } @@ -616,8 +618,8 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging { if (generation == gen) { processMsg(StateTimeout, "state timeout") } - case t @ Timer(name, msg, repeat, gen) ⇒ - if ((timers contains name) && (timers(name).generation == gen)) { + case t @ Timer(name, msg, repeat, gen, owner) ⇒ + if ((owner eq this) && (timers contains name) && (timers(name).generation == gen)) { if (timeoutFuture.isDefined) { timeoutFuture.get.cancel() timeoutFuture = None @@ -781,10 +783,10 @@ trait LoggingFSM[S, D] extends FSM[S, D] { this: Actor ⇒ private[akka] abstract override def processEvent(event: Event, source: AnyRef): Unit = { if (debugEvent) { val srcstr = source match { - case s: String ⇒ s - case Timer(name, _, _, _) ⇒ "timer " + name - case a: ActorRef ⇒ a.toString - case _ ⇒ "unknown" + case s: String ⇒ s + case Timer(name, _, _, _, _) ⇒ "timer " + name + case a: ActorRef ⇒ a.toString + case _ ⇒ "unknown" } log.debug("processing {} from {} in state {}", event, srcstr, stateName) } diff --git a/akka-actor/src/main/scala/akka/actor/Timers.scala b/akka-actor/src/main/scala/akka/actor/Timers.scala new file mode 100644 index 0000000000..387212cbe6 --- /dev/null +++ b/akka-actor/src/main/scala/akka/actor/Timers.scala @@ -0,0 +1,119 @@ +/** + * Copyright (C) 2017 Lightbend Inc. + */ +package akka.actor + +import scala.concurrent.duration.FiniteDuration +import akka.annotation.DoNotInherit +import akka.util.OptionVal + +/** + * Scala API: Mix in Timers into your Actor to get support for scheduled + * `self` messages via [[TimerScheduler]]. + * + * Timers are bound to the lifecycle of the actor that owns it, + * and thus are cancelled automatically when it is restarted or stopped. + */ +trait Timers extends Actor { + + private val _timers = new TimerSchedulerImpl(context) + + /** + * Start and cancel timers via the enclosed `TimerScheduler`. + */ + final def timers: TimerScheduler = _timers + + override protected[akka] def aroundPreRestart(reason: Throwable, message: Option[Any]): Unit = { + timers.cancelAll() + super.aroundPreRestart(reason, message) + } + + override protected[akka] def aroundPostStop(): Unit = { + timers.cancelAll() + super.aroundPostStop() + } + + override protected[akka] def aroundReceive(receive: Actor.Receive, msg: Any): Unit = { + msg match { + case timerMsg: TimerSchedulerImpl.TimerMsg ⇒ + _timers.interceptTimerMsg(timerMsg) match { + case OptionVal.Some(m) ⇒ super.aroundReceive(receive, m) + case OptionVal.None ⇒ // discard + } + case _ ⇒ + super.aroundReceive(receive, msg) + } + } + +} + +/** + * Java API: Support for scheduled `self` messages via [[TimerScheduler]]. + * + * Timers are bound to the lifecycle of the actor that owns it, + * and thus are cancelled automatically when it is restarted or stopped. 
+ */ +abstract class AbstractActorWithTimers extends AbstractActor with Timers { + /** + * Start and cancel timers via the enclosed `TimerScheduler`. + */ + final def getTimers: TimerScheduler = timers +} + +/** + * Support for scheduled `self` messages in an actor. + * It is used by mixing in trait `Timers` in Scala or extending `AbstractActorWithTimers` + * in Java. + * + * Timers are bound to the lifecycle of the actor that owns it, + * and thus are cancelled automatically when it is restarted or stopped. + * + * `TimerScheduler` is not thread-safe, i.e. it must only be used within + * the actor that owns it. + */ +@DoNotInherit abstract class TimerScheduler { + + /** + * Start a periodic timer that will send `msg` to the `self` actor at + * a fixed `interval`. + * + * Each timer has a key and if a new timer with same key is started + * the previous is cancelled and it's guaranteed that a message from the + * previous timer is not received, even though it might already be enqueued + * in the mailbox when the new timer is started. + */ + def startPeriodicTimer(key: Any, msg: Any, interval: FiniteDuration): Unit + + /** + * Start a timer that will send `msg` once to the `self` actor after + * the given `timeout`. + * + * Each timer has a key and if a new timer with same key is started + * the previous is cancelled and it's guaranteed that a message from the + * previous timer is not received, even though it might already be enqueued + * in the mailbox when the new timer is started. + */ + def startSingleTimer(key: Any, msg: Any, timeout: FiniteDuration): Unit + + /** + * Check if a timer with a given `key` is active. + */ + def isTimerActive(key: Any): Boolean + + /** + * Cancel a timer with a given `key`. + * If canceling a timer that was already canceled, or key never was used to start a timer + * this operation will do nothing. + * + * It is guaranteed that a message from a canceled timer, including its previous incarnation + * for the same key, will not be received by the actor, even though the message might already + * be enqueued in the mailbox when cancel is called. + */ + def cancel(key: Any): Unit + + /** + * Cancel all timers. + */ + def cancelAll(): Unit + +} diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala b/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala new file mode 100644 index 0000000000..a2ba58c670 --- /dev/null +++ b/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala @@ -0,0 +1,111 @@ +/** + * Copyright (C) 2017 Lightbend Inc. 
+ */ +package akka.actor + +import scala.concurrent.duration.FiniteDuration + +import akka.annotation.InternalApi +import akka.event.Logging +import akka.util.OptionVal + +/** + * INTERNAL API + */ +@InternalApi private[akka] object TimerSchedulerImpl { + final case class Timer(key: Any, msg: Any, repeat: Boolean, generation: Int, task: Cancellable) + final case class TimerMsg(key: Any, generation: Int, owner: TimerSchedulerImpl) + extends NoSerializationVerificationNeeded +} + +/** + * INTERNAL API + */ +@InternalApi private[akka] class TimerSchedulerImpl(ctx: ActorContext) extends TimerScheduler { + import TimerSchedulerImpl._ + + private val log = Logging(ctx.system, classOf[TimerScheduler]) + private var timers: Map[Any, Timer] = Map.empty + private var timerGen = 0 + private def nextTimerGen(): Int = { + timerGen += 1 + timerGen + } + + override def startPeriodicTimer(key: Any, msg: Any, interval: FiniteDuration): Unit = + startTimer(key, msg, interval, repeat = true) + + override def startSingleTimer(key: Any, msg: Any, timeout: FiniteDuration): Unit = + startTimer(key, msg, timeout, repeat = false) + + private def startTimer(key: Any, msg: Any, timeout: FiniteDuration, repeat: Boolean): Unit = { + timers.get(key) match { + case Some(t) ⇒ cancelTimer(t) + case None ⇒ + } + val nextGen = nextTimerGen() + + val timerMsg = TimerMsg(key, nextGen, this) + val task = + if (repeat) + ctx.system.scheduler.schedule(timeout, timeout, ctx.self, timerMsg)(ctx.dispatcher) + else + ctx.system.scheduler.scheduleOnce(timeout, ctx.self, timerMsg)(ctx.dispatcher) + + val nextTimer = Timer(key, msg, repeat, nextGen, task) + log.debug("Start timer [{}] with generation [{}]", key, nextGen) + timers = timers.updated(key, nextTimer) + } + + override def isTimerActive(key: Any): Boolean = + timers.contains(key) + + override def cancel(key: Any): Unit = { + timers.get(key) match { + case None ⇒ // already removed/canceled + case Some(t) ⇒ cancelTimer(t) + } + } + + private def cancelTimer(timer: Timer): Unit = { + log.debug("Cancel timer [{}] with generation [{}]", timer.key, timer.generation) + timer.task.cancel() + timers -= timer.key + } + + override def cancelAll(): Unit = { + log.debug("Cancel all timers") + timers.valuesIterator.foreach { timer ⇒ + timer.task.cancel() + } + timers = Map.empty + } + + def interceptTimerMsg(timerMsg: TimerMsg): OptionVal[AnyRef] = { + timers.get(timerMsg.key) match { + case None ⇒ + // it was from canceled timer that was already enqueued in mailbox + log.debug("Received timer [{}] that has been removed, discarding", timerMsg.key) + OptionVal.None // message should be ignored + case Some(t) ⇒ + if (timerMsg.owner ne this) { + // after restart, it was from an old instance that was enqueued in mailbox before canceled + log.debug("Received timer [{}] from old restarted instance, discarding", timerMsg.key) + OptionVal.None // message should be ignored + } else if (timerMsg.generation == t.generation) { + // valid timer + log.debug("Received timer [{}]", timerMsg.key) + if (!t.repeat) + timers -= t.key + OptionVal.Some(t.msg.asInstanceOf[AnyRef]) + } else { + // it was from an old timer that was enqueued in mailbox before canceled + log.debug( + "Received timer [{}] from from old generation [{}], expected generation [{}], discarding", + timerMsg.key, timerMsg.generation, t.generation) + OptionVal.None // message should be ignored + } + } + } + +} diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala 
b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 285337dc4d..28dd212522 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -8,11 +8,13 @@ import java.util.concurrent._ import java.{ util ⇒ ju } import akka.actor._ +import akka.dispatch.affinity.AffinityPoolConfigurator import akka.dispatch.sysmsg._ import akka.event.EventStream import akka.event.Logging.{ Debug, Error, LogEventException } import akka.util.{ Index, Unsafe } import com.typesafe.config.Config + import scala.annotation.tailrec import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor } import scala.concurrent.duration.{ Duration, FiniteDuration } @@ -327,6 +329,8 @@ abstract class MessageDispatcherConfigurator(_config: Config, val prerequisites: def configurator(executor: String): ExecutorServiceConfigurator = executor match { case null | "" | "fork-join-executor" ⇒ new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites) case "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites) + case "affinity-pool-executor" ⇒ new AffinityPoolConfigurator(config.getConfig("affinity-pool-executor"), prerequisites) + case fqcn ⇒ val args = List( classOf[Config] → config, diff --git a/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala b/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala new file mode 100644 index 0000000000..7ab837e74f --- /dev/null +++ b/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala @@ -0,0 +1,418 @@ +/** + * Copyright (C) 2016-2017 Lightbend Inc. + */ + +package akka.dispatch.affinity + +import java.lang.invoke.MethodHandles +import java.lang.invoke.MethodType.methodType +import java.util.Collections +import java.util.concurrent.TimeUnit.MICROSECONDS +import java.util.concurrent._ +import java.util.concurrent.atomic.{ AtomicInteger, AtomicReference } +import java.util.concurrent.locks.LockSupport +import java.lang.Integer.reverseBytes + +import akka.dispatch._ +import akka.util.Helpers.Requiring +import com.typesafe.config.Config + +import akka.annotation.{ InternalApi, ApiMayChange } +import akka.event.Logging +import akka.util.{ ImmutableIntMap, OptionVal, ReentrantGuard } + +import scala.annotation.{ tailrec, switch } +import scala.collection.{ mutable, immutable } +import scala.util.control.NonFatal + +@InternalApi +@ApiMayChange +private[affinity] object AffinityPool { + type PoolState = Int + // PoolState: waiting to be initialized + final val Uninitialized = 0 + // PoolState: currently in the process of initializing + final val Initializing = 1 + // PoolState: accepts new tasks and processes tasks that are enqueued + final val Running = 2 + // PoolState: does not accept new tasks, processes tasks that are in the queue + final val ShuttingDown = 3 + // PoolState: does not accept new tasks, does not process tasks in queue + final val ShutDown = 4 + // PoolState: all threads have been stopped, does not process tasks and does not accept new ones + final val Terminated = 5 + + // Method handle to JDK9+ onSpinWait method + private val onSpinWaitMethodHandle = + try + OptionVal.Some(MethodHandles.lookup.findStatic(classOf[Thread], "onSpinWait", methodType(classOf[Unit]))) + catch { + case NonFatal(_) ⇒ OptionVal.None + } + + type IdleState = Int + // IdleState: Initial state + final val Initial = 0 + // IdleState: Spinning + final val Spinning 
= 1 + // IdleState: Yielding + final val Yielding = 2 + // IdleState: Parking + final val Parking = 3 + + // Following are auxiliary class and trait definitions + private final class IdleStrategy(idleCpuLevel: Int) { + + private[this] val maxSpins = 1100 * idleCpuLevel - 1000 + private[this] val maxYields = 5 * idleCpuLevel + private[this] val minParkPeriodNs = 1 + private[this] val maxParkPeriodNs = MICROSECONDS.toNanos(250 - ((80 * (idleCpuLevel - 1)) / 3)) + + private[this] var state: IdleState = Initial + private[this] var turns = 0L + private[this] var parkPeriodNs = 0L + @volatile private[this] var idling = false + + @inline private[this] final def transitionTo(newState: IdleState): Unit = { + state = newState + turns = 0 + } + + final def isIdling: Boolean = idling + + final def idle(): Unit = { + (state: @switch) match { + case Initial ⇒ + idling = true + transitionTo(Spinning) + case Spinning ⇒ + onSpinWaitMethodHandle match { + case OptionVal.Some(m) ⇒ m.invokeExact() + case OptionVal.None ⇒ + } + turns += 1 + if (turns > maxSpins) + transitionTo(Yielding) + case Yielding ⇒ + turns += 1 + if (turns > maxYields) { + parkPeriodNs = minParkPeriodNs + transitionTo(Parking) + } else Thread.`yield`() + case Parking ⇒ + LockSupport.parkNanos(parkPeriodNs) + parkPeriodNs = Math.min(parkPeriodNs << 1, maxParkPeriodNs) + } + } + + final def reset(): Unit = { + idling = false + transitionTo(Initial) + } + } + + private final class BoundedAffinityTaskQueue(capacity: Int) extends AbstractBoundedNodeQueue[Runnable](capacity) +} + +/** + * An [[ExecutorService]] implementation which pins actor to particular threads + * and guaranteed that an actor's [[Mailbox]] will e run on the thread it used + * it used to run. In situations where we see a lot of cache ping pong, this + * might lead to significant performance improvements. + * + * INTERNAL API + */ +@InternalApi +@ApiMayChange +private[akka] class AffinityPool( + id: String, + parallelism: Int, + affinityGroupSize: Int, + threadFactory: ThreadFactory, + idleCpuLevel: Int, + final val queueSelector: QueueSelector, + rejectionHandler: RejectionHandler) + extends AbstractExecutorService { + + if (parallelism <= 0) + throw new IllegalArgumentException("Size of pool cannot be less or equal to 0") + + import AffinityPool._ + + // Held while starting/shutting down workers/pool in order to make + // the operations linear and enforce atomicity. An example of that would be + // adding a worker. We want the creation of the worker, addition + // to the set and starting to worker to be an atomic action. 
Using + // a concurrent set would not give us that + private val bookKeepingLock = new ReentrantGuard() + + // condition used for awaiting termination + private val terminationCondition = bookKeepingLock.newCondition() + + // indicates the current state of the pool + @volatile final private var poolState: PoolState = Uninitialized + + private[this] final val workQueues = Array.fill(parallelism)(new BoundedAffinityTaskQueue(affinityGroupSize)) + private[this] final val workers = mutable.Set[AffinityPoolWorker]() + + def start(): this.type = + bookKeepingLock.withGuard { + if (poolState == Uninitialized) { + poolState = Initializing + workQueues.foreach(q ⇒ addWorker(workers, q)) + poolState = Running + } + this + } + + // WARNING: Only call while holding the bookKeepingLock + private def addWorker(workers: mutable.Set[AffinityPoolWorker], q: BoundedAffinityTaskQueue): Unit = { + val worker = new AffinityPoolWorker(q, new IdleStrategy(idleCpuLevel)) + workers.add(worker) + worker.start() + } + + /** + * Each worker should go through that method while terminating. + * In turn each worker is responsible for modifying the pool + * state accordingly. For example if this is the last worker + * and the queue is empty and we are in a ShuttingDown state + * the worker can transition the pool to ShutDown and attempt + * termination + * + * Furthermore, if this worker has experienced abrupt termination + * due to an exception being thrown in user code, the worker is + * responsible for adding one more worker to compensate for its + * own termination + * + */ + private def onWorkerExit(w: AffinityPoolWorker, abruptTermination: Boolean): Unit = + bookKeepingLock.withGuard { + workers.remove(w) + if (abruptTermination && poolState == Running) + addWorker(workers, w.q) + else if (workers.isEmpty && !abruptTermination && poolState >= ShuttingDown) { + poolState = ShutDown // transition to shutdown and try to transition to termination + attemptPoolTermination() + } + } + + override def execute(command: Runnable): Unit = { + val queue = workQueues(queueSelector.getQueue(command, parallelism)) // Will throw NPE if command is null + if (poolState >= ShuttingDown || !queue.add(command)) + rejectionHandler.reject(command, this) + } + + override def awaitTermination(timeout: Long, unit: TimeUnit): Boolean = { + // recurse until pool is terminated or time out reached + @tailrec + def awaitTermination(nanos: Long): Boolean = { + if (poolState == Terminated) true + else if (nanos <= 0) false + else awaitTermination(terminationCondition.awaitNanos(nanos)) + } + + bookKeepingLock.withGuard { + // need to hold the lock to avoid monitor exception + awaitTermination(unit.toNanos(timeout)) + } + } + + // WARNING: Only call while holding the bookKeepingLock + private def attemptPoolTermination(): Unit = + if (workers.isEmpty && poolState == ShutDown) { + poolState = Terminated + terminationCondition.signalAll() + } + + override def shutdownNow(): java.util.List[Runnable] = + bookKeepingLock.withGuard { + poolState = ShutDown + workers.foreach(_.stop()) + attemptPoolTermination() + // like in the FJ executor, we do not provide facility to obtain tasks that were in queue + Collections.emptyList[Runnable]() + } + + override def shutdown(): Unit = + bookKeepingLock.withGuard { + poolState = ShuttingDown + // interrupts only idle workers.. 
so others can process their queues + workers.foreach(_.stopIfIdle()) + attemptPoolTermination() + } + + override def isShutdown: Boolean = poolState >= ShutDown + + override def isTerminated: Boolean = poolState == Terminated + + override def toString: String = + s"${Logging.simpleName(this)}(id = $id, parallelism = $parallelism, affinityGroupSize = $affinityGroupSize, threadFactory = $threadFactory, idleCpuLevel = $idleCpuLevel, queueSelector = $queueSelector, rejectionHandler = $rejectionHandler)" + + private[this] final class AffinityPoolWorker( final val q: BoundedAffinityTaskQueue, final val idleStrategy: IdleStrategy) extends Runnable { + final val thread: Thread = threadFactory.newThread(this) + + final def start(): Unit = + if (thread eq null) throw new IllegalStateException(s"Was not able to allocate worker thread for ${AffinityPool.this}") + else thread.start() + + override final def run(): Unit = { + // Returns true if it executed something, false otherwise + def executeNext(): Boolean = { + val c = q.poll() + val next = c ne null + if (next) { + c.run() + idleStrategy.reset() + } else { + idleStrategy.idle() // if not wait for a bit + } + next + } + + /** + * We keep running as long as we are Running + * or we're ShuttingDown but we still have tasks to execute, + * and we're not interrupted. + */ + @tailrec def runLoop(): Unit = + if (!Thread.interrupted()) { + (poolState: @switch) match { + case Uninitialized ⇒ () + case Initializing | Running ⇒ + executeNext() + runLoop() + case ShuttingDown ⇒ + if (executeNext()) runLoop() + else () + case ShutDown | Terminated ⇒ () + } + } + + var abruptTermination = true + try { + runLoop() + abruptTermination = false // if we have reached here, our termination is not due to an exception + } finally { + onWorkerExit(this, abruptTermination) + } + } + + def stop(): Unit = if (!thread.isInterrupted) thread.interrupt() + + def stopIfIdle(): Unit = if (idleStrategy.isIdling) stop() + } +} + +/** + * INTERNAL API + */ +@InternalApi +@ApiMayChange +private[akka] final class AffinityPoolConfigurator(config: Config, prerequisites: DispatcherPrerequisites) + extends ExecutorServiceConfigurator(config, prerequisites) { + + private val poolSize = ThreadPoolConfig.scaledPoolSize( + config.getInt("parallelism-min"), + config.getDouble("parallelism-factor"), + config.getInt("parallelism-max")) + private val taskQueueSize = config.getInt("task-queue-size") + + private val idleCpuLevel = config.getInt("idle-cpu-level").requiring(level ⇒ + 1 <= level && level <= 10, "idle-cpu-level must be between 1 and 10") + + private val queueSelectorFactoryFQCN = config.getString("queue-selector") + private val queueSelectorFactory: QueueSelectorFactory = + prerequisites.dynamicAccess.createInstanceFor[QueueSelectorFactory](queueSelectorFactoryFQCN, immutable.Seq(classOf[Config] → config)) + .recover({ + case exception ⇒ throw new IllegalArgumentException( + s"Cannot instantiate QueueSelectorFactory(queueSelector = $queueSelectorFactoryFQCN), make sure it has an accessible constructor which accepts a Config parameter") + }).get + + private val rejectionHandlerFactoryFCQN = config.getString("rejection-handler") + private val rejectionHandlerFactory = prerequisites.dynamicAccess + .createInstanceFor[RejectionHandlerFactory](rejectionHandlerFactoryFCQN, Nil).recover({ + case exception ⇒ throw new IllegalArgumentException( + s"Cannot instantiate RejectionHandlerFactory(rejection-handler = $rejectionHandlerFactoryFCQN), make sure it has an accessible empty constructor", + 
exception) + }).get + + override def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory = + new ExecutorServiceFactory { + override def createExecutorService: ExecutorService = + new AffinityPool(id, poolSize, taskQueueSize, threadFactory, idleCpuLevel, queueSelectorFactory.create(), rejectionHandlerFactory.create()).start() + } +} + +trait RejectionHandler { + def reject(command: Runnable, service: ExecutorService) +} + +trait RejectionHandlerFactory { + def create(): RejectionHandler +} + +trait QueueSelectorFactory { + def create(): QueueSelector +} + +/** + * A `QueueSelector` is responsible for, given a `Runnable` and the number of available + * queues, return which of the queues that `Runnable` should be placed in. + */ +trait QueueSelector { + /** + * Must be deterministic—return the same value for the same input. + * @returns given a `Runnable` a number between 0 .. `queues` (exclusive) + * @throws NullPointerException when `command` is `null` + */ + def getQueue(command: Runnable, queues: Int): Int +} + +/** + * INTERNAL API + */ +@InternalApi +@ApiMayChange +private[akka] final class ThrowOnOverflowRejectionHandler extends RejectionHandlerFactory with RejectionHandler { + override final def reject(command: Runnable, service: ExecutorService): Unit = + throw new RejectedExecutionException(s"Task $command rejected from $service") + override final def create(): RejectionHandler = this +} + +/** + * INTERNAL API + */ +@InternalApi +@ApiMayChange +private[akka] final class FairDistributionHashCache( final val config: Config) extends QueueSelectorFactory { + private final val MaxFairDistributionThreshold = 2048 + + private[this] final val fairDistributionThreshold = config.getInt("fair-work-distribution.threshold").requiring(thr ⇒ + 0 <= thr && thr <= MaxFairDistributionThreshold, s"fair-work-distribution.threshold must be between 0 and $MaxFairDistributionThreshold") + + override final def create(): QueueSelector = new AtomicReference[ImmutableIntMap](ImmutableIntMap.empty) with QueueSelector { + override def toString: String = s"FairDistributionHashCache(fairDistributionThreshold = $fairDistributionThreshold)" + private[this] final def improve(h: Int): Int = Math.abs(reverseBytes(h * 0x9e3775cd) * 0x9e3775cd) // `sbhash`: In memory of Phil Bagwell. 
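+    // Summary of the selection logic implemented by getQueue below:
+    //  - the Runnable's hashCode is first looked up in the copy-on-write ImmutableIntMap cache;
+    //    a cached entry pins that hash to a fixed queue index, so the same entity keeps hitting the same queue
+    //  - while the cache has not outgrown `fairDistributionThreshold`, new hashes are assigned round-robin
+    //    (prev.size % queues) and published with compareAndSet, retrying the lookup on contention
+    //  - beyond the threshold (or when the threshold is 0) the queue is chosen by hashing alone: improve(hash) % queues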
+ override final def getQueue(command: Runnable, queues: Int): Int = { + val runnableHash = command.hashCode() + if (fairDistributionThreshold == 0) + improve(runnableHash) % queues + else { + @tailrec + def cacheLookup(prev: ImmutableIntMap, hash: Int): Int = { + val existingIndex = prev.get(runnableHash) + if (existingIndex >= 0) existingIndex + else if (prev.size > fairDistributionThreshold) improve(hash) % queues + else { + val index = prev.size % queues + if (compareAndSet(prev, prev.updated(runnableHash, index))) index + else cacheLookup(get(), hash) + } + } + cacheLookup(get(), runnableHash) + } + } + } +} + diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index ff17673478..86ac09c7c2 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -8,6 +8,7 @@ import java.util.concurrent.atomic.AtomicInteger import akka.actor.ActorSystem.Settings import akka.actor._ +import akka.annotation.{ DoNotInherit, InternalApi } import akka.dispatch.RequiresMessageQueue import akka.event.Logging._ import akka.util.ReentrantGuard @@ -1403,7 +1404,9 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter { def clearMDC(): Unit = mdc(emptyMDC) } -final class LogMarker(val name: String) +/** DO NOT INHERIT: Class is open only for use by akka-slf4j*/ +@DoNotInherit +class LogMarker(val name: String) object LogMarker { /** The Marker is internally transferred via MDC using using this key */ private[akka] final val MDCKey = "marker" diff --git a/akka-actor/src/main/scala/akka/io/Dns.scala b/akka-actor/src/main/scala/akka/io/Dns.scala index 7c15a9a383..b6901fe16b 100644 --- a/akka-actor/src/main/scala/akka/io/Dns.scala +++ b/akka-actor/src/main/scala/akka/io/Dns.scala @@ -30,8 +30,8 @@ object Dns extends ExtensionId[DnsExt] with ExtensionIdProvider { @throws[UnknownHostException] def addr: InetAddress = addrOption match { - case Some(addr) ⇒ addr - case None ⇒ throw new UnknownHostException(name) + case Some(ipAddress) ⇒ ipAddress + case None ⇒ throw new UnknownHostException(name) } } @@ -96,4 +96,4 @@ object IpVersionSelector { case "true" ⇒ ipv6 orElse ipv4 case _ ⇒ ipv4 orElse ipv6 } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/io/SelectionHandler.scala b/akka-actor/src/main/scala/akka/io/SelectionHandler.scala index cdd4516b7b..6b23c16cb4 100644 --- a/akka-actor/src/main/scala/akka/io/SelectionHandler.scala +++ b/akka-actor/src/main/scala/akka/io/SelectionHandler.scala @@ -68,6 +68,12 @@ private[io] trait ChannelRegistration extends NoSerializationVerificationNeeded } private[io] object SelectionHandler { + // Let select return every MaxSelectMillis which will automatically cleanup stale entries in the selection set. + // Otherwise, an idle Selector might block for a long time keeping a reference to the dead connection actor's ActorRef + // which might keep other stuff in memory. + // See https://github.com/akka/akka/issues/23437 + // As this is basic house-keeping functionality it doesn't seem useful to make the value configurable. 
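+  // Note: Selector.select(MaxSelectMillis) blocks for at most MaxSelectMillis milliseconds,
+  // whereas the previously used no-argument select() could block indefinitely while the selector was idle.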
+ val MaxSelectMillis = 10000 // wake up once in 10 seconds trait HasFailureMessage { def failureMessage: Any @@ -119,7 +125,7 @@ private[io] object SelectionHandler { private[this] val select = new Task { def tryRun(): Unit = { - if (selector.select() > 0) { // This assumes select return value == selectedKeys.size + if (selector.select(MaxSelectMillis) > 0) { // This assumes select return value == selectedKeys.size val keys = selector.selectedKeys val iterator = keys.iterator() while (iterator.hasNext) { diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index bcc20cd31f..0286495d97 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -3,18 +3,28 @@ */ package akka.pattern -import akka.actor.{ ActorSelection, Scheduler } -import java.util.concurrent.{ Callable, TimeUnit } -import scala.concurrent.ExecutionContext -import scala.concurrent.duration.FiniteDuration -import java.util.concurrent.CompletionStage -import scala.compat.java8.FutureConverters._ +import java.util.concurrent.{ Callable, CompletionStage, TimeUnit } +import akka.actor.{ ActorSelection, Scheduler } + +import scala.compat.java8.FutureConverters._ +import scala.concurrent.ExecutionContext + +/** + * "Pre Java 8" Java API for Akka patterns such as `ask`, `pipe` and others. + * + * These methods are possible to call from Java however work with the Scala [[scala.concurrent.Future]], + * due to the lack of non-blocking reactive Future implementation before Java 8. + * + * For Java applications developed with Java 8 and later, you might want to use [[akka.pattern.PatternsCS]] instead, + * which provide alternatives for these patterns which work with [[java.util.concurrent.CompletionStage]]. + */ object Patterns { + import akka.actor.ActorRef import akka.japi - import akka.actor.{ ActorRef } - import akka.pattern.{ ask ⇒ scalaAsk, pipe ⇒ scalaPipe, gracefulStop ⇒ scalaGracefulStop, after ⇒ scalaAfter } + import akka.pattern.{ after ⇒ scalaAfter, ask ⇒ scalaAsk, gracefulStop ⇒ scalaGracefulStop, pipe ⇒ scalaPipe } import akka.util.Timeout + import scala.concurrent.Future import scala.concurrent.duration._ @@ -259,11 +269,17 @@ object Patterns { scalaAfter(duration, scheduler)(value)(context) } +/** + * Java 8+ API for Akka patterns such as `ask`, `pipe` and others which work with [[java.util.concurrent.CompletionStage]]. + * + * For working with Scala [[scala.concurrent.Future]] from Java you may want to use [[akka.pattern.Patterns]] instead. + */ object PatternsCS { + import akka.actor.ActorRef import akka.japi - import akka.actor.{ ActorRef } import akka.pattern.{ ask ⇒ scalaAsk, gracefulStop ⇒ scalaGracefulStop } import akka.util.Timeout + import scala.concurrent.duration._ /** diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index 12240e40af..db87bfb8a7 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -179,6 +179,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { * using the optional type hint to the Serializer. * Returns either the resulting object or throws an exception if deserialization fails. 
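+   * An unknown serializer id is reported by throwing a `NotSerializableException`.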
*/ + @throws(classOf[NotSerializableException]) def deserializeByteBuffer(buf: ByteBuffer, serializerId: Int, manifest: String): AnyRef = { val serializer = try getSerializerById(serializerId) catch { case _: NoSuchElementException ⇒ throw new NotSerializableException( @@ -220,6 +221,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { * * Throws java.io.NotSerializableException if no `serialization-bindings` is configured for the class. */ + @throws(classOf[NotSerializableException]) def serializerFor(clazz: Class[_]): Serializer = serializerMap.get(clazz) match { case null ⇒ // bindings are ordered from most specific to least specific diff --git a/akka-actor/src/main/scala/akka/serialization/Serializer.scala b/akka-actor/src/main/scala/akka/serialization/Serializer.scala index 59225e6f94..0c92502eb7 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serializer.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serializer.scala @@ -4,7 +4,7 @@ package akka.serialization * Copyright (C) 2009-2017 Lightbend Inc. */ -import java.io.{ ByteArrayInputStream, ByteArrayOutputStream, ObjectOutputStream } +import java.io.{ ByteArrayInputStream, ByteArrayOutputStream, NotSerializableException, ObjectOutputStream } import java.nio.ByteBuffer import java.util.concurrent.Callable @@ -57,6 +57,7 @@ trait Serializer { * Produces an object from an array of bytes, with an optional type-hint; * the class should be loaded using ActorSystem.dynamicAccess. */ + @throws(classOf[NotSerializableException]) def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef /** @@ -67,6 +68,7 @@ trait Serializer { /** * Java API: deserialize with type hint */ + @throws(classOf[NotSerializableException]) final def fromBinary(bytes: Array[Byte], clazz: Class[_]): AnyRef = fromBinary(bytes, Option(clazz)) } @@ -135,6 +137,7 @@ abstract class SerializerWithStringManifest extends Serializer { * and message is dropped. Other exceptions will tear down the TCP connection * because it can be an indication of corrupt bytes from the underlying transport. */ + @throws(classOf[NotSerializableException]) def fromBinary(bytes: Array[Byte], manifest: String): AnyRef final def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = { @@ -194,6 +197,7 @@ trait ByteBufferSerializer { * Produces an object from a `ByteBuffer`, with an optional type-hint; * the class should be loaded using ActorSystem.dynamicAccess. */ + @throws(classOf[NotSerializableException]) def fromBinary(buf: ByteBuffer, manifest: String): AnyRef } @@ -257,6 +261,8 @@ object BaseSerializer { * the JSerializer (also possible with empty constructor). 
*/ abstract class JSerializer extends Serializer { + + @throws(classOf[NotSerializableException]) final def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = fromBinaryJava(bytes, manifest.orNull) @@ -315,6 +321,7 @@ class JavaSerializer(val system: ExtendedActorSystem) extends BaseSerializer { bos.toByteArray } + @throws(classOf[NotSerializableException]) def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { val in = new ClassLoaderObjectInputStream(system.dynamicAccess.classLoader, new ByteArrayInputStream(bytes)) val obj = JavaSerializer.currentSystem.withValue(system) { in.readObject } @@ -344,11 +351,13 @@ final case class DisabledJavaSerializer(system: ExtendedActorSystem) extends Ser throw IllegalSerialization } + @throws(classOf[NotSerializableException]) override def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { log.warning(LogMarker.Security, "Incoming message attempted to use Java Serialization even though `akka.actor.allow-java-serialization = off` was set!") throw IllegalDeserialization } + @throws(classOf[NotSerializableException]) override def fromBinary(buf: ByteBuffer, manifest: String): AnyRef = { // we don't capture the manifest or mention it in the log as the default setting for includeManifest is set to false. log.warning(LogMarker.Security, "Incoming message attempted to use Java Serialization even though `akka.actor.allow-java-serialization = off` was set!") @@ -376,6 +385,7 @@ class NullSerializer extends Serializer { def includeManifest: Boolean = false def identifier = 0 def toBinary(o: AnyRef): Array[Byte] = nullAsBytes + @throws(classOf[NotSerializableException]) def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = null } @@ -392,6 +402,8 @@ class ByteArraySerializer(val system: ExtendedActorSystem) extends BaseSerialize case other ⇒ throw new IllegalArgumentException( s"${getClass.getName} only serializes byte arrays, not [${other.getClass.getName}]") } + + @throws(classOf[NotSerializableException]) def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = bytes override def toBinary(o: AnyRef, buf: ByteBuffer): Unit = @@ -402,6 +414,7 @@ class ByteArraySerializer(val system: ExtendedActorSystem) extends BaseSerialize s"${getClass.getName} only serializes byte arrays, not [${other.getClass.getName}]") } + @throws(classOf[NotSerializableException]) override def fromBinary(buf: ByteBuffer, manifest: String): AnyRef = { val bytes = new Array[Byte](buf.remaining()) buf.get(bytes) diff --git a/akka-actor/src/main/scala/akka/util/ByteString.scala b/akka-actor/src/main/scala/akka/util/ByteString.scala index dfcfb549af..1127fbac83 100644 --- a/akka-actor/src/main/scala/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala/akka/util/ByteString.scala @@ -60,6 +60,24 @@ object ByteString { */ def fromArray(array: Array[Byte]): ByteString = apply(array) + /** + * Unsafe API: Use only in situations you are completely confident that this is what + * you need, and that you understand the implications documented below. + * + * Creates a ByteString without copying the passed in byte array, unlike other factory + * methods defined on ByteString. This method of creating a ByteString saves one array + * copy and allocation and therefore can lead to better performance, however it also means + * that one MUST NOT modify the passed in array, or unexpected immutable data structure + * contract-breaking behaviour will manifest itself. 
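+   * (For example, zeroing out or reusing the array after passing it to this method would silently
+   * change the contents of the ByteString created from it.)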
+ * + * This API is intended for users who have obtained an byte array from some other API, and + * want wrap it into an ByteArray, and from there on only use that reference (the ByteString) + * to operate on the wrapped data. For all other intents and purposes, please use the usual + * apply and create methods - which provide the immutability guarantees by copying the array. + * + */ + def fromArrayUnsafe(array: Array[Byte]): ByteString = ByteString1C(array) + /** * Creates a new ByteString by copying length bytes starting at offset from * an Array. @@ -67,6 +85,24 @@ object ByteString { def fromArray(array: Array[Byte], offset: Int, length: Int): ByteString = CompactByteString.fromArray(array, offset, length) + /** + * Unsafe API: Use only in situations you are completely confident that this is what + * you need, and that you understand the implications documented below. + * + * Creates a ByteString without copying the passed in byte array, unlike other factory + * methods defined on ByteString. This method of creating a ByteString saves one array + * copy and allocation and therefore can lead to better performance, however it also means + * that one MUST NOT modify the passed in array, or unexpected immutable data structure + * contract-breaking behaviour will manifest itself. + * + * This API is intended for users who have obtained an byte array from some other API, and + * want wrap it into an ByteArray, and from there on only use that reference (the ByteString) + * to operate on the wrapped data. For all other intents and purposes, please use the usual + * apply and create methods - which provide the immutability guarantees by copying the array. + * + */ + def fromArrayUnsafe(array: Array[Byte], offset: Int, length: Int): ByteString = ByteString1(array, offset, length) + /** * JAVA API * Creates a new ByteString by copying an int array by converting from integral numbers to bytes. diff --git a/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala b/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala new file mode 100644 index 0000000000..c6256037c2 --- /dev/null +++ b/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala @@ -0,0 +1,145 @@ +/** + * Copyright (C) 2016-2017 Lightbend Inc. + */ +package akka.util +import java.util.Arrays +import akka.annotation.InternalApi +import scala.annotation.tailrec + +/** + * INTERNAL API + */ +@InternalApi private[akka] object ImmutableIntMap { + final val empty: ImmutableIntMap = new ImmutableIntMap(Array.emptyIntArray, 0) +} + +/** + * INTERNAL API + * Specialized Map for primitive `Int` keys and values to avoid allocations (boxing). + * Keys and values are encoded consecutively in a single Int array and does copy-on-write with no + * structural sharing, it's intended for rather small maps (<1000 elements). + */ +@InternalApi private[akka] final class ImmutableIntMap private (private final val kvs: Array[Int], final val size: Int) { + + private final def this(key: Int, value: Int) = { + this(new Array[Int](2), 1) + kvs(0) = key + kvs(1) = value + } + + private[this] final def indexForKey(key: Int): Int = { + // Custom implementation of binary search since we encode key + value in consecutive indicies. + // We do the binary search on half the size of the array then project to the full size. 
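+    // Layout reminder: kvs stores the pairs as [k0, v0, k1, v1, ...] sorted by key, so the i-th key sits at
+    // index 2*i (an even slot) and its value immediately after it at index 2*i + 1 (an odd slot).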
+ // >>> 1 for division by 2: https://research.googleblog.com/2006/06/extra-extra-read-all-about-it-nearly.html + @tailrec def find(lo: Int, hi: Int): Int = + if (lo <= hi) { + val lohi = lo + hi // Since we search in half the array we don't need to div by 2 to find the real index of key + val idx = lohi & ~1 // Since keys are in even slots, we get the key idx from lo+hi by removing the lowest bit if set (odd) + val k = kvs(idx) + if (k == key) idx + else if (k < key) find((lohi >>> 1) + 1, hi) + else /* if (k > key) */ find(lo, (lohi >>> 1) - 1) + } else ~(lo << 1) // same as -((lo*2)+1): Item should be placed, negated to indicate no match + + find(0, size - 1) + } + + /** + * Worst case `O(log n)`, allocation free. + * Will return Int.MinValue if not found, so beware of storing Int.MinValues + */ + final def get(key: Int): Int = { + // same binary search as in `indexforKey` replicated here for performance reasons. + @tailrec def find(lo: Int, hi: Int): Int = + if (lo <= hi) { + val lohi = lo + hi // Since we search in half the array we don't need to div by 2 to find the real index of key + val k = kvs(lohi & ~1) // Since keys are in even slots, we get the key idx from lo+hi by removing the lowest bit if set (odd) + if (k == key) kvs(lohi | 1) // lohi, if odd, already points to the value-index, if even, we set the lowest bit to add 1 + else if (k < key) find((lohi >>> 1) + 1, hi) + else /* if (k > key) */ find(lo, (lohi >>> 1) - 1) + } else Int.MinValue + + find(0, size - 1) + } + + /** + * Worst case `O(log n)`, allocation free. + */ + final def contains(key: Int): Boolean = indexForKey(key) >= 0 + + /** + * Worst case `O(n)`, creates new `ImmutableIntMap` + * with the given key and value if that key is not yet present in the map. + */ + final def updateIfAbsent(key: Int, value: ⇒ Int): ImmutableIntMap = + if (size > 0) { + val i = indexForKey(key) + if (i >= 0) this + else insert(key, value, i) + } else new ImmutableIntMap(key, value) + + /** + * Worst case `O(n)`, creates new `ImmutableIntMap` + * with the given key with the given value. + */ + final def updated(key: Int, value: Int): ImmutableIntMap = + if (size > 0) { + val i = indexForKey(key) + if (i >= 0) { + val valueIndex = i + 1 + if (kvs(valueIndex) != value) update(value, valueIndex) + else this // If no change no need to copy anything + } else insert(key, value, i) + } else new ImmutableIntMap(key, value) + + private[this] final def update(value: Int, valueIndex: Int): ImmutableIntMap = { + val newKvs = kvs.clone() // clone() can in theory be faster since it could do a malloc + memcpy iso. calloc etc + newKvs(valueIndex) = value + new ImmutableIntMap(newKvs, size) + } + + private[this] final def insert(key: Int, value: Int, index: Int): ImmutableIntMap = { + val at = ~index // ~n == -(n + 1): insert the entry at the right position—keep the array sorted + val newKvs = new Array[Int](kvs.length + 2) + System.arraycopy(kvs, 0, newKvs, 0, at) + newKvs(at) = key + newKvs(at + 1) = value + System.arraycopy(kvs, at, newKvs, at + 2, kvs.length - at) + new ImmutableIntMap(newKvs, size + 1) + } + + /** + * Worst case `O(n)`, creates new `ImmutableIntMap` + * without the given key. 
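+   * Returns this instance unchanged if the key is not present.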
+ */ + final def remove(key: Int): ImmutableIntMap = { + val i = indexForKey(key) + if (i >= 0) { + if (size > 1) { + val newSz = kvs.length - 2 + val newKvs = new Array[Int](newSz) + System.arraycopy(kvs, 0, newKvs, 0, i) + System.arraycopy(kvs, i + 2, newKvs, i, newSz - i) + new ImmutableIntMap(newKvs, size - 1) + } else ImmutableIntMap.empty + } else this + } + + /** + * All keys + */ + final def keysIterator: Iterator[Int] = + if (size < 1) Iterator.empty + else Iterator.range(0, kvs.length - 1, 2).map(kvs.apply) + + override final def toString: String = + if (size < 1) "ImmutableIntMap()" + else Iterator.range(0, kvs.length - 1, 2).map(i ⇒ s"${kvs(i)} -> ${kvs(i + 1)}").mkString("ImmutableIntMap(", ", ", ")") + + override final def hashCode: Int = Arrays.hashCode(kvs) + + override final def equals(obj: Any): Boolean = obj match { + case other: ImmutableIntMap ⇒ Arrays.equals(kvs, other.kvs) // No need to test `this eq obj` since this is done for the kvs arrays anyway + case _ ⇒ false + } +} diff --git a/akka-actor/src/main/scala/akka/util/OptionVal.scala b/akka-actor/src/main/scala/akka/util/OptionVal.scala index 7e94641633..18208fdbbb 100644 --- a/akka-actor/src/main/scala/akka/util/OptionVal.scala +++ b/akka-actor/src/main/scala/akka/util/OptionVal.scala @@ -28,7 +28,7 @@ private[akka] object OptionVal { * * Note that it can be used in pattern matching without allocations * because it has name based extractor using methods `isEmpty` and `get`. - * See http://hseeberger.github.io/blog/2013/10/04/name-based-extractors-in-scala-2-dot-11/ + * See https://hseeberger.wordpress.com/2013/10/04/name-based-extractors-in-scala-2-11/ */ private[akka] final class OptionVal[+A >: Null](val x: A) extends AnyVal { diff --git a/akka-bench-jmh/README.md b/akka-bench-jmh/README.md new file mode 100644 index 0000000000..a37cc73e31 --- /dev/null +++ b/akka-bench-jmh/README.md @@ -0,0 +1,10 @@ +# Akka Microbenchmarks + +This subproject contains some microbenchmarks excercising key parts of Akka. + +You can run them like: + + project akka-bench-jmh + jmh:run -i 3 -wi 3 -f 1 .*ActorCreationBenchmark + +Use 'jmh:run -h' to get an overview of the availabe options. diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala new file mode 100644 index 0000000000..91d2d4de13 --- /dev/null +++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala @@ -0,0 +1,94 @@ +/** + * Copyright (C) 2014-2017 Lightbend Inc. 
+ */ +package akka.actor + +import java.util.concurrent.TimeUnit + +import akka.actor.BenchmarkActors._ +import akka.actor.ForkJoinActorBenchmark.cores +import com.typesafe.config.ConfigFactory +import org.openjdk.jmh.annotations._ + +@State(Scope.Benchmark) +@BenchmarkMode(Array(Mode.Throughput)) +@Fork(1) +@Threads(1) +@Warmup(iterations = 10, time = 5, timeUnit = TimeUnit.SECONDS, batchSize = 1) +@Measurement(iterations = 10, time = 15, timeUnit = TimeUnit.SECONDS, batchSize = 1) +class AffinityPoolComparativeBenchmark { + + @Param(Array("1")) + var throughPut = 0 + + @Param(Array("affinity-dispatcher", "default-fj-dispatcher", "fixed-size-dispatcher")) + var dispatcher = "" + + @Param(Array("SingleConsumerOnlyUnboundedMailbox")) //"default" + var mailbox = "" + + final val numThreads, numActors = 8 + final val numMessagesPerActorPair = 2000000 + final val totalNumberOfMessages = numMessagesPerActorPair * (numActors / 2) + + implicit var system: ActorSystem = _ + + @Setup(Level.Trial) + def setup(): Unit = { + + requireRightNumberOfCores(cores) + + val mailboxConf = mailbox match { + case "default" => "" + case "SingleConsumerOnlyUnboundedMailbox" => + s"""default-mailbox.mailbox-type = "${classOf[akka.dispatch.SingleConsumerOnlyUnboundedMailbox].getName}"""" + } + + system = ActorSystem("AffinityPoolComparativeBenchmark", ConfigFactory.parseString( + s"""| akka { + | log-dead-letters = off + | actor { + | default-fj-dispatcher { + | executor = "fork-join-executor" + | fork-join-executor { + | parallelism-min = $numThreads + | parallelism-factor = 1.0 + | parallelism-max = $numThreads + | } + | throughput = $throughPut + | } + | + | fixed-size-dispatcher { + | executor = "thread-pool-executor" + | thread-pool-executor { + | fixed-pool-size = $numThreads + | } + | throughput = $throughPut + | } + | + | affinity-dispatcher { + | executor = "affinity-pool-executor" + | affinity-pool-executor { + | parallelism-min = $numThreads + | parallelism-factor = 1.0 + | parallelism-max = $numThreads + | task-queue-size = 512 + | idle-cpu-level = 5 + | fair-work-distribution.threshold = 2048 + | } + | throughput = $throughPut + | } + | $mailboxConf + | } + | } + """.stripMargin + )) + } + + @TearDown(Level.Trial) + def shutdown(): Unit = tearDownSystem() + + @Benchmark + @OperationsPerInvocation(totalNumberOfMessages) + def pingPong(): Unit = benchmarkPingPongActors(numMessagesPerActorPair, numActors, dispatcher, throughPut, timeout) +} diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolIdleCPULevelBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolIdleCPULevelBenchmark.scala new file mode 100644 index 0000000000..08d7639a56 --- /dev/null +++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolIdleCPULevelBenchmark.scala @@ -0,0 +1,68 @@ +/** + * Copyright (C) 2014-2017 Lightbend Inc. 
+ */ +package akka.actor + +import java.util.concurrent.TimeUnit + +import akka.actor.BenchmarkActors._ +import com.typesafe.config.ConfigFactory +import org.openjdk.jmh.annotations._ + +@State(Scope.Benchmark) +@BenchmarkMode(Array(Mode.Throughput)) +@Fork(1) +@Threads(1) +@Warmup(iterations = 10, time = 5, timeUnit = TimeUnit.SECONDS, batchSize = 1) +@Measurement(iterations = 10, time = 15, timeUnit = TimeUnit.SECONDS, batchSize = 1) +class AffinityPoolIdleCPULevelBenchmark { + + final val numThreads, numActors = 8 + final val numMessagesPerActorPair = 2000000 + final val totalNumberOfMessages = numMessagesPerActorPair * (numActors / 2) + + implicit var system: ActorSystem = _ + + @Param(Array("1", "3", "5", "7", "10")) + var idleCPULevel = "" + + @Param(Array("25")) + var throughPut = 0 + + @Setup(Level.Trial) + def setup(): Unit = { + + requireRightNumberOfCores(numThreads) + + system = ActorSystem("AffinityPoolWaitingStrategyBenchmark", ConfigFactory.parseString( + s""" | akka { + | log-dead-letters = off + | actor { + | affinity-dispatcher { + | executor = "affinity-pool-executor" + | affinity-pool-executor { + | parallelism-min = $numThreads + | parallelism-factor = 1.0 + | parallelism-max = $numThreads + | task-queue-size = 512 + | idle-cpu-level = $idleCPULevel + | fair-work-distribution.threshold = 2048 + | } + | throughput = $throughPut + | } + | + | } + | } + """.stripMargin + )) + } + + @TearDown(Level.Trial) + def shutdown(): Unit = tearDownSystem() + + @Benchmark + @OutputTimeUnit(TimeUnit.NANOSECONDS) + @OperationsPerInvocation(8000000) + def pingPong(): Unit = benchmarkPingPongActors(numMessagesPerActorPair, numActors, "affinity-dispatcher", throughPut, timeout) + +} diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala new file mode 100644 index 0000000000..75f5d93e94 --- /dev/null +++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala @@ -0,0 +1,110 @@ +/** + * Copyright (C) 2014-2017 Lightbend Inc. 
+ */ +package akka.actor + +import java.util.concurrent.{ CountDownLatch, TimeUnit } + +import akka.actor.BenchmarkActors._ +import akka.actor.ForkJoinActorBenchmark.cores +import com.typesafe.config.ConfigFactory +import org.openjdk.jmh.annotations._ + +@State(Scope.Benchmark) +@BenchmarkMode(Array(Mode.Throughput)) +@Fork(1) +@Threads(1) +@Warmup(iterations = 10, time = 15, timeUnit = TimeUnit.SECONDS, batchSize = 1) +@Measurement(iterations = 10, time = 20, timeUnit = TimeUnit.SECONDS, batchSize = 1) +class AffinityPoolRequestResponseBenchmark { + + @Param(Array("1", "5", "50")) + var throughPut = 0 + + @Param(Array("affinity-dispatcher", "default-fj-dispatcher", "fixed-size-dispatcher")) + var dispatcher = "" + + @Param(Array("SingleConsumerOnlyUnboundedMailbox")) //"default" + var mailbox = "" + + final val numThreads, numActors = 8 + final val numQueriesPerActor = 400000 + final val totalNumberOfMessages = numQueriesPerActor * numActors + final val numUsersInDB = 300000 + + implicit var system: ActorSystem = _ + + var actors: Vector[(ActorRef, ActorRef)] = null + var latch: CountDownLatch = null + + @Setup(Level.Trial) + def setup(): Unit = { + + requireRightNumberOfCores(cores) + + val mailboxConf = mailbox match { + case "default" => "" + case "SingleConsumerOnlyUnboundedMailbox" => + s"""default-mailbox.mailbox-type = "${classOf[akka.dispatch.SingleConsumerOnlyUnboundedMailbox].getName}"""" + } + + system = ActorSystem("AffinityPoolComparativeBenchmark", ConfigFactory.parseString( + s"""| akka { + | log-dead-letters = off + | actor { + | default-fj-dispatcher { + | executor = "fork-join-executor" + | fork-join-executor { + | parallelism-min = $numThreads + | parallelism-factor = 1.0 + | parallelism-max = $numThreads + | } + | throughput = $throughPut + | } + | + | fixed-size-dispatcher { + | executor = "thread-pool-executor" + | thread-pool-executor { + | fixed-pool-size = $numThreads + | } + | throughput = $throughPut + | } + | + | affinity-dispatcher { + | executor = "affinity-pool-executor" + | affinity-pool-executor { + | parallelism-min = $numThreads + | parallelism-factor = 1.0 + | parallelism-max = $numThreads + | task-queue-size = 512 + | idle-cpu-level = 5 + | fair-work-distribution.threshold = 2048 + | } + | throughput = $throughPut + | } + | $mailboxConf + | } + | } + """.stripMargin + )) + } + + @TearDown(Level.Trial) + def shutdown(): Unit = tearDownSystem() + + @Setup(Level.Invocation) + def setupActors(): Unit = { + val (_actors, _latch) = RequestResponseActors.startUserQueryActorPairs(numActors, numQueriesPerActor, numUsersInDB, dispatcher) + actors = _actors + latch = _latch + } + + @Benchmark + @OperationsPerInvocation(totalNumberOfMessages) + def queryUserServiceActor(): Unit = { + val startNanoTime = System.nanoTime() + RequestResponseActors.initiateQuerySimulation(actors, throughPut * 2) + latch.await(BenchmarkActors.timeout.toSeconds, TimeUnit.SECONDS) + BenchmarkActors.printProgress(totalNumberOfMessages, numActors, startNanoTime) + } +} diff --git a/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala b/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala new file mode 100644 index 0000000000..618ae35868 --- /dev/null +++ b/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala @@ -0,0 +1,102 @@ +/** + * Copyright (C) 2014-2017 Lightbend Inc. 
+ */ +package akka.actor + +import java.util.concurrent.{ CountDownLatch, TimeUnit } + +import scala.concurrent.Await +import scala.concurrent.duration.Duration +import scala.concurrent.duration._ + +object BenchmarkActors { + + val timeout = 30.seconds + + case object Message + case object Stop + + class PingPong(val messages: Int, latch: CountDownLatch) extends Actor { + var left = messages / 2 + def receive = { + case Message => + + if (left == 0) { + latch.countDown() + context stop self + } + + sender() ! Message + left -= 1 + } + } + + object PingPong { + def props(messages: Int, latch: CountDownLatch) = Props(new PingPong(messages, latch)) + } + + class Pipe(next: Option[ActorRef]) extends Actor { + def receive = { + case Message => + if (next.isDefined) next.get forward Message + case Stop => + context stop self + if (next.isDefined) next.get forward Stop + } + } + + object Pipe { + def props(next: Option[ActorRef]) = Props(new Pipe(next)) + } + + private def startPingPongActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String)(implicit system: ActorSystem) = { + val fullPathToDispatcher = "akka.actor." + dispatcher + val latch = new CountDownLatch(numPairs * 2) + val actors = for { + i <- (1 to numPairs).toVector + } yield { + val ping = system.actorOf(PingPong.props(messagesPerPair, latch).withDispatcher(fullPathToDispatcher)) + val pong = system.actorOf(PingPong.props(messagesPerPair, latch).withDispatcher(fullPathToDispatcher)) + (ping, pong) + } + (actors, latch) + } + + private def initiatePingPongForPairs(refs: Vector[(ActorRef, ActorRef)], inFlight: Int) = { + for { + (ping, pong) <- refs + _ <- 1 to inFlight + } { + ping.tell(Message, pong) + } + } + + def printProgress(totalMessages: Long, numActors: Int, startNanoTime: Long) = { + val durationMicros = (System.nanoTime() - startNanoTime) / 1000 + println(f" $totalMessages messages by $numActors actors took ${durationMicros / 1000} ms, " + + f"${totalMessages.toDouble / durationMicros}%,.2f M msg/s") + } + + def requireRightNumberOfCores(numCores: Int) = + require( + Runtime.getRuntime.availableProcessors == numCores, + s"Update the cores constant to ${Runtime.getRuntime.availableProcessors}" + ) + + def benchmarkPingPongActors(numMessagesPerActorPair: Int, numActors: Int, dispatcher: String, throughPut: Int, shutdownTimeout: Duration)(implicit system: ActorSystem): Unit = { + val numPairs = numActors / 2 + val totalNumMessages = numPairs * numMessagesPerActorPair + val (actors, latch) = startPingPongActorPairs(numMessagesPerActorPair, numPairs, dispatcher) + val startNanoTime = System.nanoTime() + initiatePingPongForPairs(actors, inFlight = throughPut * 2) + latch.await(shutdownTimeout.toSeconds, TimeUnit.SECONDS) + printProgress(totalNumMessages, numActors, startNanoTime) + } + + def tearDownSystem()(implicit system: ActorSystem): Unit = { + system.terminate() + Await.ready(system.whenTerminated, timeout) + } + +} + diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala index ca8f5529a9..93f1d1d1a5 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala @@ -6,46 +6,61 @@ package akka.actor import akka.testkit.TestProbe import com.typesafe.config.ConfigFactory import org.openjdk.jmh.annotations._ -import scala.concurrent.duration._ import java.util.concurrent.TimeUnit import scala.concurrent.Await import 
scala.annotation.tailrec +import BenchmarkActors._ +import scala.concurrent.duration._ @State(Scope.Benchmark) @BenchmarkMode(Array(Mode.Throughput)) @Fork(1) @Threads(1) @Warmup(iterations = 10, time = 5, timeUnit = TimeUnit.SECONDS, batchSize = 1) -@Measurement(iterations = 20) +@Measurement(iterations = 10, time = 15, timeUnit = TimeUnit.SECONDS, batchSize = 1) class ForkJoinActorBenchmark { import ForkJoinActorBenchmark._ - @Param(Array("5")) + @Param(Array("5", "25", "50")) var tpt = 0 - @Param(Array("1")) + @Param(Array(coresStr)) // coresStr, cores2xStr, cores4xStr var threads = "" + @Param(Array("SingleConsumerOnlyUnboundedMailbox")) //"default" + var mailbox = "" + implicit var system: ActorSystem = _ @Setup(Level.Trial) def setup(): Unit = { + + requireRightNumberOfCores(cores) + + val mailboxConf = mailbox match { + case "default" => "" + case "SingleConsumerOnlyUnboundedMailbox" => + s"""default-mailbox.mailbox-type = "${classOf[akka.dispatch.SingleConsumerOnlyUnboundedMailbox].getName}"""" + } + system = ActorSystem("ForkJoinActorBenchmark", ConfigFactory.parseString( - s"""| akka { - | log-dead-letters = off - | actor { - | default-dispatcher { - | executor = "fork-join-executor" - | fork-join-executor { - | parallelism-min = 1 - | parallelism-factor = $threads - | parallelism-max = 64 - | } - | throughput = $tpt - | } - | } - | } - """.stripMargin + s""" + akka { + log-dead-letters = off + actor { + default-dispatcher { + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = $threads + parallelism-factor = 1 + parallelism-max = $threads + } + throughput = $tpt + } + $mailboxConf + } + } + """ )) } @@ -55,110 +70,31 @@ class ForkJoinActorBenchmark { Await.ready(system.whenTerminated, 15.seconds) } - var pingPongActors: Vector[(ActorRef, ActorRef)] = null - var pingPongLessActorsThanCoresActors: Vector[(ActorRef, ActorRef)] = null - var pingPongSameNumberOfActorsAsCoresActors: Vector[(ActorRef, ActorRef)] = null - var pingPongMoreActorsThanCoresActors: Vector[(ActorRef, ActorRef)] = null - - @Setup(Level.Invocation) - def setupActors(): Unit = { - pingPongActors = startActors(1) - pingPongLessActorsThanCoresActors = startActors(lessThanCoresActorPairs) - pingPongSameNumberOfActorsAsCoresActors = startActors(cores / 2) - pingPongMoreActorsThanCoresActors = startActors(moreThanCoresActorPairs) - } - - @TearDown(Level.Invocation) - def tearDownActors(): Unit = { - stopActors(pingPongActors) - stopActors(pingPongLessActorsThanCoresActors) - stopActors(pingPongSameNumberOfActorsAsCoresActors) - stopActors(pingPongMoreActorsThanCoresActors) - } - - def startActors(n: Int): Vector[(ActorRef, ActorRef)] = { - for { - i <- (1 to n).toVector - } yield { - val ping = system.actorOf(Props[ForkJoinActorBenchmark.PingPong]) - val pong = system.actorOf(Props[ForkJoinActorBenchmark.PingPong]) - (ping, pong) - } - } - - def stopActors(refs: Vector[(ActorRef, ActorRef)]): Unit = { - if (refs ne null) { - refs.foreach { - case (ping, pong) => - system.stop(ping) - system.stop(pong) - } - awaitTerminated(refs) - } - } - - def awaitTerminated(refs: Vector[(ActorRef, ActorRef)]): Unit = { - if (refs ne null) refs.foreach { - case (ping, pong) => - val p = TestProbe() - p.watch(ping) - p.expectTerminated(ping, timeout) - p.watch(pong) - p.expectTerminated(pong, timeout) - } - } - - def sendMessage(refs: Vector[(ActorRef, ActorRef)], inFlight: Int): Unit = { - for { - (ping, pong) <- refs - _ <- 1 to inFlight - } { - ping.tell(Message, pong) - } - } + @Benchmark + 
@OperationsPerInvocation(totalMessagesTwoActors) + def pingPong(): Unit = benchmarkPingPongActors(messages, twoActors, "default-dispatcher", tpt, timeout) @Benchmark - @Measurement(timeUnit = TimeUnit.MILLISECONDS) - @OperationsPerInvocation(messages) - def pingPong(): Unit = { - // only one message in flight - sendMessage(pingPongActors, inFlight = 1) - awaitTerminated(pingPongActors) - } - - @Benchmark - @Measurement(timeUnit = TimeUnit.MILLISECONDS) @OperationsPerInvocation(totalMessagesLessThanCores) - def pingPongLessActorsThanCores(): Unit = { - sendMessage(pingPongLessActorsThanCoresActors, inFlight = 2 * tpt) - awaitTerminated(pingPongLessActorsThanCoresActors) - } + def pingPongLessActorsThanCores(): Unit = benchmarkPingPongActors(messages, lessThanCoresActors, "default-dispatcher", tpt, timeout) @Benchmark - @Measurement(timeUnit = TimeUnit.MILLISECONDS) @OperationsPerInvocation(totalMessagesSameAsCores) - def pingPongSameNumberOfActorsAsCores(): Unit = { - sendMessage(pingPongSameNumberOfActorsAsCoresActors, inFlight = 2 * tpt) - awaitTerminated(pingPongSameNumberOfActorsAsCoresActors) - } + def pingPongSameNumberOfActorsAsCores(): Unit = benchmarkPingPongActors(messages, sameAsCoresActors, "default-dispatcher", tpt, timeout) @Benchmark - @Measurement(timeUnit = TimeUnit.MILLISECONDS) @OperationsPerInvocation(totalMessagesMoreThanCores) - def pingPongMoreActorsThanCores(): Unit = { - sendMessage(pingPongMoreActorsThanCoresActors, inFlight = 2 * tpt) - awaitTerminated(pingPongMoreActorsThanCoresActors) - } + def pingPongMoreActorsThanCores(): Unit = benchmarkPingPongActors(messages, moreThanCoresActors, "default-dispatcher", tpt, timeout) // @Benchmark // @Measurement(timeUnit = TimeUnit.MILLISECONDS) // @OperationsPerInvocation(messages) def floodPipe(): Unit = { - val end = system.actorOf(Props(classOf[ForkJoinActorBenchmark.Pipe], None)) - val middle = system.actorOf(Props(classOf[ForkJoinActorBenchmark.Pipe], Some(end))) - val penultimate = system.actorOf(Props(classOf[ForkJoinActorBenchmark.Pipe], Some(middle))) - val beginning = system.actorOf(Props(classOf[ForkJoinActorBenchmark.Pipe], Some(penultimate))) + val end = system.actorOf(Props(classOf[Pipe], None)) + val middle = system.actorOf(Props(classOf[Pipe], Some(end))) + val penultimate = system.actorOf(Props(classOf[Pipe], Some(middle))) + val beginning = system.actorOf(Props(classOf[Pipe], Some(penultimate))) val p = TestProbe() p.watch(end) @@ -178,39 +114,23 @@ class ForkJoinActorBenchmark { } object ForkJoinActorBenchmark { - case object Stop - case object Message - final val timeout = 15.seconds - final val messages = 400000 + final val messages = 2000000 // messages per actor pair + // Constants because they are used in annotations // update according to cpu final val cores = 8 - // 2 actors per - final val moreThanCoresActorPairs = cores * 2 - final val lessThanCoresActorPairs = (cores / 2) - 1 - final val totalMessagesMoreThanCores = moreThanCoresActorPairs * messages - final val totalMessagesLessThanCores = lessThanCoresActorPairs * messages - final val totalMessagesSameAsCores = cores * messages + final val coresStr = "8" + final val cores2xStr = "16" + final val cores4xStr = "24" - class Pipe(next: Option[ActorRef]) extends Actor { - def receive = { - case Message => - if (next.isDefined) next.get forward Message - case Stop => - context stop self - if (next.isDefined) next.get forward Stop - } - } - class PingPong extends Actor { - var left = messages / 2 - def receive = { - case Message => + final val 
twoActors = 2 + final val moreThanCoresActors = cores * 2 + final val lessThanCoresActors = cores / 2 + final val sameAsCoresActors = cores - if (left <= 1) - context stop self + final val totalMessagesTwoActors = messages + final val totalMessagesMoreThanCores = (moreThanCoresActors * messages) / 2 + final val totalMessagesLessThanCores = (lessThanCoresActors * messages) / 2 + final val totalMessagesSameAsCores = (sameAsCoresActors * messages) / 2 - sender() ! Message - left -= 1 - } - } -} +} \ No newline at end of file diff --git a/akka-bench-jmh/src/main/scala/akka/actor/RequestResponseActors.scala b/akka-bench-jmh/src/main/scala/akka/actor/RequestResponseActors.scala new file mode 100644 index 0000000000..dd7e0f8e41 --- /dev/null +++ b/akka-bench-jmh/src/main/scala/akka/actor/RequestResponseActors.scala @@ -0,0 +1,93 @@ +/** + * Copyright (C) 2014-2017 Lightbend Inc. + */ +package akka.actor + +import java.util.concurrent.CountDownLatch + +import scala.collection.mutable +import scala.util.Random + +object RequestResponseActors { + + case class Request(userId: Int) + case class User(userId: Int, firstName: String, lastName: String, ssn: Int, friends: Seq[Int]) + + class UserQueryActor(latch: CountDownLatch, numQueries: Int, numUsersInDB: Int) extends Actor { + + private var left = numQueries + private val receivedUsers: mutable.Map[Int, User] = mutable.Map() + private val randGenerator = new Random() + + override def receive: Receive = { + case u: User => { + receivedUsers.put(u.userId, u) + if (left == 0) { + latch.countDown() + context stop self + } else { + sender() ! Request(randGenerator.nextInt(numUsersInDB)) + } + left -= 1 + } + } + } + + object UserQueryActor { + def props(latch: CountDownLatch, numQueries: Int, numUsersInDB: Int) = { + Props(new UserQueryActor(latch, numQueries, numUsersInDB)) + } + } + + class UserServiceActor(userDb: Map[Int, User], latch: CountDownLatch, numQueries: Int) extends Actor { + private var left = numQueries + def receive = { + case Request(id) => + userDb.get(id) match { + case Some(u) => sender() ! u + case None => + } + if (left == 0) { + latch.countDown() + context stop self + } + left -= 1 + } + + } + + object UserServiceActor { + def props(latch: CountDownLatch, numQueries: Int, numUsersInDB: Int) = { + val r = new Random() + val users = for { + id <- 0 until numUsersInDB + firstName = r.nextString(5) + lastName = r.nextString(7) + ssn = r.nextInt() + friendIds = for { _ <- 0 until 5 } yield r.nextInt(numUsersInDB) + } yield id -> User(id, firstName, lastName, ssn, friendIds) + Props(new UserServiceActor(users.toMap, latch, numQueries)) + } + } + + def startUserQueryActorPairs(numActors: Int, numQueriesPerActor: Int, numUsersInDBPerActor: Int, dispatcher: String)(implicit system: ActorSystem) = { + val fullPathToDispatcher = "akka.actor." 
+ dispatcher + val latch = new CountDownLatch(numActors) + val actorsPairs = for { + i <- (1 to (numActors / 2)).toVector + userQueryActor = system.actorOf(UserQueryActor.props(latch, numQueriesPerActor, numUsersInDBPerActor).withDispatcher(fullPathToDispatcher)) + userServiceActor = system.actorOf(UserServiceActor.props(latch, numQueriesPerActor, numUsersInDBPerActor).withDispatcher(fullPathToDispatcher)) + } yield (userQueryActor, userServiceActor) + (actorsPairs, latch) + } + + def initiateQuerySimulation(requestResponseActorPairs: Seq[(ActorRef, ActorRef)], inFlight: Int) = { + for { + (queryActor, serviceActor) <- requestResponseActorPairs + i <- 1 to inFlight + } { + serviceActor.tell(Request(i), queryActor) + } + } + +} diff --git a/akka-bench-jmh/src/main/scala/akka/remote/artery/LatchSink.scala b/akka-bench-jmh/src/main/scala/akka/remote/artery/LatchSink.scala index 5953b352bc..4ac234e9cd 100644 --- a/akka-bench-jmh/src/main/scala/akka/remote/artery/LatchSink.scala +++ b/akka-bench-jmh/src/main/scala/akka/remote/artery/LatchSink.scala @@ -24,6 +24,11 @@ class LatchSink(countDownAfter: Int, latch: CountDownLatch) extends GraphStage[S override def preStart(): Unit = pull(in) + override def onUpstreamFailure(ex: Throwable): Unit = { + println(ex.getMessage) + ex.printStackTrace() + } + override def onPush(): Unit = { n += 1 if (n == countDownAfter) diff --git a/akka-bench-jmh/src/main/scala/akka/stream/PartitionHubBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/PartitionHubBenchmark.scala new file mode 100644 index 0000000000..f8c1c7ead6 --- /dev/null +++ b/akka-bench-jmh/src/main/scala/akka/stream/PartitionHubBenchmark.scala @@ -0,0 +1,128 @@ +/** + * Copyright (C) 2014-2017 Lightbend Inc. + */ + +package akka.stream + +import java.util.concurrent.TimeUnit +import akka.NotUsed +import akka.actor.ActorSystem +import akka.stream.scaladsl._ +import com.typesafe.config.ConfigFactory +import org.openjdk.jmh.annotations._ +import java.util.concurrent.Semaphore +import scala.util.Success +import akka.stream.impl.fusing.GraphStages +import org.reactivestreams._ +import scala.concurrent.Await +import scala.concurrent.duration._ +import akka.remote.artery.BenchTestSource +import java.util.concurrent.CountDownLatch +import akka.remote.artery.LatchSink +import akka.stream.impl.PhasedFusingActorMaterializer +import akka.testkit.TestProbe +import akka.stream.impl.StreamSupervisor +import akka.stream.scaladsl.PartitionHub +import akka.remote.artery.FixedSizePartitionHub + +object PartitionHubBenchmark { + final val OperationsPerInvocation = 100000 +} + +@State(Scope.Benchmark) +@OutputTimeUnit(TimeUnit.SECONDS) +@BenchmarkMode(Array(Mode.Throughput)) +class PartitionHubBenchmark { + import PartitionHubBenchmark._ + + val config = ConfigFactory.parseString( + """ + akka.actor.default-dispatcher { + executor = "fork-join-executor" + fork-join-executor { + parallelism-factor = 1 + } + } + """ + ) + + implicit val system = ActorSystem("PartitionHubBenchmark", config) + + var materializer: ActorMaterializer = _ + + @Param(Array("2", "5", "10", "20", "30")) + var NumberOfStreams = 0 + + @Param(Array("256")) + var BufferSize = 0 + + var testSource: Source[java.lang.Integer, NotUsed] = _ + + @Setup + def setup(): Unit = { + val settings = ActorMaterializerSettings(system) + materializer = ActorMaterializer(settings) + + testSource = Source.fromGraph(new BenchTestSource(OperationsPerInvocation)) + } + + @TearDown + def shutdown(): Unit = { + Await.result(system.terminate(), 5.seconds) + } + + 
@Benchmark + @OperationsPerInvocation(OperationsPerInvocation) + def partition(): Unit = { + val N = OperationsPerInvocation + val latch = new CountDownLatch(NumberOfStreams) + + val source = testSource + .runWith(PartitionHub.sink[java.lang.Integer]( + (size, elem) => elem.intValue % NumberOfStreams, + startAfterNrOfConsumers = NumberOfStreams, bufferSize = BufferSize + ))(materializer) + + for (_ <- 0 until NumberOfStreams) + source.runWith(new LatchSink(N / NumberOfStreams, latch))(materializer) + + if (!latch.await(30, TimeUnit.SECONDS)) { + dumpMaterializer() + throw new RuntimeException("Latch didn't complete in time") + } + } + + // @Benchmark + // @OperationsPerInvocation(OperationsPerInvocation) + def arteryLanes(): Unit = { + val N = OperationsPerInvocation + val latch = new CountDownLatch(NumberOfStreams) + + val source = testSource + .runWith( + Sink.fromGraph(new FixedSizePartitionHub( + _.intValue % NumberOfStreams, + lanes = NumberOfStreams, bufferSize = BufferSize + )) + )(materializer) + + for (_ <- 0 until NumberOfStreams) + source.runWith(new LatchSink(N / NumberOfStreams, latch))(materializer) + + if (!latch.await(30, TimeUnit.SECONDS)) { + dumpMaterializer() + throw new RuntimeException("Latch didn't complete in time") + } + } + + private def dumpMaterializer(): Unit = { + materializer match { + case impl: PhasedFusingActorMaterializer ⇒ + val probe = TestProbe()(system) + impl.supervisor.tell(StreamSupervisor.GetChildren, probe.ref) + val children = probe.expectMsgType[StreamSupervisor.Children].children + children.foreach(_ ! StreamSupervisor.PrintDebugDump) + } + } + +} diff --git a/akka-bench-jmh/src/main/scala/akka/util/ImmutableIntMapBench.scala b/akka-bench-jmh/src/main/scala/akka/util/ImmutableIntMapBench.scala new file mode 100644 index 0000000000..e7aa0c129b --- /dev/null +++ b/akka-bench-jmh/src/main/scala/akka/util/ImmutableIntMapBench.scala @@ -0,0 +1,112 @@ +/** + * Copyright (C) 2014-2017 Lightbend Inc. 
+ */ +package akka.util + +import org.openjdk.jmh.annotations._ +import java.util.concurrent.TimeUnit +import scala.annotation.tailrec + +@State(Scope.Benchmark) +@BenchmarkMode(Array(Mode.Throughput)) +@Fork(1) +@Threads(1) +@Warmup(iterations = 10, time = 5, timeUnit = TimeUnit.MICROSECONDS, batchSize = 1) +@Measurement(iterations = 10, time = 15, timeUnit = TimeUnit.MICROSECONDS, batchSize = 1) +class ImmutableIntMapBench { + + @tailrec private[this] final def add(n: Int, c: ImmutableIntMap = ImmutableIntMap.empty): ImmutableIntMap = + if (n >= 0) add(n - 1, c.updated(n, n)) + else c + + @tailrec private[this] final def contains(n: Int, by: Int, to: Int, in: ImmutableIntMap, b: Boolean): Boolean = + if (n <= to) { + val result = in.contains(n) + contains(n + by, by, to, in, result) + } else b + + @tailrec private[this] final def get(n: Int, by: Int, to: Int, in: ImmutableIntMap, b: Int): Int = + if (n <= to) { + val result = in.get(n) + get(n + by, by, to, in, result) + } else b + + @tailrec private[this] final def hashCode(n: Int, in: ImmutableIntMap, b: Int): Int = + if (n >= 0) { + val result = in.hashCode + hashCode(n - 1, in, result) + } else b + + @tailrec private[this] final def updateIfAbsent(n: Int, by: Int, to: Int, in: ImmutableIntMap): ImmutableIntMap = + if (n <= to) updateIfAbsent(n + by, by, to, in.updateIfAbsent(n, n)) + else in + + @tailrec private[this] final def getKey(iterations: Int, key: Int, from: ImmutableIntMap): ImmutableIntMap = { + if (iterations > 0 && key != Int.MinValue) { + val k = from.get(key) + getKey(iterations - 1, k, from) + } else from + } + + val odd1000 = (0 to 1000).iterator.filter(_ % 2 == 1).foldLeft(ImmutableIntMap.empty)((l, i) => l.updated(i, i)) + + @Benchmark + @OperationsPerInvocation(1) + def add1(): ImmutableIntMap = add(1) + + @Benchmark + @OperationsPerInvocation(10) + def add10(): ImmutableIntMap = add(10) + + @Benchmark + @OperationsPerInvocation(100) + def add100(): ImmutableIntMap = add(100) + + @Benchmark + @OperationsPerInvocation(1000) + def add1000(): ImmutableIntMap = add(1000) + + @Benchmark + @OperationsPerInvocation(10000) + def add10000(): ImmutableIntMap = add(10000) + + @Benchmark + @OperationsPerInvocation(500) + def contains(): Boolean = contains(n = 1, by = 2, to = odd1000.size, in = odd1000, b = false) + + @Benchmark + @OperationsPerInvocation(500) + def notcontains(): Boolean = contains(n = 0, by = 2, to = odd1000.size, in = odd1000, b = false) + + @Benchmark + @OperationsPerInvocation(500) + def get(): Int = get(n = 1, by = 2, to = odd1000.size, in = odd1000, b = Int.MinValue) + + @Benchmark + @OperationsPerInvocation(500) + def notget(): Int = get(n = 0, by = 2, to = odd1000.size, in = odd1000, b = Int.MinValue) + + @Benchmark + @OperationsPerInvocation(500) + def updateNotAbsent(): ImmutableIntMap = updateIfAbsent(n = 1, by = 2, to = odd1000.size, in = odd1000) + + @Benchmark + @OperationsPerInvocation(500) + def updateAbsent(): ImmutableIntMap = updateIfAbsent(n = 0, by = 2, to = odd1000.size, in = odd1000) + + @Benchmark + @OperationsPerInvocation(10000) + def hashcode(): Int = hashCode(10000, odd1000, 0) + + @Benchmark + @OperationsPerInvocation(1000) + def getMidElement(): ImmutableIntMap = getKey(iterations = 1000, key = 249, from = odd1000) + + @Benchmark + @OperationsPerInvocation(1000) + def getLoElement(): ImmutableIntMap = getKey(iterations = 1000, key = 1, from = odd1000) + + @Benchmark + @OperationsPerInvocation(1000) + def getHiElement(): ImmutableIntMap = getKey(iterations = 1000, key = 999, from 
= odd1000) +} \ No newline at end of file diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala index 82b0159125..6413c33302 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala @@ -372,7 +372,7 @@ abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMe val (sum, count) = acc(address) acc + (address → ((sum + capacity, count + 1))) }.map { - case (addr, (sum, count)) ⇒ addr → (sum / count) + case (address, (sum, count)) ⇒ address → (sum / count) } } @@ -434,7 +434,7 @@ abstract class CapacityMetricsSelector extends MetricsSelector { val (_, min) = capacity.minBy { case (_, c) ⇒ c } // lowest usable capacity is 1% (>= 0.5% will be rounded to weight 1), also avoids div by zero val divisor = math.max(0.01, min) - capacity map { case (addr, c) ⇒ (addr → math.round((c) / divisor).toInt) } + capacity map { case (address, c) ⇒ (address → math.round((c) / divisor).toInt) } } } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala index dee09b0def..cc087a8182 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala @@ -138,7 +138,7 @@ abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoa val router = system.actorOf( ClusterRouterPool( local = AdaptiveLoadBalancingPool(HeapMetricsSelector), - settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)). + settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true)). 
props(Props[Echo]), name) // it may take some time until router receives cluster member events diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala index d53d24d7c0..6b7891ee74 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala @@ -45,7 +45,7 @@ object StatsSampleSpecConfig extends MultiNodeConfig { cluster { enabled = on allow-local-routees = on - use-role = compute + use-roles = ["compute"] } } } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala index 6fd82f4e07..3fd57f14f5 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala @@ -57,7 +57,7 @@ abstract class StatsService2 extends Actor { val workerRouter = context.actorOf( ClusterRouterGroup(ConsistentHashingGroup(Nil), ClusterRouterGroupSettings( totalInstances = 100, routeesPaths = List("/user/statsWorker"), - allowLocalRoutees = true, useRole = Some("compute"))).props(), + allowLocalRoutees = true, useRoles = Set("compute"))).props(), name = "workerRouter2") //#router-lookup-in-code } @@ -71,7 +71,7 @@ abstract class StatsService3 extends Actor { val workerRouter = context.actorOf( ClusterRouterPool(ConsistentHashingPool(0), ClusterRouterPoolSettings( totalInstances = 100, maxInstancesPerNode = 3, - allowLocalRoutees = false, useRole = None)).props(Props[StatsWorker]), + allowLocalRoutees = false)).props(Props[StatsWorker]), name = "workerRouter3") //#router-deploy-in-code } diff --git a/akka-cluster-sharding/src/main/mima-filters/2.4.0.backwards.excludes b/akka-cluster-sharding/src/main/mima-filters/2.4.0.backwards.excludes new file mode 100644 index 0000000000..cc312b59da --- /dev/null +++ b/akka-cluster-sharding/src/main/mima-filters/2.4.0.backwards.excludes @@ -0,0 +1,7 @@ +# #18722 internal changes to actor +ProblemFilters.exclude[Problem]("akka.cluster.sharding.DDataShardCoordinator*") + +ProblemFilters.exclude[MissingTypesProblem]("akka.cluster.sharding.ShardRegion$GetCurrentRegions$") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardCoordinator#Internal#State.apply") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardCoordinator#Internal#State.copy") + diff --git a/akka-cluster-sharding/src/main/mima-filters/2.4.11.backwards.excludes b/akka-cluster-sharding/src/main/mima-filters/2.4.11.backwards.excludes new file mode 100644 index 0000000000..5125b626c0 --- /dev/null +++ b/akka-cluster-sharding/src/main/mima-filters/2.4.11.backwards.excludes @@ -0,0 +1,2 @@ +# #21194 renamed internal actor method +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardCoordinator.allocateShardHomes") diff --git a/akka-cluster-sharding/src/main/mima-filters/2.4.17.backwards.excludes b/akka-cluster-sharding/src/main/mima-filters/2.4.17.backwards.excludes new file mode 100644 index 0000000000..c2ca7bf17f --- /dev/null +++ b/akka-cluster-sharding/src/main/mima-filters/2.4.17.backwards.excludes @@ -0,0 +1,7 @@ +# Internal MessageBuffer for actors 
+ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.Shard.totalBufferSize") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.sharding.Shard.messageBuffers") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.Shard.messageBuffers_=") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.totalBufferSize") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.sharding.ShardRegion.shardBuffers") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.ShardRegion.shardBuffers_=") diff --git a/akka-cluster-sharding/src/main/mima-filters/2.4.4.backwards.excludes b/akka-cluster-sharding/src/main/mima-filters/2.4.4.backwards.excludes new file mode 100644 index 0000000000..399fa9d575 --- /dev/null +++ b/akka-cluster-sharding/src/main/mima-filters/2.4.4.backwards.excludes @@ -0,0 +1,5 @@ +# #20319 - remove not needed "no. of persists" counter in sharding +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.PersistentShard.persistCount") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.PersistentShard.persistCount_=") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.PersistentShardCoordinator.persistCount") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.PersistentShardCoordinator.persistCount_=") diff --git a/akka-cluster-sharding/src/main/mima-filters/2.4.x.backwards.excludes b/akka-cluster-sharding/src/main/mima-filters/2.4.x.backwards.excludes new file mode 100644 index 0000000000..66a939ee10 --- /dev/null +++ b/akka-cluster-sharding/src/main/mima-filters/2.4.x.backwards.excludes @@ -0,0 +1,18 @@ +# #22141 sharding minCap +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.updatingStateTimeout") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.waitingForStateTimeout") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.this") + +# #22154 Sharding remembering entities with ddata, internal actors +ProblemFilters.exclude[Problem]("akka.cluster.sharding.Shard*") +ProblemFilters.exclude[Problem]("akka.cluster.sharding.PersistentShard*") +ProblemFilters.exclude[Problem]("akka.cluster.sharding.ClusterShardingGuardian*") +ProblemFilters.exclude[Problem]("akka.cluster.sharding.ShardRegion*") + +# #21423 remove deprecated persist method (persistAll) +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.PersistentShard.persist") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.PersistentShard.persistAsync") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.PersistentShardCoordinator.persist") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.PersistentShardCoordinator.persistAsync") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.RemoveInternalClusterShardingData#RemoveOnePersistenceId.persist") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.RemoveInternalClusterShardingData#RemoveOnePersistenceId.persistAsync") diff --git a/akka-cluster-sharding/src/main/mima-filters/2.5.1.backwards.excludes b/akka-cluster-sharding/src/main/mima-filters/2.5.1.backwards.excludes new file mode 100644 index 0000000000..f8e1e61bdf --- /dev/null +++ 
b/akka-cluster-sharding/src/main/mima-filters/2.5.1.backwards.excludes @@ -0,0 +1,6 @@ +# #22868 store shards +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.sendUpdate") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.waitingForUpdate") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.getState") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.waitingForState") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.this") diff --git a/akka-cluster-tools/src/main/mima-filters/2.4.17.backwards.excludes b/akka-cluster-tools/src/main/mima-filters/2.4.17.backwards.excludes new file mode 100644 index 0000000000..b8a82dd576 --- /dev/null +++ b/akka-cluster-tools/src/main/mima-filters/2.4.17.backwards.excludes @@ -0,0 +1,6 @@ +# Internal MessageBuffer for actors +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.pubsub.PerGroupingBuffer.akka$cluster$pubsub$PerGroupingBuffer$$buffers") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.pubsub.PerGroupingBuffer.akka$cluster$pubsub$PerGroupingBuffer$_setter_$akka$cluster$pubsub$PerGroupingBuffer$$buffers_=") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.singleton.ClusterSingletonProxy.buffer") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.singleton.ClusterSingletonProxy.buffer_=") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.client.ClusterClient.buffer") diff --git a/akka-cluster-tools/src/main/mima-filters/2.4.4.backwards.excludes b/akka-cluster-tools/src/main/mima-filters/2.4.4.backwards.excludes new file mode 100644 index 0000000000..9709646bd0 --- /dev/null +++ b/akka-cluster-tools/src/main/mima-filters/2.4.4.backwards.excludes @@ -0,0 +1,4 @@ +# #20462 - now uses a Set instead of a Seq within the private API of the cluster client +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.client.ClusterClient.contacts_=") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.client.ClusterClient.contacts") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.client.ClusterClient.initialContactsSel") diff --git a/akka-cluster-tools/src/main/mima-filters/2.4.7.backwards.excludes b/akka-cluster-tools/src/main/mima-filters/2.4.7.backwards.excludes new file mode 100644 index 0000000000..570789aa39 --- /dev/null +++ b/akka-cluster-tools/src/main/mima-filters/2.4.7.backwards.excludes @@ -0,0 +1,2 @@ +ProblemFilters.exclude[Problem]("akka.cluster.pubsub.DistributedPubSubMediator$Internal*") +ProblemFilters.exclude[Problem]("akka.cluster.pubsub.DistributedPubSubMediator#Internal*") diff --git a/akka-cluster-tools/src/main/mima-filters/2.4.8.backwards.excludes b/akka-cluster-tools/src/main/mima-filters/2.4.8.backwards.excludes new file mode 100644 index 0000000000..e55c42aa70 --- /dev/null +++ b/akka-cluster-tools/src/main/mima-filters/2.4.8.backwards.excludes @@ -0,0 +1,7 @@ +# #20846 change of internal Status message +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.pubsub.protobuf.msg.DistributedPubSubMessages#StatusOrBuilder.getReplyToStatus") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.pubsub.protobuf.msg.DistributedPubSubMessages#StatusOrBuilder.hasReplyToStatus") + +# #20942 ClusterSingleton 
+ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.singleton.ClusterSingletonManager.addRemoved") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.singleton.ClusterSingletonManager.selfAddressOption") diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala index 9fd4e50bb7..fb2f3a6430 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala @@ -10,7 +10,9 @@ import scala.concurrent.duration._ import java.util.concurrent.ThreadLocalRandom import java.net.URLEncoder import java.net.URLDecoder + import akka.actor._ +import akka.annotation.DoNotInherit import akka.cluster.Cluster import akka.cluster.ClusterEvent._ import akka.cluster.Member @@ -24,6 +26,7 @@ import akka.routing.RouterEnvelope import akka.routing.RoundRobinRoutingLogic import akka.routing.ConsistentHashingRoutingLogic import akka.routing.BroadcastRoutingLogic + import scala.collection.immutable.TreeMap import com.typesafe.config.Config import akka.dispatch.Dispatchers @@ -399,6 +402,7 @@ object DistributedPubSubMediator { */ def wrapIfNeeded: Any ⇒ Any = { case msg: RouterEnvelope ⇒ MediatorRouterEnvelope(msg) + case null ⇒ throw InvalidMessageException("Message must not be null") case msg: Any ⇒ msg } } @@ -475,7 +479,10 @@ trait DistributedPubSubMessage extends Serializable * Successful `Subscribe` and `Unsubscribe` is acknowledged with * [[DistributedPubSubMediator.SubscribeAck]] and [[DistributedPubSubMediator.UnsubscribeAck]] * replies. + * + * Not intended for subclassing by user code. */ +@DoNotInherit class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Actor with ActorLogging with PerGroupingBuffer { import DistributedPubSubMediator._ diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala index cd4f790a4e..f0381063f6 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala @@ -5,6 +5,7 @@ package akka.cluster.singleton import com.typesafe.config.Config + import scala.concurrent.duration._ import scala.collection.immutable import akka.actor.Actor @@ -25,9 +26,11 @@ import akka.AkkaException import akka.actor.NoSerializationVerificationNeeded import akka.cluster.UniqueAddress import akka.cluster.ClusterEvent + import scala.concurrent.Promise import akka.Done import akka.actor.CoordinatedShutdown +import akka.annotation.DoNotInherit import akka.pattern.ask import akka.util.Timeout import akka.cluster.ClusterSettings @@ -395,6 +398,8 @@ class ClusterSingletonManagerIsStuck(message: String) extends AkkaException(mess * Use factory method [[ClusterSingletonManager#props]] to create the * [[akka.actor.Props]] for the actor. * + * Not intended for subclassing by user code. + * * * @param singletonProps [[akka.actor.Props]] of the singleton actor instance. 
* @@ -408,6 +413,7 @@ class ClusterSingletonManagerIsStuck(message: String) extends AkkaException(mess * * @param settings see [[ClusterSingletonManagerSettings]] */ +@DoNotInherit class ClusterSingletonManager( singletonProps: Props, terminationMessage: Any, diff --git a/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java b/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java index 5de6397475..fdf3fc93d5 100644 --- a/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java +++ b/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java @@ -82,7 +82,7 @@ public class DistributedPubSubMediatorTest extends JUnitSuite { .match(String.class, msg -> log.info("Got: {}", msg)) .match(DistributedPubSubMediator.SubscribeAck.class, msg -> - log.info("subscribing")) + log.info("subscribed")) .build(); } } @@ -126,8 +126,6 @@ public class DistributedPubSubMediatorTest extends JUnitSuite { return receiveBuilder() .match(String.class, msg -> log.info("Got: {}", msg)) - .match(DistributedPubSubMediator.SubscribeAck.class, msg -> - log.info("subscribing")) .build(); } diff --git a/akka-cluster/src/main/java/akka/cluster/protobuf/msg/ClusterMessages.java b/akka-cluster/src/main/java/akka/cluster/protobuf/msg/ClusterMessages.java index 533e3dd8a0..e230001a87 100644 --- a/akka-cluster/src/main/java/akka/cluster/protobuf/msg/ClusterMessages.java +++ b/akka-cluster/src/main/java/akka/cluster/protobuf/msg/ClusterMessages.java @@ -14016,6 +14016,26 @@ public final class ClusterMessages { */ akka.protobuf.ByteString getUseRoleBytes(); + + // repeated string useRoles = 5; + /** + * repeated string useRoles = 5; + */ + java.util.List + getUseRolesList(); + /** + * repeated string useRoles = 5; + */ + int getUseRolesCount(); + /** + * repeated string useRoles = 5; + */ + java.lang.String getUseRoles(int index); + /** + * repeated string useRoles = 5; + */ + akka.protobuf.ByteString + getUseRolesBytes(int index); } /** * Protobuf type {@code ClusterRouterPoolSettings} @@ -14088,6 +14108,14 @@ public final class ClusterMessages { useRole_ = input.readBytes(); break; } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + useRoles_ = new akka.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000010; + } + useRoles_.add(input.readBytes()); + break; + } } } } catch (akka.protobuf.InvalidProtocolBufferException e) { @@ -14096,6 +14124,9 @@ public final class ClusterMessages { throw new akka.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + useRoles_ = new akka.protobuf.UnmodifiableLazyStringList(useRoles_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -14219,11 +14250,42 @@ public final class ClusterMessages { } } + // repeated string useRoles = 5; + public static final int USEROLES_FIELD_NUMBER = 5; + private akka.protobuf.LazyStringList useRoles_; + /** + * repeated string useRoles = 5; + */ + public java.util.List + getUseRolesList() { + return useRoles_; + } + /** + * repeated string useRoles = 5; + */ + public int getUseRolesCount() { + return useRoles_.size(); + } + /** + * repeated string useRoles = 5; + */ + public java.lang.String getUseRoles(int index) { + return useRoles_.get(index); + } + /** + * repeated string useRoles = 5; + */ + public akka.protobuf.ByteString + getUseRolesBytes(int index) { + return 
useRoles_.getByteString(index); + } + private void initFields() { totalInstances_ = 0; maxInstancesPerNode_ = 0; allowLocalRoutees_ = false; useRole_ = ""; + useRoles_ = akka.protobuf.LazyStringArrayList.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -14261,6 +14323,9 @@ public final class ClusterMessages { if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, getUseRoleBytes()); } + for (int i = 0; i < useRoles_.size(); i++) { + output.writeBytes(5, useRoles_.getByteString(i)); + } getUnknownFields().writeTo(output); } @@ -14286,6 +14351,15 @@ public final class ClusterMessages { size += akka.protobuf.CodedOutputStream .computeBytesSize(4, getUseRoleBytes()); } + { + int dataSize = 0; + for (int i = 0; i < useRoles_.size(); i++) { + dataSize += akka.protobuf.CodedOutputStream + .computeBytesSizeNoTag(useRoles_.getByteString(i)); + } + size += dataSize; + size += 1 * getUseRolesList().size(); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -14410,6 +14484,8 @@ public final class ClusterMessages { bitField0_ = (bitField0_ & ~0x00000004); useRole_ = ""; bitField0_ = (bitField0_ & ~0x00000008); + useRoles_ = akka.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -14454,6 +14530,12 @@ public final class ClusterMessages { to_bitField0_ |= 0x00000008; } result.useRole_ = useRole_; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + useRoles_ = new akka.protobuf.UnmodifiableLazyStringList( + useRoles_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.useRoles_ = useRoles_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -14484,6 +14566,16 @@ public final class ClusterMessages { useRole_ = other.useRole_; onChanged(); } + if (!other.useRoles_.isEmpty()) { + if (useRoles_.isEmpty()) { + useRoles_ = other.useRoles_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureUseRolesIsMutable(); + useRoles_.addAll(other.useRoles_); + } + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -14696,6 +14788,99 @@ public final class ClusterMessages { return this; } + // repeated string useRoles = 5; + private akka.protobuf.LazyStringList useRoles_ = akka.protobuf.LazyStringArrayList.EMPTY; + private void ensureUseRolesIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + useRoles_ = new akka.protobuf.LazyStringArrayList(useRoles_); + bitField0_ |= 0x00000010; + } + } + /** + * repeated string useRoles = 5; + */ + public java.util.List + getUseRolesList() { + return java.util.Collections.unmodifiableList(useRoles_); + } + /** + * repeated string useRoles = 5; + */ + public int getUseRolesCount() { + return useRoles_.size(); + } + /** + * repeated string useRoles = 5; + */ + public java.lang.String getUseRoles(int index) { + return useRoles_.get(index); + } + /** + * repeated string useRoles = 5; + */ + public akka.protobuf.ByteString + getUseRolesBytes(int index) { + return useRoles_.getByteString(index); + } + /** + * repeated string useRoles = 5; + */ + public Builder setUseRoles( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUseRolesIsMutable(); + useRoles_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string useRoles = 5; + */ + public Builder addUseRoles( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUseRolesIsMutable(); + 
useRoles_.add(value); + onChanged(); + return this; + } + /** + * repeated string useRoles = 5; + */ + public Builder addAllUseRoles( + java.lang.Iterable values) { + ensureUseRolesIsMutable(); + super.addAll(values, useRoles_); + onChanged(); + return this; + } + /** + * repeated string useRoles = 5; + */ + public Builder clearUseRoles() { + useRoles_ = akka.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + /** + * repeated string useRoles = 5; + */ + public Builder addUseRolesBytes( + akka.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUseRolesIsMutable(); + useRoles_.add(value); + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:ClusterRouterPoolSettings) } @@ -14842,15 +15027,15 @@ public final class ClusterMessages { " \002(\0132\005.Pool\022,\n\010settings\030\002 \002(\0132\032.ClusterR" + "outerPoolSettings\"<\n\004Pool\022\024\n\014serializerI" + "d\030\001 \002(\r\022\020\n\010manifest\030\002 \002(\t\022\014\n\004data\030\003 \002(\014\"" + - "|\n\031ClusterRouterPoolSettings\022\026\n\016totalIns" + - "tances\030\001 \002(\r\022\033\n\023maxInstancesPerNode\030\002 \002(" + - "\r\022\031\n\021allowLocalRoutees\030\003 \002(\010\022\017\n\007useRole\030" + - "\004 \001(\t*D\n\022ReachabilityStatus\022\r\n\tReachable", - "\020\000\022\017\n\013Unreachable\020\001\022\016\n\nTerminated\020\002*b\n\014M" + - "emberStatus\022\013\n\007Joining\020\000\022\006\n\002Up\020\001\022\013\n\007Leav" + - "ing\020\002\022\013\n\007Exiting\020\003\022\010\n\004Down\020\004\022\013\n\007Removed\020" + - "\005\022\014\n\010WeaklyUp\020\006B\035\n\031akka.cluster.protobuf" + - ".msgH\001" + "\216\001\n\031ClusterRouterPoolSettings\022\026\n\016totalIn" + + "stances\030\001 \002(\r\022\033\n\023maxInstancesPerNode\030\002 \002" + + "(\r\022\031\n\021allowLocalRoutees\030\003 \002(\010\022\017\n\007useRole" + + "\030\004 \001(\t\022\020\n\010useRoles\030\005 \003(\t*D\n\022Reachability", + "Status\022\r\n\tReachable\020\000\022\017\n\013Unreachable\020\001\022\016" + + "\n\nTerminated\020\002*b\n\014MemberStatus\022\013\n\007Joinin" + + "g\020\000\022\006\n\002Up\020\001\022\013\n\007Leaving\020\002\022\013\n\007Exiting\020\003\022\010\n" + + "\004Down\020\004\022\013\n\007Removed\020\005\022\014\n\010WeaklyUp\020\006B\035\n\031ak" + + "ka.cluster.protobuf.msgH\001" }; akka.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new akka.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -14964,7 +15149,7 @@ public final class ClusterMessages { internal_static_ClusterRouterPoolSettings_fieldAccessorTable = new akka.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ClusterRouterPoolSettings_descriptor, - new java.lang.String[] { "TotalInstances", "MaxInstancesPerNode", "AllowLocalRoutees", "UseRole", }); + new java.lang.String[] { "TotalInstances", "MaxInstancesPerNode", "AllowLocalRoutees", "UseRole", "UseRoles", }); return null; } }; diff --git a/akka-cluster/src/main/mima-filters/2.4.10.backwards.excludes b/akka-cluster/src/main/mima-filters/2.4.10.backwards.excludes new file mode 100644 index 0000000000..4af828f2be --- /dev/null +++ b/akka-cluster/src/main/mima-filters/2.4.10.backwards.excludes @@ -0,0 +1,3 @@ +# #20644 long uids +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#UniqueAddressOrBuilder.hasUid2") 
+ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#UniqueAddressOrBuilder.getUid2") diff --git a/akka-cluster/src/main/mima-filters/2.4.14.backwards.excludes b/akka-cluster/src/main/mima-filters/2.4.14.backwards.excludes new file mode 100644 index 0000000000..76a56df0c2 --- /dev/null +++ b/akka-cluster/src/main/mima-filters/2.4.14.backwards.excludes @@ -0,0 +1 @@ +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ClusterEvent#ReachabilityEvent.member") diff --git a/akka-cluster/src/main/mima-filters/2.4.x.backwards.excludes b/akka-cluster/src/main/mima-filters/2.4.x.backwards.excludes new file mode 100644 index 0000000000..b97f6a94d1 --- /dev/null +++ b/akka-cluster/src/main/mima-filters/2.4.x.backwards.excludes @@ -0,0 +1,87 @@ +# #21423 Remove deprecated metrics +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterReadView.clusterMetrics") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.InternalClusterAction$MetricsTick$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsCollector") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.Metric") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsCollector$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.Metric$") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterSettings.MetricsMovingAverageHalfLife") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterSettings.MetricsGossipInterval") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterSettings.MetricsCollectorClass") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterSettings.MetricsInterval") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterSettings.MetricsEnabled") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.JmxMetricsCollector") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.SigarMetricsCollector") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricNumericConverter") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.ClusterEvent$ClusterMetricsChanged") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsGossipEnvelope") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.NodeMetrics") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics$Cpu$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics$Cpu") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.InternalClusterAction$PublisherCreated") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.EWMA") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsGossip$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.InternalClusterAction$PublisherCreated$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.NodeMetrics$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsGossipEnvelope$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.ClusterMetricsCollector") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.EWMA$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics$HeapMemory") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsGossip") 
+ProblemFilters.exclude[MissingClassProblem]("akka.cluster.ClusterEvent$ClusterMetricsChanged$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics$HeapMemory$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.SystemLoadAverageMetricsSelector$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingMetricsListener") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.WeightedRoutees") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingPool") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.CpuMetricsSelector$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.MixMetricsSelector") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.CapacityMetricsSelector") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.SystemLoadAverageMetricsSelector") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingRoutingLogic") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.HeapMetricsSelector") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingPool$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.CpuMetricsSelector") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingRoutingLogic$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.HeapMetricsSelector$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.MetricsSelector$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingGroup$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.MixMetricsSelectorBase") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingGroup") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.MixMetricsSelector$") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.MetricsSelector") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$EWMA$Builder") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$MetricOrBuilder") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$Number") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$NumberType") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossipEnvelopeOrBuilder") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$Builder") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetricsOrBuilder") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$NumberOrBuilder") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$EWMA") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossip$Builder") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossipOrBuilder") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossipEnvelope") 
+ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossip") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossipEnvelope$Builder") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$EWMAOrBuilder") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$Metric") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$Metric$Builder") +ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$Number$Builder") + +# #21537 coordinated shutdown +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterCoreDaemon.removed") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.convergence") + +# #21423 removal of deprecated serializer constructors (in 2.5.x) +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.protobuf.ClusterMessageSerializer.this") + +# #21423 remove deprecated methods in routing +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.routing.ClusterRouterGroup.paths") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.routing.ClusterRouterPool.nrOfInstances") + +# #21944 +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ClusterEvent#ReachabilityEvent.member") diff --git a/akka-cluster/src/main/mima-filters/2.5.4.backwards.excludes b/akka-cluster/src/main/mima-filters/2.5.4.backwards.excludes new file mode 100644 index 0000000000..51edebc818 --- /dev/null +++ b/akka-cluster/src/main/mima-filters/2.5.4.backwards.excludes @@ -0,0 +1,7 @@ +# #23257 replace ClusterRouterGroup/Pool "use-role" with "use-roles" +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#ClusterRouterPoolSettingsOrBuilder.getUseRoles") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#ClusterRouterPoolSettingsOrBuilder.getUseRolesBytes") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#ClusterRouterPoolSettingsOrBuilder.getUseRolesCount") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#ClusterRouterPoolSettingsOrBuilder.getUseRolesList") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.routing.ClusterRouterSettingsBase.useRole") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.routing.ClusterRouterSettingsBase.useRoles") \ No newline at end of file diff --git a/akka-cluster/src/main/protobuf/ClusterMessages.proto b/akka-cluster/src/main/protobuf/ClusterMessages.proto index 9ff9926337..b5fea5882b 100644 --- a/akka-cluster/src/main/protobuf/ClusterMessages.proto +++ b/akka-cluster/src/main/protobuf/ClusterMessages.proto @@ -229,4 +229,5 @@ message UniqueAddress { required uint32 maxInstancesPerNode = 2; required bool allowLocalRoutees = 3; optional string useRole = 4; + repeated string useRoles = 5; } \ No newline at end of file diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index c061fd63a1..d244c02722 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -291,9 +291,12 @@ akka { # Useful for master-worker scenario where all routees are remote. 
allow-local-routees = on + # Use members with all specified roles, or all members if undefined or empty. + use-roles = [] + + # Deprecated, since Akka 2.5.4, replaced by use-roles # Use members with specified role, or all members if undefined or empty. use-role = "" - } # Protobuf serializer for cluster messages diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 29a6c8a884..9c9c420270 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -73,7 +73,7 @@ final class ClusterSettings(val config: Config, val systemName: String) { } val SeedNodes: immutable.IndexedSeq[Address] = - immutableSeq(cc.getStringList("seed-nodes")).map { case AddressFromURIString(addr) ⇒ addr }.toVector + immutableSeq(cc.getStringList("seed-nodes")).map { case AddressFromURIString(address) ⇒ address }.toVector val SeedNodeTimeout: FiniteDuration = cc.getMillisDuration("seed-node-timeout") val RetryUnsuccessfulJoinAfter: Duration = { val key = "retry-unsuccessful-join-after" diff --git a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala index 7af7cc9ced..3db36d2230 100644 --- a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala +++ b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala @@ -13,8 +13,8 @@ import akka.serialization.{ BaseSerializer, SerializationExtension, SerializerWi import akka.protobuf.{ ByteString, MessageLite } import scala.annotation.tailrec -import scala.collection.JavaConverters._ import scala.collection.immutable +import scala.collection.JavaConverters._ import scala.concurrent.duration.Deadline import java.io.NotSerializableException @@ -166,8 +166,11 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri builder.setAllowLocalRoutees(settings.allowLocalRoutees) .setMaxInstancesPerNode(settings.maxInstancesPerNode) .setTotalInstances(settings.totalInstances) + .addAllUseRoles(settings.useRoles.asJava) + // for backwards compatibility settings.useRole.foreach(builder.setUseRole) + builder.build() } @@ -408,11 +411,12 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri } private def clusterRouterPoolSettingsFromProto(crps: cm.ClusterRouterPoolSettings): ClusterRouterPoolSettings = { + // For backwards compatibility, useRoles is the combination of getUseRole and getUseRolesList ClusterRouterPoolSettings( totalInstances = crps.getTotalInstances, maxInstancesPerNode = crps.getMaxInstancesPerNode, allowLocalRoutees = crps.getAllowLocalRoutees, - useRole = if (crps.hasUseRole) Some(crps.getUseRole) else None + useRoles = if (crps.hasUseRole) { crps.getUseRolesList.asScala.toSet + crps.getUseRole } else { crps.getUseRolesList.asScala.toSet } ) } diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala index a1697d39a2..cc707ffbfb 100644 --- a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala +++ b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala @@ -26,16 +26,26 @@ import akka.routing.RoutingLogic import com.typesafe.config.Config import com.typesafe.config.ConfigFactory -import scala.annotation.tailrec +import scala.annotation.{ 
tailrec, varargs } import scala.collection.immutable +import scala.collection.JavaConverters._ object ClusterRouterGroupSettings { + @deprecated("useRole has been replaced with useRoles", since = "2.5.4") + def apply(totalInstances: Int, routeesPaths: immutable.Seq[String], allowLocalRoutees: Boolean, useRole: Option[String]): ClusterRouterGroupSettings = + ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRole.toSet) + + @varargs + def apply(totalInstances: Int, routeesPaths: immutable.Seq[String], allowLocalRoutees: Boolean, useRoles: String*): ClusterRouterGroupSettings = + ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles.toSet) + + // For backwards compatibility, useRoles is the combination of use-roles and use-role def fromConfig(config: Config): ClusterRouterGroupSettings = ClusterRouterGroupSettings( totalInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config), routeesPaths = immutableSeq(config.getStringList("routees.paths")), allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"), - useRole = ClusterRouterSettingsBase.useRoleOption(config.getString("cluster.use-role"))) + useRoles = config.getStringList("cluster.use-roles").asScala.toSet ++ ClusterRouterSettingsBase.useRoleOption(config.getString("cluster.use-role"))) } /** @@ -46,33 +56,71 @@ final case class ClusterRouterGroupSettings( totalInstances: Int, routeesPaths: immutable.Seq[String], allowLocalRoutees: Boolean, - useRole: Option[String]) extends ClusterRouterSettingsBase { + useRoles: Set[String]) extends ClusterRouterSettingsBase { + + // For binary compatibility + @deprecated("useRole has been replaced with useRoles", since = "2.5.4") + def useRole: Option[String] = useRoles.headOption + + @deprecated("useRole has been replaced with useRoles", since = "2.5.4") + def this(totalInstances: Int, routeesPaths: immutable.Seq[String], allowLocalRoutees: Boolean, useRole: Option[String]) = + this(totalInstances, routeesPaths, allowLocalRoutees, useRole.toSet) /** * Java API */ + @deprecated("useRole has been replaced with useRoles", since = "2.5.4") def this(totalInstances: Int, routeesPaths: java.lang.Iterable[String], allowLocalRoutees: Boolean, useRole: String) = - this(totalInstances, immutableSeq(routeesPaths), allowLocalRoutees, ClusterRouterSettingsBase.useRoleOption(useRole)) + this(totalInstances, immutableSeq(routeesPaths), allowLocalRoutees, Option(useRole).toSet) + + /** + * Java API + */ + def this(totalInstances: Int, routeesPaths: java.lang.Iterable[String], allowLocalRoutees: Boolean, useRoles: java.util.Set[String]) = + this(totalInstances, immutableSeq(routeesPaths), allowLocalRoutees, useRoles.asScala.toSet) + + // For binary compatibility + @deprecated("Use constructor with useRoles instead", since = "2.5.4") + def copy(totalInstances: Int = totalInstances, routeesPaths: immutable.Seq[String] = routeesPaths, allowLocalRoutees: Boolean = allowLocalRoutees, useRole: Option[String] = useRole): ClusterRouterGroupSettings = + new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRole) if (totalInstances <= 0) throw new IllegalArgumentException("totalInstances of cluster router must be > 0") if ((routeesPaths eq null) || routeesPaths.isEmpty || routeesPaths.head == "") throw new IllegalArgumentException("routeesPaths must be defined") - routeesPaths.foreach(p ⇒ p match { + routeesPaths.foreach { case RelativeActorPath(elements) ⇒ // good - case _ ⇒ + case p ⇒ throw new 
IllegalArgumentException(s"routeesPaths [$p] is not a valid actor path without address information") - }) + } + def withUseRoles(useRoles: Set[String]): ClusterRouterGroupSettings = new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles) + + @varargs + def withUseRoles(useRoles: String*): ClusterRouterGroupSettings = new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles.toSet) + + /** + * Java API + */ + def withUseRoles(useRoles: java.util.Set[String]): ClusterRouterGroupSettings = new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles.asScala.toSet) } object ClusterRouterPoolSettings { + @deprecated("useRole has been replaced with useRoles", since = "2.5.4") + def apply(totalInstances: Int, maxInstancesPerNode: Int, allowLocalRoutees: Boolean, useRole: Option[String]): ClusterRouterPoolSettings = + ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRole.toSet) + + @varargs + def apply(totalInstances: Int, maxInstancesPerNode: Int, allowLocalRoutees: Boolean, useRoles: String*): ClusterRouterPoolSettings = + ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.toSet) + + // For backwards compatibility, useRoles is the combination of use-roles and use-role def fromConfig(config: Config): ClusterRouterPoolSettings = ClusterRouterPoolSettings( totalInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config), maxInstancesPerNode = config.getInt("cluster.max-nr-of-instances-per-node"), allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"), - useRole = ClusterRouterSettingsBase.useRoleOption(config.getString("cluster.use-role"))) + useRoles = config.getStringList("cluster.use-roles").asScala.toSet ++ ClusterRouterSettingsBase.useRoleOption(config.getString("cluster.use-role"))) } /** @@ -85,16 +133,45 @@ final case class ClusterRouterPoolSettings( totalInstances: Int, maxInstancesPerNode: Int, allowLocalRoutees: Boolean, - useRole: Option[String]) extends ClusterRouterSettingsBase { + useRoles: Set[String]) extends ClusterRouterSettingsBase { + + // For binary compatibility + @deprecated("useRole has been replaced with useRoles", since = "2.5.4") + def useRole: Option[String] = useRoles.headOption + + @deprecated("useRole has been replaced with useRoles", since = "2.5.4") + def this(totalInstances: Int, maxInstancesPerNode: Int, allowLocalRoutees: Boolean, useRole: Option[String]) = + this(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRole.toSet) /** * Java API */ + @deprecated("useRole has been replaced with useRoles", since = "2.5.4") def this(totalInstances: Int, maxInstancesPerNode: Int, allowLocalRoutees: Boolean, useRole: String) = - this(totalInstances, maxInstancesPerNode, allowLocalRoutees, ClusterRouterSettingsBase.useRoleOption(useRole)) + this(totalInstances, maxInstancesPerNode, allowLocalRoutees, Option(useRole).toSet) + + /** + * Java API + */ + def this(totalInstances: Int, maxInstancesPerNode: Int, allowLocalRoutees: Boolean, useRoles: java.util.Set[String]) = + this(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.asScala.toSet) + + // For binary compatibility + @deprecated("Use copy with useRoles instead", since = "2.5.4") + def copy(totalInstances: Int = totalInstances, maxInstancesPerNode: Int = maxInstancesPerNode, allowLocalRoutees: Boolean = allowLocalRoutees, useRole: Option[String] = useRole): ClusterRouterPoolSettings = + new 
ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRole) if (maxInstancesPerNode <= 0) throw new IllegalArgumentException("maxInstancesPerNode of cluster pool router must be > 0") + def withUseRoles(useRoles: Set[String]): ClusterRouterPoolSettings = new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles) + + @varargs + def withUseRoles(useRoles: String*): ClusterRouterPoolSettings = new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.toSet) + + /** + * Java API + */ + def withUseRoles(useRoles: java.util.Set[String]): ClusterRouterPoolSettings = new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.asScala.toSet) } /** @@ -125,10 +202,11 @@ private[akka] object ClusterRouterSettingsBase { private[akka] trait ClusterRouterSettingsBase { def totalInstances: Int def allowLocalRoutees: Boolean - def useRole: Option[String] + def useRoles: Set[String] - require(useRole.isEmpty || useRole.get.nonEmpty, "useRole must be either None or non-empty Some wrapped role") require(totalInstances > 0, "totalInstances of cluster router must be > 0") + require(useRoles != null, "useRoles must be non-null") + require(!useRoles.exists(role ⇒ role == null || role.isEmpty), "All roles in useRoles must be non-empty") } /** @@ -141,11 +219,11 @@ private[akka] trait ClusterRouterSettingsBase { final case class ClusterRouterGroup(local: Group, settings: ClusterRouterGroupSettings) extends Group with ClusterRouterConfigBase { override def paths(system: ActorSystem): immutable.Iterable[String] = - if (settings.allowLocalRoutees && settings.useRole.isDefined) { - if (Cluster(system).selfRoles.contains(settings.useRole.get)) { + if (settings.allowLocalRoutees && settings.useRoles.nonEmpty) { + if (settings.useRoles.subsetOf(Cluster(system).selfRoles)) { settings.routeesPaths } else Nil - } else if (settings.allowLocalRoutees && settings.useRole.isEmpty) { + } else if (settings.allowLocalRoutees && settings.useRoles.isEmpty) { settings.routeesPaths } else Nil @@ -157,8 +235,8 @@ final case class ClusterRouterGroup(local: Group, settings: ClusterRouterGroupSe override def withFallback(other: RouterConfig): RouterConfig = other match { case ClusterRouterGroup(_: ClusterRouterGroup, _) ⇒ throw new IllegalStateException( "ClusterRouterGroup is not allowed to wrap a ClusterRouterGroup") - case ClusterRouterGroup(local, _) ⇒ - copy(local = this.local.withFallback(local).asInstanceOf[Group]) + case ClusterRouterGroup(otherLocal, _) ⇒ + copy(local = this.local.withFallback(otherLocal).asInstanceOf[Group]) case _ ⇒ copy(local = this.local.withFallback(other).asInstanceOf[Group]) } @@ -192,11 +270,11 @@ final case class ClusterRouterPool(local: Pool, settings: ClusterRouterPoolSetti * Initial number of routee instances */ override def nrOfInstances(sys: ActorSystem): Int = - if (settings.allowLocalRoutees && settings.useRole.isDefined) { - if (Cluster(sys).selfRoles.contains(settings.useRole.get)) { + if (settings.allowLocalRoutees && settings.useRoles.nonEmpty) { + if (settings.useRoles.subsetOf(Cluster(sys).selfRoles)) { settings.maxInstancesPerNode } else 0 - } else if (settings.allowLocalRoutees && settings.useRole.isEmpty) { + } else if (settings.allowLocalRoutees && settings.useRoles.isEmpty) { settings.maxInstancesPerNode } else 0 @@ -234,7 +312,7 @@ private[akka] trait ClusterRouterConfigBase extends RouterConfig { // Intercept ClusterDomainEvent and route them to 
the ClusterRouterActor override def isManagementMessage(msg: Any): Boolean = - (msg.isInstanceOf[ClusterDomainEvent]) || msg.isInstanceOf[CurrentClusterState] || super.isManagementMessage(msg) + msg.isInstanceOf[ClusterDomainEvent] || msg.isInstanceOf[CurrentClusterState] || super.isManagementMessage(msg) } /** @@ -383,17 +461,14 @@ private[akka] trait ClusterRouterActor { this: RouterActor ⇒ def isAvailable(m: Member): Boolean = (m.status == MemberStatus.Up || m.status == MemberStatus.WeaklyUp) && - satisfiesRole(m.roles) && + satisfiesRoles(m.roles) && (settings.allowLocalRoutees || m.address != cluster.selfAddress) - private def satisfiesRole(memberRoles: Set[String]): Boolean = settings.useRole match { - case None ⇒ true - case Some(r) ⇒ memberRoles.contains(r) - } + private def satisfiesRoles(memberRoles: Set[String]): Boolean = settings.useRoles.subsetOf(memberRoles) def availableNodes: immutable.SortedSet[Address] = { import akka.cluster.Member.addressOrdering - if (nodes.isEmpty && settings.allowLocalRoutees && satisfiesRole(cluster.selfRoles)) + if (nodes.isEmpty && settings.allowLocalRoutees && satisfiesRoles(cluster.selfRoles)) // use my own node, cluster information not updated yet immutable.SortedSet(cluster.selfAddress) else @@ -404,11 +479,11 @@ private[akka] trait ClusterRouterActor { this: RouterActor ⇒ * Fills in self address for local ActorRef */ def fullAddress(routee: Routee): Address = { - val a = routee match { + val address = routee match { case ActorRefRoutee(ref) ⇒ ref.path.address case ActorSelectionRoutee(sel) ⇒ sel.anchor.path.address } - a match { + address match { case Address(_, _, None, None) ⇒ cluster.selfAddress case a ⇒ a } @@ -457,5 +532,4 @@ private[akka] trait ClusterRouterActor { this: RouterActor ⇒ case ReachableMember(m) ⇒ if (isAvailable(m)) addMember(m) } -} - +} \ No newline at end of file diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala index f17bd01ce7..f919644b0a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala @@ -76,7 +76,7 @@ abstract class ClusterConsistentHashingGroupSpec extends MultiNodeSpec(ClusterCo val router = system.actorOf( ClusterRouterGroup( local = ConsistentHashingGroup(paths, hashMapping = hashMapping), - settings = ClusterRouterGroupSettings(totalInstances = 10, paths, allowLocalRoutees = true, useRole = None)).props(), + settings = ClusterRouterGroupSettings(totalInstances = 10, paths, allowLocalRoutees = true)).props(), "router") // it may take some time until router receives cluster member events awaitAssert { currentRoutees(router).size should ===(3) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala index 7237b1c94a..de985cd6c3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala @@ -124,7 +124,7 @@ abstract class ClusterConsistentHashingRouterSpec extends MultiNodeSpec(ClusterC val router2 = system.actorOf( ClusterRouterPool( local = ConsistentHashingPool(nrOfInstances = 0), - settings = 
ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = None)). + settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 2, allowLocalRoutees = true)). props(Props[Echo]), "router2") // it may take some time until router receives cluster member events @@ -159,7 +159,7 @@ abstract class ClusterConsistentHashingRouterSpec extends MultiNodeSpec(ClusterC val router4 = system.actorOf( ClusterRouterPool( local = ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping), - settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)). + settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true)). props(Props[Echo]), "router4") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala index c867015686..9e40cc866a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala @@ -85,7 +85,7 @@ object ClusterRoundRobinMultiJvmSpec extends MultiNodeConfig { router = round-robin-pool cluster { enabled = on - use-role = a + use-roles = ["a"] max-total-nr-of-instances = 10 } } @@ -115,7 +115,7 @@ abstract class ClusterRoundRobinSpec extends MultiNodeSpec(ClusterRoundRobinMult lazy val router2 = system.actorOf( ClusterRouterPool( RoundRobinPool(nrOfInstances = 0), - ClusterRouterPoolSettings(totalInstances = 3, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)). + ClusterRouterPoolSettings(totalInstances = 3, maxInstancesPerNode = 1, allowLocalRoutees = true)). props(Props[SomeActor]), "router2") lazy val router3 = system.actorOf(FromConfig.props(Props[SomeActor]), "router3") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala index 0816429683..ecee9c051b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala @@ -99,12 +99,12 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp "pool local: off, roles: off, 6 => 0,2,2" taggedAs LongRunningTest in { runOn(first) { - val role = Some("b") + val roles = Set("b") val router = system.actorOf( ClusterRouterPool( RoundRobinPool(nrOfInstances = 6), - ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = false, useRole = role)). + ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = false, useRoles = roles)). 
props(Props[SomeActor]), "router-2") @@ -129,13 +129,13 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp "group local: off, roles: off, 6 => 0,2,2" taggedAs LongRunningTest in { runOn(first) { - val role = Some("b") + val roles = Set("b") val router = system.actorOf( ClusterRouterGroup( RoundRobinGroup(paths = Nil), ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"), - allowLocalRoutees = false, useRole = role)).props, + allowLocalRoutees = false, useRoles = roles)).props, "router-2b") awaitAssert(currentRoutees(router).size should ===(4)) @@ -159,12 +159,12 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp "pool local: on, role: b, 6 => 0,2,2" taggedAs LongRunningTest in { runOn(first) { - val role = Some("b") + val roles = Set("b") val router = system.actorOf( ClusterRouterPool( RoundRobinPool(nrOfInstances = 6), - ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = role)). + ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRoles = roles)). props(Props[SomeActor]), "router-3") @@ -189,13 +189,13 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp "group local: on, role: b, 6 => 0,2,2" taggedAs LongRunningTest in { runOn(first) { - val role = Some("b") + val roles = Set("b") val router = system.actorOf( ClusterRouterGroup( RoundRobinGroup(paths = Nil), ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"), - allowLocalRoutees = true, useRole = role)).props, + allowLocalRoutees = true, useRoles = roles)).props, "router-3b") awaitAssert(currentRoutees(router).size should ===(4)) @@ -219,12 +219,12 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp "pool local: on, role: a, 6 => 2,0,0" taggedAs LongRunningTest in { runOn(first) { - val role = Some("a") + val roles = Set("a") val router = system.actorOf( ClusterRouterPool( RoundRobinPool(nrOfInstances = 6), - ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = role)). + ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRoles = roles)). props(Props[SomeActor]), "router-4") @@ -249,13 +249,13 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp "group local: on, role: a, 6 => 2,0,0" taggedAs LongRunningTest in { runOn(first) { - val role = Some("a") + val roles = Set("a") val router = system.actorOf( ClusterRouterGroup( RoundRobinGroup(paths = Nil), ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"), - allowLocalRoutees = true, useRole = role)).props, + allowLocalRoutees = true, useRoles = roles)).props, "router-4b") awaitAssert(currentRoutees(router).size should ===(2)) @@ -279,12 +279,12 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp "pool local: on, role: c, 6 => 2,2,2" taggedAs LongRunningTest in { runOn(first) { - val role = Some("c") + val roles = Set("c") val router = system.actorOf( ClusterRouterPool( RoundRobinPool(nrOfInstances = 6), - ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = role)). + ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRoles = roles)). 
props(Props[SomeActor]), "router-5") @@ -309,13 +309,13 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp "group local: on, role: c, 6 => 2,2,2" taggedAs LongRunningTest in { runOn(first) { - val role = Some("c") + val roles = Set("c") val router = system.actorOf( ClusterRouterGroup( RoundRobinGroup(paths = Nil), ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"), - allowLocalRoutees = true, useRole = role)).props, + allowLocalRoutees = true, useRoles = roles)).props, "router-5b") awaitAssert(currentRoutees(router).size should ===(6)) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala index 60db31e27d..b03376e559 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala @@ -57,7 +57,7 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) { service, deployment.get.config, ClusterRouterPool(RoundRobinPool(20), ClusterRouterPoolSettings( - totalInstances = 20, maxInstancesPerNode = 3, allowLocalRoutees = false, useRole = None)), + totalInstances = 20, maxInstancesPerNode = 3, allowLocalRoutees = false)), ClusterScope, Deploy.NoDispatcherGiven, Deploy.NoMailboxGiven))) @@ -73,7 +73,7 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) { service, deployment.get.config, ClusterRouterGroup(RoundRobinGroup(List("/user/myservice")), ClusterRouterGroupSettings( - totalInstances = 20, routeesPaths = List("/user/myservice"), allowLocalRoutees = false, useRole = None)), + totalInstances = 20, routeesPaths = List("/user/myservice"), allowLocalRoutees = false)), ClusterScope, "mydispatcher", "mymailbox"))) diff --git a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala index 8c7a174751..d1aeeda936 100644 --- a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala @@ -4,12 +4,12 @@ package akka.cluster.protobuf import akka.cluster._ -import akka.actor.{ Address, ExtendedActorSystem } +import akka.actor.{ ActorSystem, Address, ExtendedActorSystem } import akka.cluster.routing.{ ClusterRouterPool, ClusterRouterPoolSettings } -import akka.routing.{ DefaultOptimalSizeExploringResizer, RoundRobinPool } +import akka.routing.RoundRobinPool import collection.immutable.SortedSet -import akka.testkit.AkkaSpec +import akka.testkit.{ AkkaSpec, TestKit } class ClusterMessageSerializerSpec extends AkkaSpec( "akka.actor.provider = cluster") { @@ -80,6 +80,41 @@ class ClusterMessageSerializerSpec extends AkkaSpec( checkSerialization(InternalClusterAction.Welcome(uniqueAddress, g2)) } + "be compatible with wire format of version 2.5.3 (using use-role instead of use-roles)" in { + val system = ActorSystem("ClusterMessageSerializer-old-wire-format") + + try { + val serializer = new ClusterMessageSerializer(system.asInstanceOf[ExtendedActorSystem]) + + // the oldSnapshot was created with the version of ClusterRouterPoolSettings in Akka 2.5.3. See issue #23257. 
+ // It was created with: + /* + import org.apache.commons.codec.binary.Hex.encodeHex + val bytes = serializer.toBinary( + ClusterRouterPool(RoundRobinPool(nrOfInstances = 4), ClusterRouterPoolSettings(123, 345, true, Some("role ABC")))) + println(String.valueOf(encodeHex(bytes))) + */ + + val oldBytesHex = "0a0f08101205524f5252501a04080418001211087b10d90218012208726f6c6520414243" + + import org.apache.commons.codec.binary.Hex.decodeHex + val oldBytes = decodeHex(oldBytesHex.toCharArray) + val result = serializer.fromBinary(oldBytes, classOf[ClusterRouterPool]) + + result match { + case pool: ClusterRouterPool ⇒ + pool.settings.totalInstances should ===(123) + pool.settings.maxInstancesPerNode should ===(345) + pool.settings.allowLocalRoutees should ===(true) + pool.settings.useRole should ===(Some("role ABC")) + pool.settings.useRoles should ===(Set("role ABC")) + } + } finally { + TestKit.shutdownActorSystem(system) + } + + } + "add a default data center role if none is present" in { val env = roundtrip(GossipEnvelope(a1.uniqueAddress, d1.uniqueAddress, Gossip(SortedSet(a1, d1)))) env.gossip.members.head.roles should be(Set(ClusterSettings.DcRolePrefix + "default")) @@ -87,7 +122,34 @@ class ClusterMessageSerializerSpec extends AkkaSpec( } } "Cluster router pool" must { - "be serializable" in { + "be serializable with no role" in { + checkSerialization(ClusterRouterPool( + RoundRobinPool( + nrOfInstances = 4 + ), + ClusterRouterPoolSettings( + totalInstances = 2, + maxInstancesPerNode = 5, + allowLocalRoutees = true + ) + )) + } + + "be serializable with one role" in { + checkSerialization(ClusterRouterPool( + RoundRobinPool( + nrOfInstances = 4 + ), + ClusterRouterPoolSettings( + totalInstances = 2, + maxInstancesPerNode = 5, + allowLocalRoutees = true, + useRoles = Set("Richard, Duke of Gloucester") + ) + )) + } + + "be serializable with many roles" in { checkSerialization(ClusterRouterPool( RoundRobinPool( nrOfInstances = 4), @@ -95,7 +157,9 @@ class ClusterMessageSerializerSpec extends AkkaSpec( totalInstances = 2, maxInstancesPerNode = 5, allowLocalRoutees = true, - useRole = Some("Richard, Duke of Gloucester")))) + useRoles = Set("Richard, Duke of Gloucester", "Hongzhi Emperor", "Red Rackham") + ) + )) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala index 6f36ed573f..75b6f6d24f 100644 --- a/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala @@ -41,8 +41,7 @@ class ClusterRouterSupervisorSpec extends AkkaSpec(""" }), ClusterRouterPoolSettings( totalInstances = 1, maxInstancesPerNode = 1, - allowLocalRoutees = true, - useRole = None)). + allowLocalRoutees = true)). props(Props(classOf[KillableActor], testActor)), name = "therouter") router ! 
"go away" diff --git a/akka-distributed-data/src/main/mima-filters/2.4.0.backwards.excludes b/akka-distributed-data/src/main/mima-filters/2.4.0.backwards.excludes new file mode 100644 index 0000000000..5ba57065ae --- /dev/null +++ b/akka-distributed-data/src/main/mima-filters/2.4.0.backwards.excludes @@ -0,0 +1,2 @@ +# #18328 optimize VersionVector for size 1 +ProblemFilters.exclude[Problem]("akka.cluster.ddata.VersionVector*") diff --git a/akka-distributed-data/src/main/mima-filters/2.4.10.backwards.excludes b/akka-distributed-data/src/main/mima-filters/2.4.10.backwards.excludes new file mode 100644 index 0000000000..5d370e5db0 --- /dev/null +++ b/akka-distributed-data/src/main/mima-filters/2.4.10.backwards.excludes @@ -0,0 +1,3 @@ +# #20644 long uids +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.msg.ReplicatorMessages#UniqueAddressOrBuilder.hasUid2") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.msg.ReplicatorMessages#UniqueAddressOrBuilder.getUid2") diff --git a/akka-distributed-data/src/main/mima-filters/2.4.14.backwards.excludes b/akka-distributed-data/src/main/mima-filters/2.4.14.backwards.excludes new file mode 100644 index 0000000000..6c095f0771 --- /dev/null +++ b/akka-distributed-data/src/main/mima-filters/2.4.14.backwards.excludes @@ -0,0 +1,4 @@ +# #21645 durable distributed data +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.WriteAggregator.props") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.WriteAggregator.this") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.Replicator.write") diff --git a/akka-distributed-data/src/main/mima-filters/2.4.x.backwards.excludes b/akka-distributed-data/src/main/mima-filters/2.4.x.backwards.excludes new file mode 100644 index 0000000000..b98193d0f6 --- /dev/null +++ b/akka-distributed-data/src/main/mima-filters/2.4.x.backwards.excludes @@ -0,0 +1,67 @@ +# #22269 GSet as delta-CRDT +# constructor supplied by companion object +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.GSet.this") + +# #21875 delta-CRDT +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.GCounter.this") + +# #22188 ORSet delta-CRDT +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.ORSet.this") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.SerializationSupport.versionVectorToProto") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.SerializationSupport.versionVectorFromProto") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.SerializationSupport.versionVectorFromBinary") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.protobuf.ReplicatedDataSerializer.versionVectorToProto") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.protobuf.ReplicatedDataSerializer.versionVectorFromProto") + +# #21647 pruning +ProblemFilters.exclude[Problem]("akka.cluster.ddata.PruningState*") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.RemovedNodePruning.modifiedByNodes") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.RemovedNodePruning.usingNodes") +ProblemFilters.exclude[Problem]("akka.cluster.ddata.Replicator*") +ProblemFilters.exclude[Problem]("akka.cluster.ddata.protobuf.msg*") + +# #21648 Prefer reachable nodes in consistency writes/reads 
+ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.ReadWriteAggregator.unreachable") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.WriteAggregator.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.WriteAggregator.props") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.ReadAggregator.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.ReadAggregator.props") + +# #22035 Make it possible to use anything as the key in a map +ProblemFilters.exclude[Problem]("akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages*") +ProblemFilters.exclude[Problem]("akka.cluster.ddata.ORMap*") +ProblemFilters.exclude[Problem]("akka.cluster.ddata.LWWMap*") +ProblemFilters.exclude[Problem]("akka.cluster.ddata.PNCounterMap*") +ProblemFilters.exclude[Problem]("akka.cluster.ddata.ORMultiMap*") + +# #20140 durable distributed data +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#ReplicationDeleteFailure.apply") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DeleteSuccess.apply") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.Replicator#DeleteResponse.getRequest") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.Replicator#DeleteResponse.request") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.Replicator#Command.request") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator.receiveDelete") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#ReplicationDeleteFailure.copy") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#ReplicationDeleteFailure.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DeleteSuccess.copy") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DeleteSuccess.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#Delete.apply") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DataDeleted.apply") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DataDeleted.copy") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DataDeleted.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#Delete.copy") + +# #21618 distributed data +ProblemFilters.exclude[MissingTypesProblem]("akka.cluster.ddata.Replicator$ReadMajority$") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#ReadMajority.copy") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#ReadMajority.apply") +ProblemFilters.exclude[MissingTypesProblem]("akka.cluster.ddata.Replicator$WriteMajority$") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#WriteMajority.copy") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#WriteMajority.apply") + +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.DurableStore#Store.apply") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.DurableStore#Store.copy$default$2") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.DurableStore#Store.data") 
+ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.DurableStore#Store.copy") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.DurableStore#Store.this") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.LmdbDurableStore.dbPut") diff --git a/akka-distributed-data/src/main/mima-filters/2.5.0.backwards.excludes b/akka-distributed-data/src/main/mima-filters/2.5.0.backwards.excludes new file mode 100644 index 0000000000..99a213288d --- /dev/null +++ b/akka-distributed-data/src/main/mima-filters/2.5.0.backwards.excludes @@ -0,0 +1,6 @@ +# #22759 LMDB files +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.LmdbDurableStore.env") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.LmdbDurableStore.db") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.LmdbDurableStore.keyBuffer") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.LmdbDurableStore.valueBuffer_=") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.LmdbDurableStore.valueBuffer") diff --git a/akka-distributed-data/src/main/mima-filters/2.5.2.backwards.excludes b/akka-distributed-data/src/main/mima-filters/2.5.2.backwards.excludes new file mode 100644 index 0000000000..d358d2198b --- /dev/null +++ b/akka-distributed-data/src/main/mima-filters/2.5.2.backwards.excludes @@ -0,0 +1,2 @@ +# #23025 OversizedPayloadException DeltaPropagation +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.DeltaPropagationSelector.maxDeltaSize") diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala index 0c2291c936..9a71342d29 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala @@ -523,7 +523,8 @@ object Replicator { /** Java API */ def getRequest: Optional[Any] = Optional.ofNullable(request.orNull) } - final case class UpdateSuccess[A <: ReplicatedData](key: Key[A], request: Option[Any]) extends UpdateResponse[A] + final case class UpdateSuccess[A <: ReplicatedData](key: Key[A], request: Option[Any]) + extends UpdateResponse[A] with DeadLetterSuppression sealed abstract class UpdateFailure[A <: ReplicatedData] extends UpdateResponse[A] /** diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala index 39986c538f..cc4ff472f9 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala @@ -81,7 +81,7 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig) implicit val cluster = Cluster(system) - val timeout = 5.seconds.dilated + val timeout = 14.seconds.dilated // initialization of lmdb can be very slow in CI environment val writeTwo = WriteTo(2, timeout) val readTwo = ReadFrom(2, timeout) @@ -238,9 +238,9 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig) runOn(first) { val sys1 = ActorSystem("AdditionalSys", system.settings.config) - val addr = Cluster(sys1).selfAddress + val address = Cluster(sys1).selfAddress try { - Cluster(sys1).join(addr) + Cluster(sys1).join(address) new TestKit(sys1) with ImplicitSender { val r = newReplicator(sys1) @@ -276,11 +276,11 @@ 
abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig) "AdditionalSys", // use the same port ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = ${addr.port.get} - akka.remote.netty.tcp.port = ${addr.port.get} + akka.remote.artery.canonical.port = ${address.port.get} + akka.remote.netty.tcp.port = ${address.port.get} """).withFallback(system.settings.config)) try { - Cluster(sys2).join(addr) + Cluster(sys2).join(address) new TestKit(sys2) with ImplicitSender { val r2: ActorRef = newReplicator(sys2) diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala index daa222b28a..11f3e771d5 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala @@ -148,10 +148,10 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN enterBarrier("pruned") runOn(first) { - val addr = cluster2.selfAddress + val address = cluster2.selfAddress val sys3 = ActorSystem(system.name, ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = ${addr.port.get} - akka.remote.netty.tcp.port = ${addr.port.get} + akka.remote.artery.canonical.port = ${address.port.get} + akka.remote.netty.tcp.port = ${address.port.get} """).withFallback(system.settings.config)) val cluster3 = Cluster(sys3) val replicator3 = startReplicator(sys3) diff --git a/akka-docs/build.sbt b/akka-docs/build.sbt index ce652673d3..9ff70471a1 100644 --- a/akka-docs/build.sbt +++ b/akka-docs/build.sbt @@ -16,10 +16,20 @@ enablePlugins(AkkaParadoxPlugin) name in (Compile, paradox) := "Akka" +val paradoxBrowse = taskKey[Unit]("Open the docs in the default browser") +paradoxBrowse := { + import java.awt.Desktop + val rootDocFile = (target in (Compile, paradox)).value / "index.html" + val log = streams.value.log + if (!rootDocFile.exists()) log.info("No generated docs found, generate with the 'paradox' task") + else if (Desktop.isDesktopSupported) Desktop.getDesktop.open(rootDocFile) + else log.info(s"Couldn't open default browser, but docs are at $rootDocFile") +} + paradoxProperties ++= Map( "akka.canonical.base_url" -> "http://doc.akka.io/docs/akka/current", "github.base_url" -> GitHub.url(version.value), // for links like this: @github[#1](#1) or @github[83986f9](83986f9) - "extref.akka.http.base_url" -> "http://doc.akka.io/docs/akka-http/current", + "extref.akka.http.base_url" -> "http://doc.akka.io/docs/akka-http/current/%s", "extref.wikipedia.base_url" -> "https://en.wikipedia.org/wiki/%s", "extref.github.base_url" -> (GitHub.url(version.value) + "/%s"), // for links to our sources "extref.samples.base_url" -> "https://github.com/akka/akka-samples/tree/2.5/%s", diff --git a/akka-docs/src/main/paradox/index.md b/akka-docs/src/main/paradox/index.md index 486ea75cf7..581acc7149 100644 --- a/akka-docs/src/main/paradox/index.md +++ b/akka-docs/src/main/paradox/index.md @@ -1,4 +1,4 @@ # Contents -* @ref[Java Documentation](java/index.md) -* @ref[Scala Documentation](scala/index.md) +* @ref:[Java Documentation](java/index.md) +* @ref:[Scala Documentation](scala/index.md) diff --git a/akka-docs/src/main/paradox/java/camel.md b/akka-docs/src/main/paradox/java/camel.md deleted file mode 100644 index 7221901871..0000000000 --- a/akka-docs/src/main/paradox/java/camel.md +++ /dev/null @@ -1,415 +0,0 @@ -# Camel - -@@@ warning - -Akka 
Camel is deprecated in favour of [Alpakka](https://github.com/akka/alpakka) , the Akka Streams based collection of integrations to various endpoints (including Camel). - -@@@ - -## Introduction - -The akka-camel module allows Untyped Actors to receive -and send messages over a great variety of protocols and APIs. -In addition to the native Scala and Java actor API, actors can now exchange messages with other systems over large number -of protocols and APIs such as HTTP, SOAP, TCP, FTP, SMTP or JMS, to mention a -few. At the moment, approximately 80 protocols and APIs are supported. - -### Apache Camel - -The akka-camel module is based on [Apache Camel](http://camel.apache.org/), a powerful and light-weight -integration framework for the JVM. For an introduction to Apache Camel you may -want to read this [Apache Camel article](http://architects.dzone.com/articles/apache-camel-integration). Camel comes with a -large number of [components](http://camel.apache.org/components.html) that provide bindings to different protocols and -APIs. The [camel-extra](http://code.google.com/p/camel-extra/) project provides further components. - -### Consumer - -Here's an example of using Camel's integration components in Akka. - -@@snip [MyEndpoint.java]($code$/java/jdocs/camel/MyEndpoint.java) { #Consumer-mina } - -The above example exposes an actor over a TCP endpoint via Apache -Camel's [Mina component](http://camel.apache.org/mina2.html). The actor implements the *getEndpointUri* method to define -an endpoint from which it can receive messages. After starting the actor, TCP -clients can immediately send messages to and receive responses from that -actor. If the message exchange should go over HTTP (via Camel's Jetty -component), the actor's *getEndpointUri* method should return a different URI, for instance "jetty:[http://localhost:8877/example](http://localhost:8877/example)". -In the above case an extra constructor is added that can set the endpoint URI, which would result in -the *getEndpointUri* returning the URI that was set using this constructor. - -### Producer - -Actors can also trigger message exchanges with external systems i.e. produce to -Camel endpoints. - -@@snip [Orders.java]($code$/java/jdocs/camel/Orders.java) { #Producer } - -In the above example, any message sent to this actor will be sent to -the JMS queue `Orders`. Producer actors may choose from the same set of Camel -components as Consumer actors do. -Below an example of how to send a message to the Orders producer. - -@@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #TellProducer } - -### CamelMessage - -The number of Camel components is constantly increasing. The akka-camel module -can support these in a plug-and-play manner. Just add them to your application's -classpath, define a component-specific endpoint URI and use it to exchange -messages over the component-specific protocols or APIs. This is possible because -Camel components bind protocol-specific message formats to a Camel-specific -[normalized message format](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Message.java). The normalized message format hides -protocol-specific details from Akka and makes it therefore very easy to support -a large number of protocols through a uniform Camel component interface. 
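As a rough illustration of the normalized message format discussed in this section, here is a minimal Scala-API sketch (the endpoint URI and class name are invented; the Java DSL equivalents are in the snippets above) of a consumer that works with the protocol-independent `CamelMessage`:

```
import akka.camel.{ CamelMessage, Consumer }

// Hypothetical consumer: the same receive block applies no matter whether the
// message arrived over file, JMS, HTTP, ... because Camel normalizes it first.
class NormalizedConsumer extends Consumer {
  def endpointUri = "file:data/input/normalized"

  def receive = {
    case msg: CamelMessage =>
      // msg.body is the protocol-independent, normalized payload
      println(s"received: ${msg.body}")
  }
}
```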
The -akka-camel module further converts mutable Camel messages into immutable -representations which are used by Consumer and Producer actors for pattern -matching, transformation, serialization or storage. In the above example of the Orders Producer, -the XML message is put in the body of a newly created Camel Message with an empty set of headers. -You can also create a CamelMessage yourself with the appropriate body and headers as you see fit. - -### CamelExtension - -The akka-camel module is implemented as an Akka Extension, the `CamelExtension` object. -Extensions will only be loaded once per `ActorSystem`, which will be managed by Akka. -The `CamelExtension` object provides access to the @extref[Camel](github:akka-camel/src/main/scala/akka/camel/Camel.scala) interface. -The @extref[Camel](github:akka-camel/src/main/scala/akka/camel/Camel.scala) interface in turn provides access to two important Apache Camel objects, the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) and the `ProducerTemplate`. -Below you can see how you can get access to these Apache Camel objects. - -@@snip [CamelExtensionTest.java]($code$/java/jdocs/camel/CamelExtensionTest.java) { #CamelExtension } - -One `CamelExtension` is only loaded once for every one `ActorSystem`, which makes it safe to call the `CamelExtension` at any point in your code to get to the -Apache Camel objects associated with it. There is one [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) and one `ProducerTemplate` for every one `ActorSystem` that uses a `CamelExtension`. -By Default, a new [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) is created when the `CamelExtension` starts. If you want to inject your own context instead, -you can implement the @extref[ContextProvider](github:akka-camel/src/main/scala/akka/camel/ContextProvider.scala) interface and add the FQCN of your implementation in the config, as the value of the "akka.camel.context-provider". -This interface define a single method `getContext()` used to load the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java). - -Below an example on how to add the ActiveMQ component to the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java), which is required when you would like to use the ActiveMQ component. - -@@snip [CamelExtensionTest.java]($code$/java/jdocs/camel/CamelExtensionTest.java) { #CamelExtensionAddComponent } - -The [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) joins the lifecycle of the `ActorSystem` and `CamelExtension` it is associated with; the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) is started when -the `CamelExtension` is created, and it is shut down when the associated `ActorSystem` is shut down. The same is true for the `ProducerTemplate`. - -The `CamelExtension` is used by both *Producer* and *Consumer* actors to interact with Apache Camel internally. 
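For orientation, a minimal Scala sketch (the actor system name is invented) of obtaining the extension and its two Camel objects directly from an `ActorSystem`:

```
import akka.actor.ActorSystem
import akka.camel.CamelExtension

val system = ActorSystem("camel-demo")   // hypothetical system name
val camel = CamelExtension(system)

val camelContext = camel.context         // the CamelContext managed by the extension
val producerTemplate = camel.template    // the shared ProducerTemplate
```

Because the extension is loaded once per `ActorSystem`, repeated calls return the same instance.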
-You can access the `CamelExtension` inside a *Producer* or a *Consumer* using the `camel` method, or get straight at the *CamelContext* -using the `getCamelContext` method or to the *ProducerTemplate* using the *getProducerTemplate* method. -Actors are created and started asynchronously. When a *Consumer* actor is created, the *Consumer* is published at its Camel endpoint -(more precisely, the route is added to the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) from the [Endpoint](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Endpoint.java) to the actor). -When a *Producer* actor is created, a [SendProcessor](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java) and [Endpoint](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Endpoint.java) are created so that the Producer can send messages to it. -Publication is done asynchronously; setting up an endpoint may still be in progress after you have -requested the actor to be created. Some Camel components can take a while to startup, and in some cases you might want to know when the endpoints are activated and ready to be used. -The @extref[Camel](github:akka-camel/src/main/scala/akka/camel/Camel.scala) interface allows you to find out when the endpoint is activated or deactivated. - -@@snip [ActivationTestBase.java]($code$/java/jdocs/camel/ActivationTestBase.java) { #CamelActivation } - -The above code shows that you can get a `Future` to the activation of the route from the endpoint to the actor, or you can wait in a blocking fashion on the activation of the route. -An `ActivationTimeoutException` is thrown if the endpoint could not be activated within the specified timeout. Deactivation works in a similar fashion: - -@@snip [ActivationTestBase.java]($code$/java/jdocs/camel/ActivationTestBase.java) { #CamelDeactivation } - -Deactivation of a Consumer or a Producer actor happens when the actor is terminated. For a Consumer, the route to the actor is stopped. For a Producer, the [SendProcessor](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java) is stopped. -A `DeActivationTimeoutException` is thrown if the associated camel objects could not be deactivated within the specified timeout. - -## Consumer Actors - -For objects to receive messages, they must inherit from the @extref[UntypedConsumerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala) -class. For example, the following actor class (Consumer1) implements the -*getEndpointUri* method, which is declared in the @extref[UntypedConsumerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala) class, in order to receive -messages from the `file:data/input/actor` Camel endpoint. - -@@snip [Consumer1.java]($code$/java/jdocs/camel/Consumer1.java) { #Consumer1 } - -Whenever a file is put into the data/input/actor directory, its content is -picked up by the Camel [file component](http://camel.apache.org/file2.html) and sent as message to the -actor. Messages consumed by actors from Camel endpoints are of type -[CamelMessage](#camelmessage). These are immutable representations of Camel messages. - -Here's another example that sets the endpointUri to -`jetty:http://localhost:8877/camel/default`. 
It causes Camel's Jetty -component to start an embedded [Jetty](http://www.eclipse.org/jetty/) server, accepting HTTP connections -from localhost on port 8877. - -@@snip [Consumer2.java]($code$/java/jdocs/camel/Consumer2.java) { #Consumer2 } - -After starting the actor, clients can send messages to that actor by POSTing to -`http://localhost:8877/camel/default`. The actor sends a response by using the -`getSender().tell` method. For returning a message body and headers to the HTTP -client the response type should be [CamelMessage](#camelmessage). For any other response type, a -new CamelMessage object is created by akka-camel with the actor response as message -body. - - -### Delivery acknowledgements - -With in-out message exchanges, clients usually know that a message exchange is -done when they receive a reply from a consumer actor. The reply message can be a -CamelMessage (or any object which is then internally converted to a CamelMessage) on -success, and a Failure message on failure. - -With in-only message exchanges, by default, an exchange is done when a message -is added to the consumer actor's mailbox. Any failure or exception that occurs -during processing of that message by the consumer actor cannot be reported back -to the endpoint in this case. To allow consumer actors to positively or -negatively acknowledge the receipt of a message from an in-only message -exchange, they need to override the `autoAck` method to return false. -In this case, consumer actors must reply either with a -special akka.camel.Ack message (positive acknowledgement) or a akka.actor.Status.Failure (negative -acknowledgement). - -@@snip [Consumer3.java]($code$/java/jdocs/camel/Consumer3.java) { #Consumer3 } - - -### Consumer timeout - -Camel Exchanges (and their corresponding endpoints) that support two-way communications need to wait for a response from -an actor before returning it to the initiating client. -For some endpoint types, timeout values can be defined in an endpoint-specific -way which is described in the documentation of the individual Camel -components. Another option is to configure timeouts on the level of consumer actors. - -Two-way communications between a Camel endpoint and an actor are -initiated by sending the request message to the actor with the @extref[ask](github:akka-actor/src/main/scala/akka/pattern/Patterns.scala) pattern -and the actor replies to the endpoint when the response is ready. The ask request to the actor can timeout, which will -result in the [Exchange](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Exchange.java) failing with a TimeoutException set on the failure of the [Exchange](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Exchange.java). -The timeout on the consumer actor can be overridden with the `replyTimeout`, as shown below. - -@@snip [Consumer4.java]($code$/java/jdocs/camel/Consumer4.java) { #Consumer4 } - -## Producer Actors - -For sending messages to Camel endpoints, actors need to inherit from the @extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala) class and implement the getEndpointUri method. - -@@snip [Producer1.java]($code$/java/jdocs/camel/Producer1.java) { #Producer1 } - -Producer1 inherits a default implementation of the onReceive method from the -@extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala) class. 
To customize a producer actor's default behavior you must override the @extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala).onTransformResponse and -@extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala).onTransformOutgoingMessage methods. This is explained later in more detail. -Producer Actors cannot override the @extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala).onReceive method. - -Any message sent to a Producer actor will be sent to -the associated Camel endpoint, in the above example to -`http://localhost:8080/news`. The @extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala) always sends messages asynchronously. Response messages (if supported by the -configured endpoint) will, by default, be returned to the original sender. The -following example uses the ask pattern to send a message to a -Producer actor and waits for a response. - -@@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #AskProducer } - -The future contains the response CamelMessage, or an `AkkaCamelException` when an error occurred, which contains the headers of the response. - - -### Custom Processing - -Instead of replying to the initial sender, producer actors can implement custom -response processing by overriding the onRouteResponse method. In the following example, the response -message is forwarded to a target actor instead of being replied to the original -sender. - -@@snip [ResponseReceiver.java]($code$/java/jdocs/camel/ResponseReceiver.java) { #RouteResponse } - -@@snip [Forwarder.java]($code$/java/jdocs/camel/Forwarder.java) { #RouteResponse } - -@@snip [OnRouteResponseTestBase.java]($code$/java/jdocs/camel/OnRouteResponseTestBase.java) { #RouteResponse } - -Before producing messages to endpoints, producer actors can pre-process them by -overriding the @extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala).onTransformOutgoingMessage method. - -@@snip [Transformer.java]($code$/java/jdocs/camel/Transformer.java) { #TransformOutgoingMessage } - -### Producer configuration options - -The interaction of producer actors with Camel endpoints can be configured to be -one-way or two-way (by initiating in-only or in-out message exchanges, -respectively). By default, the producer initiates an in-out message exchange -with the endpoint. For initiating an in-only exchange, producer actors have to override the isOneway method to return true. - -@@snip [OnewaySender.java]($code$/java/jdocs/camel/OnewaySender.java) { #Oneway } - -### Message correlation - -To correlate request with response messages, applications can set the -*Message.MessageExchangeId* message header. - -@@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #Correlate } - -### ProducerTemplate - -The @extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala) class is a very convenient way for actors to produce messages to Camel endpoints. -Actors may also use a Camel `ProducerTemplate` for producing messages to endpoints. - -@@snip [MyActor.java]($code$/java/jdocs/camel/MyActor.java) { #ProducerTemplate } - -For initiating a two-way message exchange, one of the -`ProducerTemplate.request*` methods must be used. 
- -@@snip [RequestBodyActor.java]($code$/java/jdocs/camel/RequestBodyActor.java) { #RequestProducerTemplate } - - -## Asynchronous routing - -In-out message exchanges between endpoints and actors are -designed to be asynchronous. This is the case for both, consumer and producer -actors. - - * A consumer endpoint sends request messages to its consumer actor using the `tell` -method and the actor returns responses with `getSender().tell` once they are -ready. - * A producer actor sends request messages to its endpoint using Camel's -asynchronous routing engine. Asynchronous responses are wrapped and added to the -producer actor's mailbox for later processing. By default, response messages are -returned to the initial sender but this can be overridden by Producer -implementations (see also description of the `onRouteResponse` method -in [Custom Processing](#camel-custom-processing)). - -However, asynchronous two-way message exchanges, without allocating a thread for -the full duration of exchange, cannot be generically supported by Camel's -asynchronous routing engine alone. This must be supported by the individual -Camel components (from which endpoints are created) as well. They must be -able to suspend any work started for request processing (thereby freeing threads -to do other work) and resume processing when the response is ready. This is -currently the case for a [subset of components](http://camel.apache.org/asynchronous-routing-engine.html) such as the Jetty component. -All other Camel components can still be used, of course, but they will cause -allocation of a thread for the duration of an in-out message exchange. There's -also [Examples](#camel-examples) that implements both, an asynchronous -consumer and an asynchronous producer, with the jetty component. - -If the used Camel component is blocking it might be necessary to use a separate -@ref:[dispatcher](dispatchers.md) for the producer. The Camel processor is -invoked by a child actor of the producer and the dispatcher can be defined in -the deployment section of the configuration. For example, if your producer actor -has path `/user/integration/output` the dispatcher of the child actor can be -defined with: - -``` -akka.actor.deployment { - /integration/output/* { - dispatcher = my-dispatcher - } -} -``` - -## Custom Camel routes - -In all the examples so far, routes to consumer actors have been automatically -constructed by akka-camel, when the actor was started. Although the default -route construction templates, used by akka-camel internally, are sufficient for -most use cases, some applications may require more specialized routes to actors. -The akka-camel module provides two mechanisms for customizing routes to actors, -which will be explained in this section. These are: - - * Usage of [Akka Camel components](#camel-components) to access actors. -Any Camel route can use these components to access Akka actors. - * [Intercepting route construction](#camel-intercepting-route-construction) to actors. -This option gives you the ability to change routes that have already been added to Camel. -Consumer actors have a hook into the route definition process which can be used to change the route. - - -### Akka Camel components - -Akka actors can be accessed from Camel routes using the actor Camel component. This component can be used to -access any Akka actor (not only consumer actors) from Camel routes, as described in the following sections. 
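Before the detailed description below, here is a rough Scala sketch (hypothetical endpoint URIs, actor and system names; the supported Java DSL version is shown in the snippets below) of registering a custom route that forwards exchanges from a Camel endpoint to an actor via the actor component:

```
import akka.actor.{ Actor, ActorSystem, Props }
import akka.camel.{ CamelExtension, CamelMessage }
import org.apache.camel.builder.RouteBuilder

class Responder extends Actor {
  def receive = {
    case msg: CamelMessage => sender() ! s"Received: ${msg.body}"   // reply travels back to the caller
  }
}

object CustomRouteSketch extends App {
  val system = ActorSystem("camel-routes")
  val responder = system.actorOf(Props[Responder], "responder")

  // Route exchanges from a (hypothetical) Jetty endpoint to the actor endpoint.
  // The target URI is written out by hand here; CamelPath.toCamelUri(responder),
  // described below, derives the same kind of URI from the ActorRef.
  CamelExtension(system).context.addRoutes(new RouteBuilder {
    def configure(): Unit =
      from("jetty:http://localhost:8877/example")
        .to("akka://camel-routes/user/responder")
  })
}
```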
- - -### Access to actors - -To access actors from custom Camel routes, the actor Camel -component should be used. It fully supports Camel's [asynchronous routing -engine](http://camel.apache.org/asynchronous-routing-engine.html). - -This component accepts the following endpoint URI format: - - * `[]?` - -where `` is the `ActorPath` to the actor. The `` are -name-value pairs separated by `&` (i.e. `name1=value1&name2=value2&...`). - -#### URI options - -The following URI options are supported: - -|Name | Type | Default | Description | -|-------------|----------|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -|replyTimeout | Duration | false | The reply timeout, specified in the same way that you use the duration in akka, for instance `10 seconds` except that in the url it is handy to use a + between the amount and the unit, like for example `200+millis` See also [Consumer timeout](#camel-timeout).| -|autoAck | Boolean | true | If set to true, in-only message exchanges are auto-acknowledged when the message is added to the actor's mailbox. If set to false, actors must acknowledge the receipt of the message. See also [Delivery acknowledgements](#camel-acknowledgements). | - -Here's an actor endpoint URI example containing an actor path: - -``` -akka://some-system/user/myconsumer?autoAck=false&replyTimeout=100+millis -``` - -In the following example, a custom route to an actor is created, using the -actor's path. - -@@snip [Responder.java]($code$/java/jdocs/camel/Responder.java) { #CustomRoute } - -@@snip [CustomRouteBuilder.java]($code$/java/jdocs/camel/CustomRouteBuilder.java) { #CustomRoute } - -@@snip [CustomRouteTestBase.java]($code$/java/jdocs/camel/CustomRouteTestBase.java) { #CustomRoute } - -The *CamelPath.toCamelUri* converts the *ActorRef* to the Camel actor component URI format which points to the actor endpoint as described above. -When a message is received on the jetty endpoint, it is routed to the Responder actor, which in return replies back to the client of -the HTTP request. - - -### Intercepting route construction - -The previous section, [Akka Camel components](#camel-components), explained how to setup a route to -an actor manually. -It was the application's responsibility to define the route and add it to the current CamelContext. -This section explains a more convenient way to define custom routes: akka-camel is still setting up the routes to consumer actors -(and adds these routes to the current CamelContext) but applications can define extensions to these routes. -Extensions can be defined with Camel's [Java DSL](http://camel.apache.org/dsl.html) or [Scala DSL](http://camel.apache.org/scala-dsl.html). For example, an extension could be a custom error handler that redelivers messages from an endpoint to an actor's bounded mailbox when the mailbox was full. - -The following examples demonstrate how to extend a route to a consumer actor for -handling exceptions thrown by that actor. 
- -@@snip [ErrorThrowingConsumer.java]($code$/java/jdocs/camel/ErrorThrowingConsumer.java) { #ErrorThrowingConsumer } - -The above ErrorThrowingConsumer sends the Failure back to the sender in preRestart -because the Exception that is thrown in the actor would -otherwise just crash the actor, by default the actor would be restarted, and the response would never reach the client of the Consumer. - -The akka-camel module creates a RouteDefinition instance by calling -from(endpointUri) on a Camel RouteBuilder (where endpointUri is the endpoint URI -of the consumer actor) and passes that instance as argument to the route -definition handler *). The route definition handler then extends the route and -returns a ProcessorDefinition (in the above example, the ProcessorDefinition -returned by the end method. See the [org.apache.camel.model](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/model/) package for -details). After executing the route definition handler, akka-camel finally calls -a to(targetActorUri) on the returned ProcessorDefinition to complete the -route to the consumer actor (where targetActorUri is the actor component URI as described in [Access to actors](#access-to-actors)). -If the actor cannot be found, a *ActorNotRegisteredException* is thrown. - -*) Before passing the RouteDefinition instance to the route definition handler, -akka-camel may make some further modifications to it. - - -## Examples - -The sample named @extref[Akka Camel Samples with Java](ecs:akka-samples-camel-java) (@extref[source code](samples:akka-sample-camel-java)) -contains 3 samples: - - * Asynchronous routing and transformation - This example demonstrates how to implement consumer and -producer actors that support [Asynchronous routing](#camel-asynchronous-routing) with their Camel endpoints. - * Custom Camel route - Demonstrates the combined usage of a `Producer` and a -`Consumer` actor as well as the inclusion of a custom Camel route. - * Quartz Scheduler Example - Showing how simple is to implement a cron-style scheduler by -using the Camel Quartz component - -## Configuration - -There are several configuration properties for the Camel module, please refer -to the @ref:[reference configuration](general/configuration.md#config-akka-camel). - -## Additional Resources - -For an introduction to akka-camel 2, see also the Peter Gabryanczyk's talk [Migrating akka-camel module to Akka 2.x](http://skillsmatter.com/podcast/scala/akka-2-x). - -For an introduction to akka-camel 1, see also the [Appendix E - Akka and Camel](http://www.manning.com/ibsen/appEsample.pdf) -(pdf) of the book [Camel in Action](http://www.manning.com/ibsen/). 
- -Other, more advanced external articles (for version 1) are: - - * [Akka Consumer Actors: New Features and Best Practices](http://krasserm.blogspot.com/2011/02/akka-consumer-actors-new-features-and.html) - * [Akka Producer Actors: New Features and Best Practices](http://krasserm.blogspot.com/2011/02/akka-producer-actor-new-features-and.html) diff --git a/akka-docs/src/main/paradox/java/camel.md b/akka-docs/src/main/paradox/java/camel.md new file mode 120000 index 0000000000..a70e89ae79 --- /dev/null +++ b/akka-docs/src/main/paradox/java/camel.md @@ -0,0 +1 @@ +../scala/camel.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/distributed-data.md b/akka-docs/src/main/paradox/java/distributed-data.md deleted file mode 100644 index 0813e8e92c..0000000000 --- a/akka-docs/src/main/paradox/java/distributed-data.md +++ /dev/null @@ -1,665 +0,0 @@ -# Distributed Data - -*Akka Distributed Data* is useful when you need to share data between nodes in an -Akka Cluster. The data is accessed with an actor providing a key-value store like API. -The keys are unique identifiers with type information of the data values. The values -are *Conflict Free Replicated Data Types* (CRDTs). - -All data entries are spread to all nodes, or nodes with a certain role, in the cluster -via direct replication and gossip based dissemination. You have fine grained control -of the consistency level for reads and writes. - -The nature CRDTs makes it possible to perform updates from any node without coordination. -Concurrent updates from different nodes will automatically be resolved by the monotonic -merge function, which all data types must provide. The state changes always converge. -Several useful data types for counters, sets, maps and registers are provided and -you can also implement your own custom data types. - -It is eventually consistent and geared toward providing high read and write availability -(partition tolerance), with low latency. Note that in an eventually consistent system a read may return an -out-of-date value. - -## Using the Replicator - -The `akka.cluster.ddata.Replicator` actor provides the API for interacting with the data. -The `Replicator` actor must be started on each node in the cluster, or group of nodes tagged -with a specific role. It communicates with other `Replicator` instances with the same path -(without address) that are running on other nodes . For convenience it can be used with the -`akka.cluster.ddata.DistributedData` extension but it can also be started as an ordinary -actor using the `Replicator.props`. If it is started as an ordinary actor it is important -that it is given the same name, started on same path, on all nodes. - -Cluster members with status @ref:[WeaklyUp](cluster-usage.md#weakly-up), -will participate in Distributed Data. This means that the data will be replicated to the -@ref:[WeaklyUp](cluster-usage.md#weakly-up) nodes with the background gossip protocol. Note that it -will not participate in any actions where the consistency mode is to read/write from all -nodes or the majority of nodes. The @ref:[WeaklyUp](cluster-usage.md#weakly-up) node is not counted -as part of the cluster. So 3 nodes + 5 @ref:[WeaklyUp](cluster-usage.md#weakly-up) is essentially a -3 node cluster as far as consistent actions are concerned. - -Below is an example of an actor that schedules tick messages to itself and for each tick -adds or removes elements from a `ORSet` (observed-remove set). It also subscribes to -changes of this. 
- -@@snip [DataBot.java]($code$/java/jdocs/ddata/DataBot.java) { #data-bot } - - -### Update - -To modify and replicate a data value you send a `Replicator.Update` message to the local -`Replicator`. - -The current data value for the `key` of the `Update` is passed as parameter to the `modify` -function of the `Update`. The function is supposed to return the new value of the data, which -will then be replicated according to the given consistency level. - -The `modify` function is called by the `Replicator` actor and must therefore be a pure -function that only uses the data parameter and stable fields from enclosing scope. It must -for example not access the sender reference of an enclosing actor. - -`Update` - is intended to only be sent from an actor running in same local -`ActorSystem` - as -: the `Replicator`, because the `modify` function is typically not serializable. - - -You supply a write consistency level which has the following meaning: - - * `writeLocal` the value will immediately only be written to the local replica, -and later disseminated with gossip - * `WriteTo(n)` the value will immediately be written to at least `n` replicas, -including the local replica - * `WriteMajority` the value will immediately be written to a majority of replicas, i.e. -at least **N/2 + 1** replicas, where N is the number of nodes in the cluster -(or cluster role group) - * `WriteAll` the value will immediately be written to all nodes in the cluster -(or all nodes in the cluster role group) - -When you specify to write to `n` out of `x` nodes, the update will first replicate to `n` nodes. -If there are not enough Acks after 1/5th of the timeout, the update will be replicated to `n` other -nodes. If there are less than n nodes left all of the remaining nodes are used. Reachable nodes -are prefered over unreachable nodes. - -Note that `WriteMajority` has a `minCap` parameter that is useful to specify to achieve better safety for small clusters. - -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update } - -As reply of the `Update` a `Replicator.UpdateSuccess` is sent to the sender of the -`Update` if the value was successfully replicated according to the supplied consistency -level within the supplied timeout. Otherwise a `Replicator.UpdateFailure` subclass is -sent back. Note that a `Replicator.UpdateTimeout` reply does not mean that the update completely failed -or was rolled back. It may still have been replicated to some nodes, and will eventually -be replicated to all nodes with the gossip protocol. - -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update-response1 } - -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update-response2 } - -You will always see your own writes. For example if you send two `Update` messages -changing the value of the same `key`, the `modify` function of the second message will -see the change that was performed by the first `Update` message. - -In the `Update` message you can pass an optional request context, which the `Replicator` -does not care about, but is included in the reply messages. This is a convenient -way to pass contextual information (e.g. original sender) without having to use `ask` -or maintain local correlation data structures. 
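Pulling the pieces of this section together, a compact Scala sketch (key name, data type and consistency timeout are arbitrary choices) of sending an `Update` with a request context from inside an actor:

```
import scala.concurrent.duration._
import akka.actor.Actor
import akka.cluster.Cluster
import akka.cluster.ddata.{ DistributedData, GCounter, GCounterKey }
import akka.cluster.ddata.Replicator.{ Update, UpdateResponse, WriteMajority }

class HitCounter extends Actor {
  implicit val cluster = Cluster(context.system)
  val replicator = DistributedData(context.system).replicator
  val CounterKey = GCounterKey("hit-counter")      // hypothetical key

  def receive = {
    case "hit" =>
      // the original sender travels along as the request context
      replicator ! Update(CounterKey, GCounter.empty, WriteMajority(3.seconds),
        request = Some(sender()))(_ + 1)
    case _: UpdateResponse[_] =>
      // UpdateSuccess / UpdateTimeout etc.; the reply carries the request context back
  }
}
```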
- -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update-request-context } - -
-### Get - -
-To retrieve the current value of a data entry you send a `Replicator.Get` message to the -`Replicator`. You supply a consistency level which has the following meaning: - -
- * `readLocal` the value will only be read from the local replica - * `ReadFrom(n)` the value will be read and merged from `n` replicas, -including the local replica - * `ReadMajority` the value will be read and merged from a majority of replicas, i.e. -at least **N/2 + 1** replicas, where N is the number of nodes in the cluster -(or cluster role group) - * `ReadAll` the value will be read and merged from all nodes in the cluster -(or all nodes in the cluster role group) - -
-Note that `ReadMajority` has a `minCap` parameter that is useful to specify to achieve better safety for small clusters. - -
-@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get } - -
-As a reply to the `Get`, a `Replicator.GetSuccess` is sent to the sender of the -`Get` if the value was successfully retrieved according to the supplied consistency -level within the supplied timeout. Otherwise a `Replicator.GetFailure` is sent. -If the key does not exist the reply will be `Replicator.NotFound`. - -
-@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get-response1 } - -
-@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get-response2 } - -
-You will always read your own writes. For example if you send an `Update` message -followed by a `Get` of the same `key` the `Get` will retrieve the change that was -performed by the preceding `Update` message. However, the order of the reply messages is -not defined, i.e. in the previous example you may receive the `GetSuccess` before -the `UpdateSuccess`. - -
-In the `Get` message you can pass an optional request context in the same way as for the -`Update` message, described above. For example the original sender can be passed and replied -to after receiving and transforming `GetSuccess`. - -
-@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get-request-context } - -
-### Consistency - -
-The consistency level that is supplied in the [Update](#replicator-update) and [Get](#replicator-get) -specifies per request how many replicas must respond successfully to a write and read request. - -
-For low latency reads you use `ReadLocal` with the risk of retrieving stale data, i.e. updates -from other nodes might not be visible yet. - -
-When using `writeLocal` the update is only written to the local replica and then disseminated -in the background with the gossip protocol, which can take a few seconds to spread to all nodes. - -
-`WriteAll` and `ReadAll` are the strongest consistency levels, but also the slowest and with the -lowest availability. For example, it is enough that one node is unavailable for a `Get` request -and you will not receive the value. - -
-If consistency is important, you can ensure that a read always reflects the most recent -write by using the following formula: - -
-``` -(nodes_written + nodes_read) > N -``` - -
-where N is the total number of nodes in the cluster, or the number of nodes with the role that is -used for the `Replicator`. - -
-For example, in a 7 node cluster these consistency properties are achieved by writing to 4 nodes -and reading from 4 nodes, or writing to 5 nodes and reading from 3 nodes.
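As a small illustrative sketch (not taken from the referenced test sources), the 7 node example above could be expressed with the following consistency settings; the 3 second timeout is an arbitrary value chosen for the example:

```
import java.util.concurrent.TimeUnit;
import scala.concurrent.duration.Duration;
import akka.cluster.ddata.Replicator;

// in a 7 node cluster: (4 written + 4 read) > 7, so a read sees the latest write
final Replicator.WriteConsistency writeTo4 =
  new Replicator.WriteTo(4, Duration.create(3, TimeUnit.SECONDS));
final Replicator.ReadConsistency readFrom4 =
  new Replicator.ReadFrom(4, Duration.create(3, TimeUnit.SECONDS));
```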
- -By combining `WriteMajority` and `ReadMajority` levels a read always reflects the most recent write. -The `Replicator` writes and reads to a majority of replicas, i.e. **N / 2 + 1**. For example, -in a 5 node cluster it writes to 3 nodes and reads from 3 nodes. In a 6 node cluster it writes -to 4 nodes and reads from 4 nodes. - -
-You can define a minimum number of nodes for `WriteMajority` and `ReadMajority`, -which minimizes the risk of reading stale data. The minimum cap is -provided by the `minCap` property of `WriteMajority` and `ReadMajority` and defines the required majority. -If the `minCap` is higher than **N / 2 + 1** the `minCap` will be used. - -
-For example, if the `minCap` is 5, the `WriteMajority` and `ReadMajority` for a cluster of 3 nodes will be 3, for a -cluster of 6 nodes it will be 5 and for a cluster of 12 nodes it will be 7 ( **N / 2 + 1** ). - -
-For small clusters (<7) the risk of membership changes between a `WriteMajority` and `ReadMajority` -is rather high and then the nice properties of combining majority writes and reads are not -guaranteed. Therefore the `ReadMajority` and `WriteMajority` have a `minCap` parameter that -is useful to specify to achieve better safety for small clusters. It means that if the cluster -size is smaller than the majority size it will use the `minCap` number of nodes but at most -the total size of the cluster. - -
-Here is an example of using `writeMajority` and `readMajority`: - -
-@@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #read-write-majority } - -
-@@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #get-cart } - -
-@@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #add-item } - -
-In some rare cases, when performing an `Update`, it is necessary to first try to fetch the latest data from -other nodes. That can be done by first sending a `Get` with `ReadMajority` and then continuing with -the `Update` when the `GetSuccess`, `GetFailure` or `NotFound` reply is received. This might be -needed when you need to base a decision on the latest information or when removing entries from an `ORSet` -or `ORMap`. If an entry is added to an `ORSet` or `ORMap` from one node and removed from another -node the entry will only be removed if the added entry is visible on the node where the removal is -performed (hence the name observed-removed set). - -
-The following example illustrates how to do that: - -
-@@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #remove-item } - -
-@@@ warning - -
-*Caveat:* Even if you use `writeMajority` and `readMajority` there is a small risk that you may -read stale data if the cluster membership has changed between the `Update` and the `Get`. -For example, in a cluster of 5 nodes when you `Update` and that change is written to 3 nodes: -n1, n2, n3. Then 2 more nodes are added and a `Get` request is reading from 4 nodes, which -happens to be n4, n5, n6, n7, i.e. the value on n1, n2, n3 is not seen in the response of the -`Get` request. - -
-@@@ - -
-### Subscribe - -
-You may also register interest in change notifications by sending a `Replicator.Subscribe` -message to the `Replicator`. It will send `Replicator.Changed` messages to the registered -subscriber when the data for the subscribed key is updated. Subscribers will be notified -periodically with the configured `notify-subscribers-interval`, and it is also possible to -send an explicit `Replicator.FlushChanges` message to the `Replicator` to notify the subscribers -immediately.
- -The subscriber is automatically removed if the subscriber is terminated. A subscriber can -also be deregistered with the `Replicator.Unsubscribe` message. - -
-@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #subscribe } - -
-### Delete - -
-A data entry can be deleted by sending a `Replicator.Delete` message to the local -`Replicator`. As a reply to the `Delete`, a `Replicator.DeleteSuccess` is sent to -the sender of the `Delete` if the value was successfully deleted according to the supplied -consistency level within the supplied timeout. Otherwise a `Replicator.ReplicationDeleteFailure` -is sent. Note that `ReplicationDeleteFailure` does not mean that the delete completely failed or -was rolled back. It may still have been replicated to some nodes, and may eventually be replicated -to all nodes. - -
-A deleted key cannot be reused, but it is still recommended to delete unused -data entries because that reduces the replication overhead when new nodes join the cluster. -Subsequent `Delete`, `Update` and `Get` requests will be answered with `Replicator.DataDeleted`. -Subscribers will receive `Replicator.DataDeleted`. - -
-@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #delete } - -
-@@@ warning - -
-As deleted keys continue to be included in the stored data on each node as well as in gossip -messages, a continuous series of updates and deletes of top-level entities will result in -growing memory usage until an ActorSystem runs out of memory. To use Akka Distributed Data -where frequent adds and removes are required, you should use a fixed number of top-level data -types that support both updates and removals, for example `ORMap` or `ORSet`. - -
-@@@ - - -
-### delta-CRDT - -
-[Delta State Replicated Data Types](http://arxiv.org/abs/1603.01529) -are supported. delta-CRDT is a way to reduce the need for sending the full state -for updates. For example, adding elements `'c'` and `'d'` to the set `{'a', 'b'}` would -result in sending the delta `{'c', 'd'}` and merging that with the state on the -receiving side, resulting in the set `{'a', 'b', 'c', 'd'}`. - -
-The protocol for replicating the deltas supports causal consistency if the data type -is marked with `RequiresCausalDeliveryOfDeltas`. Otherwise it is only eventually -consistent. Without causal consistency, if elements `'c'` and `'d'` are -added in two separate *Update* operations these deltas may occasionally be propagated -to nodes in a different order than the causal order of the updates. For this example it -can result in the set `{'a', 'b', 'd'}` being seen before element `'c'` is seen. Eventually -it will be `{'a', 'b', 'c', 'd'}`. - -
-Note that the full state is occasionally also replicated for delta-CRDTs, for example when -new nodes are added to the cluster or when deltas could not be propagated because -of network partitions or similar problems. - -
-The delta propagation can be disabled with the configuration property: - -
-``` -akka.cluster.distributed-data.delta-crdt.enabled=off -``` - -
-## Data Types - -
-The data types must be convergent (stateful) CRDTs and implement the `ReplicatedData` trait, -i.e. they provide a monotonic merge function and the state changes always converge.
- -You can use your own custom `AbstractReplicatedData` or `AbstractDeltaReplicatedData` types, -and several types are provided by this package, such as: - - * Counters: `GCounter`, `PNCounter` - * Sets: `GSet`, `ORSet` - * Maps: `ORMap`, `ORMultiMap`, `LWWMap`, `PNCounterMap` - * Registers: `LWWRegister`, `Flag` - -### Counters - -`GCounter` is a "grow only counter". It only supports increments, no decrements. - -It works in a similar way as a vector clock. It keeps track of one counter per node and the total -value is the sum of these counters. The `merge` is implemented by taking the maximum count for -each node. - -If you need both increments and decrements you can use the `PNCounter` (positive/negative counter). - -It is tracking the increments (P) separate from the decrements (N). Both P and N are represented -as two internal `GCounter`. Merge is handled by merging the internal P and N counters. -The value of the counter is the value of the P counter minus the value of the N counter. - -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #pncounter } - -`GCounter` and `PNCounter` have support for [delta-CRDT](#delta-crdt) and don't need causal -delivery of deltas. - -Several related counters can be managed in a map with the `PNCounterMap` data type. -When the counters are placed in a `PNCounterMap` as opposed to placing them as separate top level -values they are guaranteed to be replicated together as one unit, which is sometimes necessary for -related data. - -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #pncountermap } - -### Sets - -If you only need to add elements to a set and not remove elements the `GSet` (grow-only set) is -the data type to use. The elements can be any type of values that can be serialized. -Merge is simply the union of the two sets. - -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #gset } - -`GSet` has support for [delta-CRDT](#delta-crdt) and it doesn't require causal delivery of deltas. - -If you need add and remove operations you should use the `ORSet` (observed-remove set). -Elements can be added and removed any number of times. If an element is concurrently added and -removed, the add will win. You cannot remove an element that you have not seen. - -The `ORSet` has a version vector that is incremented when an element is added to the set. -The version for the node that added the element is also tracked for each element in a so -called "birth dot". The version vector and the dots are used by the `merge` function to -track causality of the operations and resolve concurrent updates. - -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #orset } - -`ORSet` has support for [delta-CRDT](#delta-crdt) and it requires causal delivery of deltas. - -### Maps - -`ORMap` (observed-remove map) is a map with keys of `Any` type and the values are `ReplicatedData` -types themselves. It supports add, remove and delete any number of times for a map entry. - -If an entry is concurrently added and removed, the add will win. You cannot remove an entry that -you have not seen. This is the same semantics as for the `ORSet`. - -If an entry is concurrently updated to different values the values will be merged, hence the -requirement that the values must be `ReplicatedData` types. - -It is rather inconvenient to use the `ORMap` directly since it does not expose specific types -of the values. 
The `ORMap` is intended as a low level tool for building more specific maps, -such as the following specialized maps. - -`ORMultiMap` (observed-remove multi-map) is a multi-map implementation that wraps an -`ORMap` with an `ORSet` for the map's value. - -`PNCounterMap` (positive negative counter map) is a map of named counters. It is a specialized -`ORMap` with `PNCounter` values. - -`LWWMap` (last writer wins map) is a specialized `ORMap` with `LWWRegister` (last writer wins register) -values. - -`ORMap`, `ORMultiMap`, `PNCounterMap` and `LWWMap` have support for [delta-CRDT](#delta-crdt) and they require causal -delivery of deltas. Support for deltas here means that the `ORSet` being underlying key type for all those maps -uses delta propagation to deliver updates. Effectively, the update for map is then a pair, consisting of delta for the `ORSet` -being the key and full update for the respective value (`ORSet`, `PNCounter` or `LWWRegister`) kept in the map. - -There is a special version of `ORMultiMap`, created by using separate constructor -`ORMultiMap.emptyWithValueDeltas[A, B]`, that also propagates the updates to its values (of `ORSet` type) as deltas. -This means that the `ORMultiMap` initiated with `ORMultiMap.emptyWithValueDeltas` propagates its updates as pairs -consisting of delta of the key and delta of the value. It is much more efficient in terms of network bandwith consumed. -However, this behaviour has not been made default for `ORMultiMap` because currently the merge process for -updates for `ORMultiMap.emptyWithValueDeltas` results in a tombstone (being a form of [CRDT Garbage](#crdt-garbage) ) -in form of additional `ORSet` entry being created in a situation when a key has been added and then removed. -There is ongoing work aimed at removing necessity of creation of the aforementioned tombstone. Please also note -that despite having the same Scala type, `ORMultiMap.emptyWithValueDeltas` is not compatible with 'vanilla' `ORMultiMap`, -because of different replication mechanism. - -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #ormultimap } - -When a data entry is changed the full state of that entry is replicated to other nodes, i.e. -when you update a map the whole map is replicated. Therefore, instead of using one `ORMap` -with 1000 elements it is more efficient to split that up in 10 top level `ORMap` entries -with 100 elements each. Top level entries are replicated individually, which has the -trade-off that different entries may not be replicated at the same time and you may see -inconsistencies between related entries. Separate top level entries cannot be updated atomically -together. - -Note that `LWWRegister` and therefore `LWWMap` relies on synchronized clocks and should only be used -when the choice of value is not important for concurrent updates occurring within the clock skew. Read more -in the below section about `LWWRegister`. - -### Flags and Registers - -`Flag` is a data type for a boolean value that is initialized to `false` and can be switched -to `true`. Thereafter it cannot be changed. `true` wins over `false` in merge. - -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #flag } - -`LWWRegister` (last writer wins register) can hold any (serializable) value. - -Merge of a `LWWRegister` takes the register with highest timestamp. Note that this -relies on synchronized clocks. 
*LWWRegister* should only be used when the choice of -value is not important for concurrent updates occurring within the clock skew. - -Merge takes the register updated by the node with lowest address (`UniqueAddress` is ordered) -if the timestamps are exactly the same. - -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #lwwregister } - -Instead of using timestamps based on `System.currentTimeMillis()` time it is possible to -use a timestamp value based on something else, for example an increasing version number -from a database record that is used for optimistic concurrency control. - -@@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #lwwregister-custom-clock } - -For first-write-wins semantics you can use the `LWWRegister#reverseClock` instead of the -`LWWRegister#defaultClock`. - -The `defaultClock` is using max value of `System.currentTimeMillis()` and `currentTimestamp + 1`. -This means that the timestamp is increased for changes on the same node that occurs within -the same millisecond. It also means that it is safe to use the `LWWRegister` without -synchronized clocks when there is only one active writer, e.g. a Cluster Singleton. Such a -single writer should then first read current value with `ReadMajority` (or more) before -changing and writing the value with `WriteMajority` (or more). - -### Custom Data Type - -You can rather easily implement your own data types. The only requirement is that it implements -the `mergeData` function of the `AbstractReplicatedData` class. - -A nice property of stateful CRDTs is that they typically compose nicely, i.e. you can combine several -smaller data types to build richer data structures. For example, the `PNCounter` is composed of -two internal `GCounter` instances to keep track of increments and decrements separately. - -Here is s simple implementation of a custom `TwoPhaseSet` that is using two internal `GSet` types -to keep track of addition and removals. A `TwoPhaseSet` is a set where an element may be added and -removed, but never added again thereafter. - -@@snip [TwoPhaseSet.java]($code$/java/jdocs/ddata/TwoPhaseSet.java) { #twophaseset } - -Data types should be immutable, i.e. "modifying" methods should return a new instance. - -Implement the additional methods of `AbstractDeltaReplicatedData` if it has support for delta-CRDT replication. - -#### Serialization - -The data types must be serializable with an @ref:[Akka Serializer](serialization.md). -It is highly recommended that you implement efficient serialization with Protobuf or similar -for your custom data types. The built in data types are marked with `ReplicatedDataSerialization` -and serialized with `akka.cluster.ddata.protobuf.ReplicatedDataSerializer`. - -Serialization of the data types are used in remote messages and also for creating message -digests (SHA-1) to detect changes. Therefore it is important that the serialization is efficient -and produce the same bytes for the same content. For example sets and maps should be sorted -deterministically in the serialization. 
- -This is a protobuf representation of the above `TwoPhaseSet`: - -@@snip [TwoPhaseSetMessages.proto]($code$/../main/protobuf/TwoPhaseSetMessages.proto) { #twophaseset } - -The serializer for the `TwoPhaseSet`: - -@@snip [TwoPhaseSetSerializer.java]($code$/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer.java) { #serializer } - -Note that the elements of the sets are sorted so the SHA-1 digests are the same -for the same elements. - -You register the serializer in configuration: - -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #japi-serializer-config } - -Using compression can sometimes be a good idea to reduce the data size. Gzip compression is -provided by the `akka.cluster.ddata.protobuf.SerializationSupport` trait: - -@@snip [TwoPhaseSetSerializerWithCompression.java]($code$/java/jdocs/ddata/protobuf/TwoPhaseSetSerializerWithCompression.java) { #compression } - -The two embedded `GSet` can be serialized as illustrated above, but in general when composing -new data types from the existing built in types it is better to make use of the existing -serializer for those types. This can be done by declaring those as bytes fields in protobuf: - -@@snip [TwoPhaseSetMessages.proto]($code$/../main/protobuf/TwoPhaseSetMessages.proto) { #twophaseset2 } - -and use the methods `otherMessageToProto` and `otherMessageFromBinary` that are provided -by the `SerializationSupport` trait to serialize and deserialize the `GSet` instances. This -works with any type that has a registered Akka serializer. This is how such an serializer would -look like for the `TwoPhaseSet`: - -@@snip [TwoPhaseSetSerializer2.java]($code$/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer2.java) { #serializer } - - -### Durable Storage - -By default the data is only kept in memory. It is redundant since it is replicated to other nodes -in the cluster, but if you stop all nodes the data is lost, unless you have saved it -elsewhere. - -Entries can be configured to be durable, i.e. stored on local disk on each node. The stored data will be loaded -next time the replicator is started, i.e. when actor system is restarted. This means data will survive as -long as at least one node from the old cluster takes part in a new cluster. The keys of the durable entries -are configured with: - -``` -akka.cluster.distributed-data.durable.keys = ["a", "b", "durable*"] -``` - -Prefix matching is supported by using `*` at the end of a key. - -All entries can be made durable by specifying: - -``` -akka.cluster.distributed-data.durable.keys = ["*"] -``` - -[LMDB](https://github.com/lmdbjava/lmdbjava/) is the default storage implementation. It is -possible to replace that with another implementation by implementing the actor protocol described in -`akka.cluster.ddata.DurableStore` and defining the `akka.cluster.distributed-data.durable.store-actor-class` -property for the new implementation. - -The location of the files for the data is configured with: - -``` -# Directory of LMDB file. There are two options: -# 1. A relative or absolute path to a directory that ends with 'ddata' -# the full name of the directory will contain name of the ActorSystem -# and its remote port. -# 2. Otherwise the path is used as is, as a relative or absolute path to -# a directory. 
-akka.cluster.distributed-data.lmdb.dir = "ddata" -``` - -When running in production you may want to configure the directory to a specific -path (alt 2), since the default directory contains the remote port of the -actor system to make the name unique. If using a dynamically assigned -port (0) it will be different each time and the previously stored data -will not be loaded. - -Making the data durable has of course a performance cost. By default, each update is flushed -to disk before the `UpdateSuccess` reply is sent. For better performance, but with the risk of losing -the last writes if the JVM crashes, you can enable write behind mode. Changes are then accumulated during -a time period before it is written to LMDB and flushed to disk. Enabling write behind is especially -efficient when performing many writes to the same key, because it is only the last value for each key -that will be serialized and stored. The risk of losing writes if the JVM crashes is small since the -data is typically replicated to other nodes immediately according to the given `WriteConsistency`. - -``` -akka.cluster.distributed-data.lmdb.write-behind-interval = 200 ms -``` - -Note that you should be prepared to receive `WriteFailure` as reply to an `Update` of a -durable entry if the data could not be stored for some reason. When enabling `write-behind-interval` -such errors will only be logged and `UpdateSuccess` will still be the reply to the `Update`. - -There is one important caveat when it comes pruning of [CRDT Garbage](#crdt-garbage) for durable data. -If and old data entry that was never pruned is injected and merged with existing data after -that the pruning markers have been removed the value will not be correct. The time-to-live -of the markers is defined by configuration -`akka.cluster.distributed-data.durable.remove-pruning-marker-after` and is in the magnitude of days. -This would be possible if a node with durable data didn't participate in the pruning -(e.g. it was shutdown) and later started after this time. A node with durable data should not -be stopped for longer time than this duration and if it is joining again after this -duration its data should first be manually removed (from the lmdb directory). - - -### CRDT Garbage - -One thing that can be problematic with CRDTs is that some data types accumulate history (garbage). -For example a `GCounter` keeps track of one counter per node. If a `GCounter` has been updated -from one node it will associate the identifier of that node forever. That can become a problem -for long running systems with many cluster nodes being added and removed. To solve this problem -the `Replicator` performs pruning of data associated with nodes that have been removed from the -cluster. Data types that need pruning have to implement the `RemovedNodePruning` trait. See the -API documentation of the `Replicator` for details. - -## Samples - -Several interesting samples are included and described in the -tutorial named @extref[Akka Distributed Data Samples with Java](ecs:akka-samples-distributed-data-java) (@extref[source code](samples:akka-sample-distributed-data-java)) - - * Low Latency Voting Service - * Highly Available Shopping Cart - * Distributed Service Registry - * Replicated Cache - * Replicated Metrics - -## Limitations - -There are some limitations that you should be aware of. - -CRDTs cannot be used for all types of problems, and eventual consistency does not fit -all domains. Sometimes you need strong consistency. - -It is not intended for *Big Data*. 
The number of top level entries should not exceed 100000. -When a new node is added to the cluster all these entries are transferred (gossiped) to the -new node. The entries are split up in chunks and all existing nodes collaborate in the gossip, -but it will take a while (tens of seconds) to transfer all entries and this means that you -cannot have too many top level entries. The current recommended limit is 100000. We will -be able to improve this if needed, but the design is still not intended for billions of entries. - -All data is held in memory, which is another reason why it is not intended for *Big Data*. - -When a data entry is changed the full state of that entry may be replicated to other nodes -if it doesn't support [delta-CRDT](#delta-crdt). The full state is also replicated for delta-CRDTs, -for example when new nodes are added to the cluster or when deltas could not be propagated because -of network partitions or similar problems. This means that you cannot have too large -data entries, because then the remote message size will be too large. - -## Learn More about CRDTs - - * [The Final Causal Frontier](http://www.ustream.tv/recorded/61448875) -talk by Sean Cribbs - * [Eventually Consistent Data Structures](https://vimeo.com/43903960) -talk by Sean Cribbs - * [Strong Eventual Consistency and Conflict-free Replicated Data Types](http://research.microsoft.com/apps/video/default.aspx?id=153540&r=1) -talk by Mark Shapiro - * [A comprehensive study of Convergent and Commutative Replicated Data Types](http://hal.upmc.fr/file/index/docid/555588/filename/techreport.pdf) -paper by Mark Shapiro et. al. - -## Dependencies - -To use Distributed Data you must add the following dependency in your project. - -sbt -: @@@vars - ``` - "com.typesafe.akka" %% "akka-distributed-data" % "$akka.version$" - ``` - @@@ - -Maven -: @@@vars - ``` - - com.typesafe.akka - akka-distributed-data_$scala.binary_version$ - $akka.version$ - - ``` - @@@ - -## Configuration - -The `DistributedData` extension can be configured with the following properties: - -@@snip [reference.conf]($akka$/akka-distributed-data/src/main/resources/reference.conf) { #distributed-data } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/distributed-data.md b/akka-docs/src/main/paradox/java/distributed-data.md new file mode 120000 index 0000000000..09729dd7b7 --- /dev/null +++ b/akka-docs/src/main/paradox/java/distributed-data.md @@ -0,0 +1 @@ +../scala/distributed-data.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/distributed-pub-sub.md b/akka-docs/src/main/paradox/java/distributed-pub-sub.md deleted file mode 100644 index bb844e0cf6..0000000000 --- a/akka-docs/src/main/paradox/java/distributed-pub-sub.md +++ /dev/null @@ -1,202 +0,0 @@ -# Distributed Publish Subscribe in Cluster - -How do I send a message to an actor without knowing which node it is running on? - -How do I send messages to all actors in the cluster that have registered interest -in a named topic? - -This pattern provides a mediator actor, `akka.cluster.pubsub.DistributedPubSubMediator`, -that manages a registry of actor references and replicates the entries to peer -actors among all cluster nodes or a group of nodes tagged with a specific role. - -The `DistributedPubSubMediator` actor is supposed to be started on all nodes, -or all nodes with specified role, in the cluster. The mediator can be -started with the `DistributedPubSub` extension or as an ordinary actor. - -The registry is eventually consistent, i.e. 
changes are not immediately visible at -other nodes, but typically they will be fully replicated to all other nodes after -a few seconds. Changes are only performed in the own part of the registry and those -changes are versioned. Deltas are disseminated in a scalable way to other nodes with -a gossip protocol. - -Cluster members with status @ref:[WeaklyUp](cluster-usage.md#weakly-up), -will participate in Distributed Publish Subscribe, i.e. subscribers on nodes with -`WeaklyUp` status will receive published messages if the publisher and subscriber are on -same side of a network partition. - -You can send messages via the mediator on any node to registered actors on -any other node. - -There a two different modes of message delivery, explained in the sections -[Publish](#distributed-pub-sub-publish) and [Send](#distributed-pub-sub-send) below. - - -## Publish - -This is the true pub/sub mode. A typical usage of this mode is a chat room in an instant -messaging application. - -Actors are registered to a named topic. This enables many subscribers on each node. -The message will be delivered to all subscribers of the topic. - -For efficiency the message is sent over the wire only once per node (that has a matching topic), -and then delivered to all subscribers of the local topic representation. - -You register actors to the local mediator with `DistributedPubSubMediator.Subscribe`. -Successful `Subscribe` and `Unsubscribe` is acknowledged with -`DistributedPubSubMediator.SubscribeAck` and `DistributedPubSubMediator.UnsubscribeAck` -replies. The acknowledgment means that the subscription is registered, but it can still -take some time until it is replicated to other nodes. - -You publish messages by sending `DistributedPubSubMediator.Publish` message to the -local mediator. - -Actors are automatically removed from the registry when they are terminated, or you -can explicitly remove entries with `DistributedPubSubMediator.Unsubscribe`. - -An example of a subscriber actor: - -@@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #subscriber } - -Subscriber actors can be started on several nodes in the cluster, and all will receive -messages published to the "content" topic. - -@@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #start-subscribers } - -A simple actor that publishes to this "content" topic: - -@@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #publisher } - -It can publish messages to the topic from anywhere in the cluster: - -@@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #publish-message } - -### Topic Groups - -Actors may also be subscribed to a named topic with a `group` id. -If subscribing with a group id, each message published to a topic with the -`sendOneMessageToEachGroup` flag set to `true` is delivered via the supplied `RoutingLogic` -(default random) to one actor within each subscribing group. - -If all the subscribed actors have the same group id, then this works just like -`Send` and each message is only delivered to one subscriber. - -If all the subscribed actors have different group names, then this works like -normal `Publish` and each message is broadcasted to all subscribers. 
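Here is a minimal, illustrative sketch of subscribing with a group id and publishing one message per group. It is not part of the referenced test sources; the group id `"group-a"` and the published message are made up for the example, while the `Subscribe` and `Publish` constructors are the standard ones from `akka-cluster-tools`:

```
import akka.actor.ActorRef;
import akka.cluster.pubsub.DistributedPubSub;
import akka.cluster.pubsub.DistributedPubSubMediator;

// inside an AbstractActor:
ActorRef mediator = DistributedPubSub.get(getContext().getSystem()).mediator();

// each subscriber joins the "content" topic as a member of a named group
mediator.tell(new DistributedPubSubMediator.Subscribe("content", "group-a", getSelf()), getSelf());

// sendOneMessageToEachGroup = true delivers the message to one subscriber
// in each group, chosen by the supplied RoutingLogic (random by default)
mediator.tell(new DistributedPubSubMediator.Publish("content", "hello subscribers", true), getSelf());
```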
- -@@@ note - -Note that if the group id is used it is part of the topic identifier. -Messages published with `sendOneMessageToEachGroup=false` will not be delivered -to subscribers that subscribed with a group id. -Messages published with `sendOneMessageToEachGroup=true` will not be delivered -to subscribers that subscribed without a group id. - -@@@ - - -## Send - -This is a point-to-point mode where each message is delivered to one destination, -but you still do not have to know where the destination is located. -A typical usage of this mode is private chat to one other user in an instant messaging -application. It can also be used for distributing tasks to registered workers, like a -cluster aware router where the routees dynamically can register themselves. - -The message will be delivered to one recipient with a matching path, if any such -exists in the registry. If several entries match the path because it has been registered -on several nodes the message will be sent via the supplied `RoutingLogic` (default random) -to one destination. The sender of the message can specify that local affinity is preferred, -i.e. the message is sent to an actor in the same local actor system as the used mediator actor, -if any such exists, otherwise route to any other matching entry. - -You register actors to the local mediator with `DistributedPubSubMediator.Put`. -The `ActorRef` in `Put` must belong to the same local actor system as the mediator. -The path without address information is the key to which you send messages. -On each node there can only be one actor for a given path, since the path is unique -within one local actor system. - -You send messages by sending `DistributedPubSubMediator.Send` message to the -local mediator with the path (without address information) of the destination -actors. - -Actors are automatically removed from the registry when they are terminated, or you -can explicitly remove entries with `DistributedPubSubMediator.Remove`. - -An example of a destination actor: - -@@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #send-destination } - -Subscriber actors can be started on several nodes in the cluster, and all will receive -messages published to the "content" topic. - -@@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #start-send-destinations } - -A simple actor that publishes to this "content" topic: - -@@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #sender } - -It can publish messages to the topic from anywhere in the cluster: - -@@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #send-message } - -It is also possible to broadcast messages to the actors that have been registered with -`Put`. Send `DistributedPubSubMediator.SendToAll` message to the local mediator and the wrapped message -will then be delivered to all recipients with a matching path. Actors with -the same path, without address information, can be registered on different nodes. -On each node there can only be one such actor, since the path is unique within one -local actor system. - -Typical usage of this mode is to broadcast messages to all replicas -with the same path, e.g. 
3 actors on different nodes that all perform the same actions, -for redundancy. You can also optionally specify a property (`allButSelf`) deciding -if the message should be sent to a matching path on the self node or not. - -## DistributedPubSub Extension - -In the example above the mediator is started and accessed with the `akka.cluster.pubsub.DistributedPubSub` extension. -That is convenient and perfectly fine in most cases, but it can be good to know that it is possible to -start the mediator actor as an ordinary actor and you can have several different mediators at the same -time to be able to divide a large number of actors/topics to different mediators. For example you might -want to use different cluster roles for different mediators. - -The `DistributedPubSub` extension can be configured with the following properties: - -@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #pub-sub-ext-config } - -It is recommended to load the extension when the actor system is started by defining it in -`akka.extensions` configuration property. Otherwise it will be activated when first used -and then it takes a while for it to be populated. - -``` -akka.extensions = ["akka.cluster.pubsub.DistributedPubSub"] -``` - -## Delivery Guarantee - -As in @ref:[Message Delivery Reliability](general/message-delivery-reliability.md) of Akka, message delivery guarantee in distributed pub sub modes is **at-most-once delivery**. -In other words, messages can be lost over the wire. - -If you are looking for at-least-once delivery guarantee, we recommend [Kafka Akka Streams integration](http://doc.akka.io/docs/akka-stream-kafka/current/home.html). - -## Dependencies - -To use Distributed Publish Subscribe you must add the following dependency in your project. - -sbt -: @@@vars - ``` - "com.typesafe.akka" %% "akka-cluster-tools" % "$akka.version$" - ``` - @@@ - -Maven -: @@@vars - ``` - - com.typesafe.akka - akka-cluster-tools_$scala.binary_version$ - $akka.version$ - - ``` - @@@ \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/distributed-pub-sub.md b/akka-docs/src/main/paradox/java/distributed-pub-sub.md new file mode 120000 index 0000000000..66fda13075 --- /dev/null +++ b/akka-docs/src/main/paradox/java/distributed-pub-sub.md @@ -0,0 +1 @@ +../scala/distributed-pub-sub.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/futures.md b/akka-docs/src/main/paradox/java/futures.md deleted file mode 100644 index 03d279259f..0000000000 --- a/akka-docs/src/main/paradox/java/futures.md +++ /dev/null @@ -1,296 +0,0 @@ -# Futures - -## Introduction - -In the Scala Standard Library, a [Future](http://en.wikipedia.org/wiki/Futures_and_promises) is a data structure -used to retrieve the result of some concurrent operation. This result can be accessed synchronously (blocking) -or asynchronously (non-blocking). To be able to use this from Java, Akka provides a java friendly interface -in `akka.dispatch.Futures`. - -See also @ref:[Java 8 Compatibility](java8-compat.md) for Java compatibility. - -## Execution Contexts - -In order to execute callbacks and operations, Futures need something called an `ExecutionContext`, -which is very similar to a `java.util.concurrent.Executor`. if you have an `ActorSystem` in scope, -it will use its default dispatcher as the `ExecutionContext`, or you can use the factory methods provided -by the `ExecutionContexts` class to wrap `Executors` and `ExecutorServices`, or even create your own. 
- -@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports1 } - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #diy-execution-context } - -
-## Use with Actors - -
-There are generally two ways of getting a reply from an `AbstractActor`: the first is by a sent message (`actorRef.tell(msg, sender)`, -which only works if the original sender was an `AbstractActor`) and the second is through a `Future`. - -
-Using the `ActorRef`'s `ask` method to send a message will return a `Future`. -To wait for and retrieve the actual result the simplest method is: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports1 } - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #ask-blocking } - -
-This will cause the current thread to block and wait for the `AbstractActor` to 'complete' the `Future` with its reply. -Blocking is discouraged though as it can cause performance problems. -The blocking operations are located in `Await.result` and `Await.ready` to make it easy to spot where blocking occurs. -Alternatives to blocking are discussed further within this documentation. -Also note that the `Future` returned by an `AbstractActor` is a `Future<Object>` since an `AbstractActor` is dynamic. -That is why the cast to `String` is used in the above sample. - -
-@@@ warning - -
-`Await.result` and `Await.ready` are provided for exceptional situations where you **must** block; -a good rule of thumb is to only use them if you know why you **must** block. For all other cases, use -asynchronous composition as described below. - -
-@@@ - -
-To send the result of a `Future` to an `Actor`, you can use the `pipe` construct: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #pipe-to } - -
-## Use Directly - -
-A common use case within Akka is to have some computation performed concurrently without needing -the extra utility of an `AbstractActor`. If you find yourself creating a pool of `AbstractActor`s for the sole reason -of performing a calculation in parallel, there is an easier (and faster) way: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports2 } - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #future-eval } - -
-In the above code the block passed to `future` will be executed by the default `Dispatcher`, -with the return value of the block used to complete the `Future` (in this case, the result would be the string: "HelloWorld"). -Unlike a `Future` that is returned from an `AbstractActor`, this `Future` is properly typed, -and we also avoid the overhead of managing an `AbstractActor`. - -
-You can also create already completed Futures using the `Futures` class, which can be either successes: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #successful } - -
-Or failures: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #failed } - -
-It is also possible to create an empty `Promise`, to be filled later, and obtain the corresponding `Future`: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #promise } - -
-For these examples `PrintResult` is defined as follows: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #print-result } - -
-## Functional Futures - -
-Scala's `Future` has several monadic methods that are very similar to the ones used by Scala's collections.
- -These allow you to create 'pipelines' or 'streams' that the result will travel through. - -
-### Future is a Monad - -
-The first method for working with `Future` functionally is `map`. This method takes a `Mapper` which performs -some operation on the result of the `Future`, and returns a new result. -The return value of the `map` method is another `Future` that will contain the new result: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports2 } - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #map } - -
-In this example we are joining two strings together within a `Future`. Instead of waiting for f1 to complete, -we apply our function that calculates the length of the string using the `map` method. -Now we have a second `Future`, f2, that will eventually contain an `Integer`. -When our original `Future`, f1, completes, it will also apply our function and complete the second `Future` -with its result. When we finally `get` the result, it will contain the number 10. -Our original `Future` still contains the string "HelloWorld" and is unaffected by the `map`. - -
-Something to note when using these methods: the passed work is always dispatched on the provided `ExecutionContext`, even if -the `Future` has already been completed when one of these methods is called. - -
-### Composing Futures - -
-It is very often desirable to be able to combine different Futures with each other. -Below are some examples of how that can be done in a non-blocking fashion. - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports3 } - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #sequence } - -
-To better explain what happened in the example, `Futures.sequence` is taking the `Iterable<Future<Integer>>` -and turning it into a `Future<Iterable<Integer>>`. We can then use `map` to work with the `Iterable` directly, -and we aggregate the sum of the `Iterable`. - -
-The `traverse` method is similar to `sequence`, but it takes a sequence of `A` and applies a function from `A` to `Future<B>` -and returns a `Future<Iterable<B>>`, enabling parallel `map` over the sequence, if you use `Futures.future` to create the `Future`. - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports4 } - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #traverse } - -
-It's as simple as that! - -
-Then there's a method called `fold` that takes a start-value, -a sequence of `Future`s and a function from the type of the start-value, a timeout, -and the type of the futures and returns something with the same type as the start-value; -it then applies the function to all elements in the sequence of futures, non-blockingly, -and the execution will be started when the last of the Futures is completed. - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports5 } - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #fold } - -
-That's all it takes! - -
-If the sequence passed to `fold` is empty, it will return the start-value; in the case above, that will be an empty String.
- -In some cases you don't have a start-value and are able to use the value of the first completing `Future` -in the sequence as the start-value; then you can use `reduce`, which works like this: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports6 } - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #reduce } - -
-Same as with `fold`, the execution will be started when the last of the Futures is completed. You can also parallelize -it by chunking your futures into sub-sequences, reducing them, and then reducing the reduced results again. - -
-This is just a sample of what can be done. - -
-## Callbacks - -
-Sometimes you just want to listen to a `Future` being completed, and react to that not by creating a new Future, but by side-effecting. -For this Scala supports `onComplete`, `onSuccess` and `onFailure`, of which the last two are specializations of the first. - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onSuccess } - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onFailure } - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onComplete } - -
-## Ordering - -
-Since callbacks are executed in any order and potentially in parallel, -it can be tricky at times when you need sequential ordering of operations. -But there's a solution, and its name is `andThen`. It creates a new `Future` with -the specified callback, a `Future` that will have the same result as the `Future` it's called on, -which allows for ordering like in the following sample: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #and-then } - -
-## Auxiliary methods - -
-`Future` `fallbackTo` combines 2 Futures into a new `Future`, and will hold the successful value of the second `Future` -if the first `Future` fails. - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #fallback-to } - -
-You can also combine two Futures into a new `Future` that will hold a tuple of the two Futures' successful results, -using the `zip` operation. - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #zip } - -
-## Exceptions - -
-Since the result of a `Future` is created concurrently to the rest of the program, exceptions must be handled differently. -It doesn't matter if an `AbstractActor` or the dispatcher is completing the `Future`: if an `Exception` is caught, -the `Future` will contain it instead of a valid result. If a `Future` does contain an `Exception`, -calling `Await.result` will cause it to be thrown again so it can be handled properly. - -
-It is also possible to handle an `Exception` by returning a different result. -This is done with the `recover` method. For example: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #recover } - -
-In this example, if the actor replied with an `akka.actor.Status.Failure` containing the `ArithmeticException`, -our `Future` would have a result of 0. The `recover` method works very similarly to the standard try/catch blocks, -so multiple `Exception`s can be handled in this manner, and if an `Exception` is not handled this way -it will behave as if we hadn't used the `recover` method.
- -You can also use the `recoverWith` method, which has the same relationship to `recover` as `flatMap` has to `map`, -and is used like this: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #try-recover } - -
-## After - -
-`akka.pattern.Patterns.after` makes it easy to complete a `Future` with a value or exception after a timeout. - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports7 } - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #after } - -
-## Java 8, CompletionStage and CompletableFuture - -
-Starting with Akka 2.4.2 we have begun to introduce Java 8 `java.util.concurrent.CompletionStage` in Java APIs. -It is the Java counterpart of `scala.concurrent.Future`; conversion from `scala.concurrent.Future` is done using -the `scala-java8-compat` library. - -
-Unlike `scala.concurrent.Future`, which has async methods only, `CompletionStage` has *async* and *non-async* methods. - -
-The `scala-java8-compat` library returns its own implementation of `CompletionStage` which delegates all *non-async* -methods to their *async* counterparts. The implementation extends standard Java `CompletableFuture`. -Java 8 `CompletableFuture` creates a new instance of `CompletableFuture` for any new stage, -which means the `scala-java8-compat` implementation is not used after the first mapping method. - -
-@@@ note - -
-After adding any additional computation stage to a `CompletionStage` returned by `scala-java8-compat` -(e.g. the `CompletionStage` instances returned by Akka), it falls back to the standard behaviour of Java `CompletableFuture`. - -
-@@@ - -
-Actions supplied for dependent completions of *non-async* methods may be performed by the thread -that completes the current `CompletableFuture`, or by any other caller of a completion method. - -
-All *async* methods without an explicit Executor are performed using the `ForkJoinPool.commonPool()` executor. - -
-### Non-async methods - -
-When non-async methods are applied on a not yet completed `CompletionStage`, they are completed by -the thread which completes the initial `CompletionStage`: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-completion-thread } - -
-In this example a Scala `Future` is converted to a `CompletionStage` just like Akka does. -The completion is delayed: we are calling `thenApply` multiple times on a not yet complete `CompletionStage`, then -complete the `Future`. - -
-The first `thenApply` is actually performed on the `scala-java8-compat` instance and the computational stage (lambda) execution -is delegated to the default Java `thenApplyAsync`, which is executed on `ForkJoinPool.commonPool()`. - -
-The second and third `thenApply` methods are executed on a Java 8 `CompletableFuture` instance which executes computational -stages on the thread which completed the first stage. They are never executed on a thread of the Scala `Future` because -the default `thenApply` breaks the chain and executes on `ForkJoinPool.commonPool()`. - -
-In the next example `thenApply` methods are executed on an already completed `Future`/`CompletionStage`: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-main-thread } - -
-The first `thenApply` is still executed on `ForkJoinPool.commonPool()` (because it is actually `thenApplyAsync`, -which is always executed on the global Java pool).
- -Then we wait for the stages to complete, so the second and third `thenApply` are executed on a completed `CompletionStage`, -and the stages are executed on the current thread - the thread which called the second and third `thenApply`. - -
-### Async methods - -
-As mentioned above, default *async* methods are always executed on `ForkJoinPool.commonPool()`: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-async-default } - -
-`CompletionStage` also has *async* methods which take an `Executor` as a second parameter, just like `Future`: - -
-@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-async-executor } - -
-This example is behaving like `Future`: every stage is executed on an explicitly specified `Executor`. - -
-@@@ note - -
-When in doubt, async methods with an explicit executor should be used. Always use async methods with a dedicated -executor/dispatcher for long-running or blocking computations, such as IO operations. - -
-@@@ - -
-See also: - -
- * [CompletionStage](https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html) - * [CompletableFuture](https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html) - * [scala-java8-compat](https://github.com/scala/scala-java8-compat) \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/futures.md b/akka-docs/src/main/paradox/java/futures.md new file mode 120000 index 0000000000..afab2a277b --- /dev/null +++ b/akka-docs/src/main/paradox/java/futures.md @@ -0,0 +1 @@ +../scala/futures.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/index-actors.md b/akka-docs/src/main/paradox/java/index-actors.md deleted file mode 100644 index 6b7db6aa6b..0000000000 --- a/akka-docs/src/main/paradox/java/index-actors.md +++ /dev/null @@ -1,21 +0,0 @@ -# Actors - -@@toc { depth=2 } - -@@@ index - -* [actors](actors.md) -* [typed](typed.md) -* [fault-tolerance](fault-tolerance.md) -* [dispatchers](dispatchers.md) -* [mailboxes](mailboxes.md) -* [routing](routing.md) -* [fsm](fsm.md) -* [persistence](persistence.md) -* [persistence-schema-evolution](persistence-schema-evolution.md) -* [persistence-query](persistence-query.md) -* [persistence-query-leveldb](persistence-query-leveldb.md) -* [testing](testing.md) -* [typed-actors](typed-actors.md) - -@@@ diff --git a/akka-docs/src/main/paradox/java/index-actors.md b/akka-docs/src/main/paradox/java/index-actors.md new file mode 120000 index 0000000000..30b1c95631 --- /dev/null +++ b/akka-docs/src/main/paradox/java/index-actors.md @@ -0,0 +1 @@ +../scala/index-actors.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/index-futures.md b/akka-docs/src/main/paradox/java/index-futures.md deleted file mode 100644 index c0158c312f..0000000000 --- a/akka-docs/src/main/paradox/java/index-futures.md +++ /dev/null @@ -1,10 +0,0 @@ -# Futures and Agents - -@@toc { depth=2 } - -@@@ index - -* [futures](futures.md) -* [agents](agents.md) - -@@@ \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/index-futures.md b/akka-docs/src/main/paradox/java/index-futures.md new file mode 120000 index 0000000000..c138740f68 --- /dev/null +++ b/akka-docs/src/main/paradox/java/index-futures.md @@ -0,0 +1 @@ +../scala/index-futures.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/index-network.md b/akka-docs/src/main/paradox/java/index-network.md deleted file mode 100644 index d0c10f605e..0000000000 ---
a/akka-docs/src/main/paradox/java/index-network.md +++ /dev/null @@ -1,24 +0,0 @@ -# Networking - -@@toc { depth=2 } - -@@@ index - -* [common/cluster](common/cluster.md) -* [cluster-usage](cluster-usage.md) -* [cluster-singleton](cluster-singleton.md) -* [distributed-pub-sub](distributed-pub-sub.md) -* [cluster-client](cluster-client.md) -* [cluster-sharding](cluster-sharding.md) -* [cluster-metrics](cluster-metrics.md) -* [distributed-data](distributed-data.md) -* [cluster-dc](cluster-dc.md) -* [remoting](remoting.md) -* [remoting-artery](remoting-artery.md) -* [serialization](serialization.md) -* [io](io.md) -* [io-tcp](io-tcp.md) -* [io-udp](io-udp.md) -* [camel](camel.md) - -@@@ \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/index-network.md b/akka-docs/src/main/paradox/java/index-network.md new file mode 120000 index 0000000000..30930d6954 --- /dev/null +++ b/akka-docs/src/main/paradox/java/index-network.md @@ -0,0 +1 @@ +../scala/index-network.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/logging.md b/akka-docs/src/main/paradox/java/logging.md deleted file mode 100644 index 71d89a1d0d..0000000000 --- a/akka-docs/src/main/paradox/java/logging.md +++ /dev/null @@ -1,500 +0,0 @@ -# Logging - -Logging in Akka is not tied to a specific logging backend. By default -log messages are printed to STDOUT, but you can plug-in a SLF4J logger or -your own logger. Logging is performed asynchronously to ensure that logging -has minimal performance impact. Logging generally means IO and locks, -which can slow down the operations of your code if it was performed -synchronously. - -## How to Log - -Create a `LoggingAdapter` and use the `error`, `warning`, `info`, or `debug` methods, -as illustrated in this example: - -@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports } - -@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #my-actor } - -The first parameter to `Logging.getLogger` could also be any -`LoggingBus`, specifically `system.eventStream()`; in the demonstrated -case, the actor system’s address is included in the `akkaSource` -representation of the log source (see [Logging Thread, Akka Source and Actor System in MDC](#logging-thread-akka-source-and-actor-system-in-mdc)) -while in the second case this is not automatically done. The second parameter -to `Logging.getLogger` is the source of this logging channel. The source -object is translated to a String according to the following rules: - - * if it is an Actor or ActorRef, its path is used - * in case of a String it is used as is - * in case of a class an approximation of its simpleName - * and in all other cases the simpleName of its class - -The log message may contain argument placeholders `{}`, which will be -substituted if the log level is enabled. Giving more arguments than -placeholders results in a warning being appended to the log statement (i.e. on -the same line with the same severity). You may pass a Java array as the only -substitution argument to have its elements be treated individually: - -@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #array } - -The Java `Class` of the log source is also included in the generated -`LogEvent`. In case of a simple string this is replaced with a “marker” -class `akka.event.DummyClassForStringSources` in order to allow special -treatment of this case, e.g. 
in the SLF4J event listener which will then use -the string instead of the class’ name for looking up the logger instance to -use. - -### Logging of Dead Letters - -By default messages sent to dead letters are logged at info level. Existence of dead letters -does not necessarily indicate a problem, but they are logged by default for the sake of caution. -After a few messages this logging is turned off, to avoid flooding the logs. -You can disable this logging completely or adjust how many dead letters are -logged. During system shutdown it is likely that you see dead letters, since pending -messages in the actor mailboxes are sent to dead letters. You can also disable logging -of dead letters during shutdown. - -```ruby -akka { - log-dead-letters = 10 - log-dead-letters-during-shutdown = on -} -``` - -To customize the logging further or take other actions for dead letters you can subscribe -to the @ref:[Event Stream](event-bus.md#event-stream). - -### Auxiliary logging options - -Akka has a few configuration options for very low level debugging. These make more sense in development than in production. - -You almost definitely need to have logging set to DEBUG to use any of the options below: - -```ruby -akka { - loglevel = "DEBUG" -} -``` - -This config option is very good if you want to know what config settings are loaded by Akka: - -```ruby -akka { - # Log the complete configuration at INFO level when the actor system is started. - # This is useful when you are uncertain of what configuration is used. - log-config-on-start = on -} -``` - -If you want very detailed logging of all automatically received messages that are processed -by Actors: - -```ruby -akka { - actor { - debug { - # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill et.c.) - autoreceive = on - } - } -} -``` - -If you want very detailed logging of all lifecycle changes of Actors (restarts, deaths etc): - -```ruby -akka { - actor { - debug { - # enable DEBUG logging of actor lifecycle changes - lifecycle = on - } - } -} -``` - -If you want unhandled messages logged at DEBUG: - -```ruby -akka { - actor { - debug { - # enable DEBUG logging of unhandled messages - unhandled = on - } - } -} -``` - -If you want very detailed logging of all events, transitions and timers of FSM Actors that extend LoggingFSM: - -```ruby -akka { - actor { - debug { - # enable DEBUG logging of all LoggingFSMs for events, transitions and timers - fsm = on - } - } -} -``` - -If you want to monitor subscriptions (subscribe/unsubscribe) on the ActorSystem.eventStream: - -```ruby -akka { - actor { - debug { - # enable DEBUG logging of subscription changes on the eventStream - event-stream = on - } - } -} -``` - - -### Auxiliary remote logging options - -If you want to see all messages that are sent through remoting at DEBUG log level, use the following config option. Note that this logs the messages as they are sent by the transport layer, not by an actor. - -```ruby -akka { - remote { - # If this is "on", Akka will log all outbound messages at DEBUG level, - # if off then they are not logged - log-sent-messages = on - } -} -``` - -If you want to see all messages that are received through remoting at DEBUG log level, use the following config option. Note that this logs the messages as they are sent by the transport layer, not by an actor. 
- -```ruby -akka { - remote { - # If this is "on", Akka will log all inbound messages at DEBUG level, - # if off then they are not logged - log-received-messages = on - } -} -``` - -If you want to see message types with payload size in bytes larger than -a specified limit at INFO log level: - -```ruby -akka { - remote { - # Logging of message types with payload size in bytes larger than - # this value. Maximum detected size per message type is logged once, - # with an increase threshold of 10%. - # By default this feature is turned off. Activate it by setting the property to - # a value in bytes, such as 1000b. Note that for all messages larger than this - # limit there will be extra performance and scalability cost. - log-frame-size-exceeding = 1000b - } -} -``` - -Also see the logging options for TestKit: @ref:[actor.logging-java](testing.md#actor-logging). - -### Turn Off Logging - -To turn off logging you can configure the log levels to be `OFF` like this. - -```ruby -akka { - stdout-loglevel = "OFF" - loglevel = "OFF" -} -``` - -The `stdout-loglevel` is only in effect during system startup and shutdown, and setting -it to `OFF` as well, ensures that nothing gets logged during system startup or shutdown. - -## Loggers - -Logging is performed asynchronously through an event bus. Log events are processed by an event handler actor -that receives the log events in the same order they were emitted. - -@@@ note - -The event handler actor does not have a bounded inbox and is run on the default dispatcher. This means -that logging extreme amounts of data may affect your application badly. This can be somewhat mitigated by using an async logging backend though. (See [Using the SLF4J API directly](#slf4j-directly)) - -@@@ - -You can configure which event handlers are created at system start-up and listen to logging events. That is done using the -`loggers` element in the @ref:[configuration](general/configuration.md). -Here you can also define the log level. More fine grained filtering based on the log source -can be implemented in a custom `LoggingFilter`, which can be defined in the `logging-filter` -configuration property. - -```ruby -akka { - # Loggers to register at boot time (akka.event.Logging$DefaultLogger logs - # to STDOUT) - loggers = ["akka.event.Logging$DefaultLogger"] - # Options: OFF, ERROR, WARNING, INFO, DEBUG - loglevel = "DEBUG" -} -``` - -The default one logs to STDOUT and is registered by default. It is not intended to be used for production. There is also an [SLF4J](#slf4j) -logger available in the 'akka-slf4j' module. - -Example of creating a listener: - -@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports #imports-listener } - -@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #my-event-listener } - -## Logging to stdout during startup and shutdown - -While the actor system is starting up and shutting down the configured `loggers` are not used. -Instead log messages are printed to stdout (System.out). The default log level for this -stdout logger is `WARNING` and it can be silenced completely by setting -`akka.stdout-loglevel=OFF`. - - -## SLF4J - -Akka provides a logger for [SL4FJ](http://www.slf4j.org/). This module is available in the 'akka-slf4j.jar'. -It has a single dependency: the slf4j-api jar. In your runtime, you also need a SLF4J backend. 
We recommend [Logback](http://logback.qos.ch/): - -```xml - - ch.qos.logback - logback-classic - 1.2.3 - -``` - -You need to enable the Slf4jLogger in the `loggers` element in -the @ref:[configuration](general/configuration.md). Here you can also define the log level of the event bus. -More fine grained log levels can be defined in the configuration of the SLF4J backend -(e.g. logback.xml). You should also define `akka.event.slf4j.Slf4jLoggingFilter` in -the `logging-filter` configuration property. It will filter the log events using the backend -configuration (e.g. logback.xml) before they are published to the event bus. - -@@@ warning - -If you set the `loglevel` to a higher level than "DEBUG", any DEBUG events will be filtered -out already at the source and will never reach the logging backend, regardless of how the backend -is configured. - -@@@ - -```ruby -akka { - loggers = ["akka.event.slf4j.Slf4jLogger"] - loglevel = "DEBUG" - logging-filter = "akka.event.slf4j.Slf4jLoggingFilter" -} -``` - -One gotcha is that the timestamp is attributed in the event handler, not when actually doing the logging. - -The SLF4J logger selected for each log event is chosen based on the -`Class` of the log source specified when creating the -`LoggingAdapter`, unless that was given directly as a string in which -case that string is used (i.e. `LoggerFactory.getLogger(Class c)` is used in -the first case and `LoggerFactory.getLogger(String s)` in the second). - -@@@ note - -Beware that the actor system’s name is appended to a `String` log -source if the LoggingAdapter was created giving an `ActorSystem` to -the factory. If this is not intended, give a `LoggingBus` instead as -shown below: - -@@@ - -```scala -final LoggingAdapter log = Logging.getLogger(system.eventStream(), "my.string"); -``` - - -### Using the SLF4J API directly - -If you use the SLF4J API directly in your application, remember that the logging operations will block -while the underlying infrastructure writes the log statements. - -This can be avoided by configuring the logging implementation to use -a non-blocking appender. Logback provides [AsyncAppender](http://logback.qos.ch/manual/appenders.html#AsyncAppender) -that does this. It also contains a feature which will drop `INFO` and `DEBUG` messages if the logging -load is high. - -### Logging Thread, Akka Source and Actor System in MDC - -Since the logging is done asynchronously the thread in which the logging was performed is captured in -Mapped Diagnostic Context (MDC) with attribute name `sourceThread`. -With Logback the thread name is available with `%X{sourceThread}` specifier within the pattern layout configuration: - -``` - - - %date{ISO8601} %-5level %logger{36} %X{sourceThread} - %msg%n - - -``` - -@@@ note - -It will probably be a good idea to use the `sourceThread` MDC value also in -non-Akka parts of the application in order to have this property consistently -available in the logs. - -@@@ - -Another helpful facility is that Akka captures the actor’s address when -instantiating a logger within it, meaning that the full instance identification -is available for associating log messages e.g. with members of a router. 
This -information is available in the MDC with attribute name `akkaSource`: - -``` - - - %date{ISO8601} %-5level %logger{36} %X{akkaSource} - %msg%n - - -``` - -Finally, the actor system in which the logging was performed -is available in the MDC with attribute name `sourceActorSystem`: - -``` - - - %date{ISO8601} %-5level %logger{36} %X{sourceActorSystem} - %msg%n - - -``` - -For more details on what this attribute contains—also for non-actors—please see -[How to Log](#how-to-log). - -### More accurate timestamps for log output in MDC - -Akka's logging is asynchronous which means that the timestamp of a log entry is taken from -when the underlying logger implementation is called, which can be surprising at first. -If you want to more accurately output the timestamp, use the MDC attribute `akkaTimestamp`: - -``` - - - %X{akkaTimestamp} %-5level %logger{36} %X{akkaSource} - %msg%n - - -``` - -### MDC values defined by the application - -One useful feature available in Slf4j is [MDC](http://logback.qos.ch/manual/mdc.html), -Akka has a way to let the application specify custom values, you just need to get a -specialized `LoggingAdapter`, the `DiagnosticLoggingAdapter`. In order to -get it you can use the factory, providing an AbstractActor as logSource: - -```scala -// Within your AbstractActor -final DiagnosticLoggingAdapter log = Logging.getLogger(this); -``` - -Once you have the logger, you just need to add the custom values before you log something. -This way, the values will be put in the SLF4J MDC right before appending the log and removed after. - -@@@ note - -The cleanup (removal) should be done in the actor at the end, -otherwise, the next message will log with same MDC values, -if it is not set to a new map. Use `log.clearMDC()`. - -@@@ - -@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports-mdc } - -@@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #mdc-actor } - -Now, the values will be available in the MDC, so you can use them in the layout pattern: - -``` - - - - %-5level %logger{36} [req: %X{requestId}, visitor: %X{visitorId}] - %msg%n - - - -``` - -### Using Markers - -Some logging libraries allow, in addition to MDC data, attaching so called "markers" to log statements. -These are used to filter out rare and special events, for example you might want to mark logs that detect -some malicious activity and mark them with a `SECURITY` tag, and in your appender configuration make these -trigger emails and other notifications immediately. - -Markers are available through the LoggingAdapters, when obtained via `Logging.withMarker`. -The first argument passed into all log calls then should be a `akka.event.LogMarker`. - -The slf4j bridge provided by akka in `akka-slf4j` will automatically pick up this marker value and make it available to SLF4J. -For example you could use it like this: - -``` -%date{ISO8601} [%marker][%level] [%msg]%n -``` - -A more advanced (including most Akka added information) example pattern would be: - -``` -%date{ISO8601} level=[%level] marker=[%marker] logger=[%logger] akkaSource=[%X{akkaSource}] sourceActorSystem=[%X{sourceActorSystem}] sourceThread=[%X{sourceThread}] mdc=[ticket-#%X{ticketNumber}: %X{ticketDesc}] - msg=[%msg]%n----%n -``` - - -## java.util.logging - -Akka includes a logger for [java.util.logging](https://docs.oracle.com/javase/8/jdocs/api/java/util/logging/package-summary.html#package.description). 
- -You need to enable the `akka.event.jul.JavaLogger` in the `loggers` element in -the @ref:[configuration](general/configuration.md). Here you can also define the log level of the event bus. -More fine grained log levels can be defined in the configuration of the logging backend. -You should also define `akka.event.jul.JavaLoggingFilter` in -the `logging-filter` configuration property. It will filter the log events using the backend -configuration before they are published to the event bus. - -@@@ warning - -If you set the `loglevel` to a higher level than "DEBUG", any DEBUG events will be filtered -out already at the source and will never reach the logging backend, regardless of how the backend -is configured. - -@@@ - -```ruby -akka { - loglevel = DEBUG - loggers = ["akka.event.jul.JavaLogger"] - logging-filter = "akka.event.jul.JavaLoggingFilter" -} -``` - -One gotcha is that the timestamp is attributed in the event handler, not when actually doing the logging. - -The `java.util.logging.Logger` selected for each log event is chosen based on the -`Class` of the log source specified when creating the -`LoggingAdapter`, unless that was given directly as a string in which -case that string is used (i.e. `LoggerFactory.getLogger(Class c)` is used in -the first case and `LoggerFactory.getLogger(String s)` in the second). - -@@@ note - -Beware that the actor system’s name is appended to a `String` log -source if the LoggingAdapter was created giving an `ActorSystem` to -the factory. If this is not intended, give a `LoggingBus` instead as -shown below: - -@@@ - -```scala -final LoggingAdapter log = Logging.getLogger(system.eventStream(), "my.string"); -``` diff --git a/akka-docs/src/main/paradox/java/logging.md b/akka-docs/src/main/paradox/java/logging.md new file mode 120000 index 0000000000..b8216835a7 --- /dev/null +++ b/akka-docs/src/main/paradox/java/logging.md @@ -0,0 +1 @@ +../scala/logging.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/mailboxes.md b/akka-docs/src/main/paradox/java/mailboxes.md deleted file mode 100644 index 0bed48c648..0000000000 --- a/akka-docs/src/main/paradox/java/mailboxes.md +++ /dev/null @@ -1,273 +0,0 @@ -# Mailboxes - -An Akka `Mailbox` holds the messages that are destined for an `Actor`. -Normally each `Actor` has its own mailbox, but with for example a `BalancingPool` -all routees will share a single mailbox instance. - -## Mailbox Selection - -### Requiring a Message Queue Type for an Actor - -It is possible to require a certain type of message queue for a certain type of actor -by having that actor implement the parameterized interface `RequiresMessageQueue`. Here is -an example: - -@@snip [MyBoundedActor.java]($code$/java/jdocs/actor/MyBoundedActor.java) { #my-bounded-untyped-actor } - -The type parameter to the `RequiresMessageQueue` interface needs to be mapped to a mailbox in -configuration like this: - -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #bounded-mailbox-config #required-mailbox-config } - -Now every time you create an actor of type `MyBoundedActor` it will try to get a bounded -mailbox. If the actor has a different mailbox configured in deployment, either directly or via -a dispatcher with a specified mailbox type, then that will override this mapping. - -@@@ note - -The type of the queue in the mailbox created for an actor will be checked against the required type in the -interface and if the queue doesn't implement the required type then actor creation will fail. 
- -@@@ - -### Requiring a Message Queue Type for a Dispatcher - -A dispatcher may also have a requirement for the mailbox type used by the -actors running on it. An example is the BalancingDispatcher which requires a -message queue that is thread-safe for multiple concurrent consumers. Such a -requirement is formulated within the dispatcher configuration section like -this: - -``` -my-dispatcher { - mailbox-requirement = org.example.MyInterface -} -``` - -The given requirement names a class or interface which will then be ensured to -be a supertype of the message queue’s implementation. In case of a -conflict—e.g. if the actor requires a mailbox type which does not satisfy this -requirement—then actor creation will fail. - -### How the Mailbox Type is Selected - -When an actor is created, the `ActorRefProvider` first determines the -dispatcher which will execute it. Then the mailbox is determined as follows: - - 1. If the actor’s deployment configuration section contains a `mailbox` key -then that names a configuration section describing the mailbox type to be -used. - 2. If the actor’s `Props` contains a mailbox selection—i.e. `withMailbox` -was called on it—then that names a configuration section describing the -mailbox type to be used. - 3. If the dispatcher’s configuration section contains a `mailbox-type` key -the same section will be used to configure the mailbox type. - 4. If the actor requires a mailbox type as described above then the mapping for -that requirement will be used to determine the mailbox type to be used; if -that fails then the dispatcher’s requirement—if any—will be tried instead. - 5. If the dispatcher requires a mailbox type as described above then the -mapping for that requirement will be used to determine the mailbox type to -be used. - 6. The default mailbox `akka.actor.default-mailbox` will be used. - -### Default Mailbox - -When the mailbox is not specified as described above the default mailbox -is used. By default it is an unbounded mailbox, which is backed by a -`java.util.concurrent.ConcurrentLinkedQueue`. - -`SingleConsumerOnlyUnboundedMailbox` is an even more efficient mailbox, and -it can be used as the default mailbox, but it cannot be used with a BalancingDispatcher. - -Configuration of `SingleConsumerOnlyUnboundedMailbox` as default mailbox: - -``` -akka.actor.default-mailbox { - mailbox-type = "akka.dispatch.SingleConsumerOnlyUnboundedMailbox" -} -``` - -### Which Configuration is passed to the Mailbox Type - -Each mailbox type is implemented by a class which extends `MailboxType` -and takes two constructor arguments: a `ActorSystem.Settings` object and -a `Config` section. The latter is computed by obtaining the named -configuration section from the actor system’s configuration, overriding its -`id` key with the configuration path of the mailbox type and adding a -fall-back to the default mailbox configuration section. - -## Builtin Mailbox Implementations - -Akka comes shipped with a number of mailbox implementations: - - * - **UnboundedMailbox** (default) - * The default mailbox - * Backed by a `java.util.concurrent.ConcurrentLinkedQueue` - * Blocking: No - * Bounded: No - * Configuration name: `"unbounded"` or `"akka.dispatch.UnboundedMailbox"` - * - **SingleConsumerOnlyUnboundedMailbox** - This queue may or may not be faster than the default one depending on your use-case—be sure to benchmark properly! 
- * Backed by a Multiple-Producer Single-Consumer queue, cannot be used with `BalancingDispatcher` - * Blocking: No - * Bounded: No - * Configuration name: `"akka.dispatch.SingleConsumerOnlyUnboundedMailbox"` - * - **NonBlockingBoundedMailbox** - * Backed by a very efficient Multiple-Producer Single-Consumer queue - * Blocking: No (discards overflowing messages into deadLetters) - * Bounded: Yes - * Configuration name: `"akka.dispatch.NonBlockingBoundedMailbox"` - * - **UnboundedControlAwareMailbox** - * Delivers messages that extend `akka.dispatch.ControlMessage` with higher priority - * Backed by two `java.util.concurrent.ConcurrentLinkedQueue` - * Blocking: No - * Bounded: No - * Configuration name: "akka.dispatch.UnboundedControlAwareMailbox" - * - **UnboundedPriorityMailbox** - * Backed by a `java.util.concurrent.PriorityBlockingQueue` - * Delivery order for messages of equal priority is undefined - contrast with the UnboundedStablePriorityMailbox - * Blocking: No - * Bounded: No - * Configuration name: "akka.dispatch.UnboundedPriorityMailbox" - * - **UnboundedStablePriorityMailbox** - * Backed by a `java.util.concurrent.PriorityBlockingQueue` wrapped in an `akka.util.PriorityQueueStabilizer` - * FIFO order is preserved for messages of equal priority - contrast with the UnboundedPriorityMailbox - * Blocking: No - * Bounded: No - * Configuration name: "akka.dispatch.UnboundedStablePriorityMailbox" - -Other bounded mailbox implementations which will block the sender if the capacity is reached and -configured with non-zero `mailbox-push-timeout-time`. - -@@@ note - -The following mailboxes should only be used with zero `mailbox-push-timeout-time`. - -@@@ - - * **BoundedMailbox** - * Backed by a `java.util.concurrent.LinkedBlockingQueue` - * Blocking: Yes if used with non-zero `mailbox-push-timeout-time`, otherwise No - * Bounded: Yes - * Configuration name: "bounded" or "akka.dispatch.BoundedMailbox" - * **BoundedPriorityMailbox** - * Backed by a `java.util.PriorityQueue` wrapped in an `akka.util.BoundedBlockingQueue` - * Delivery order for messages of equal priority is undefined - contrast with the `BoundedStablePriorityMailbox` - * Blocking: Yes if used with non-zero `mailbox-push-timeout-time`, otherwise No - * Bounded: Yes - * Configuration name: `"akka.dispatch.BoundedPriorityMailbox"` - * **BoundedStablePriorityMailbox** - * Backed by a `java.util.PriorityQueue` wrapped in an `akka.util.PriorityQueueStabilizer` and an `akka.util.BoundedBlockingQueue` - * FIFO order is preserved for messages of equal priority - contrast with the BoundedPriorityMailbox - * Blocking: Yes if used with non-zero `mailbox-push-timeout-time`, otherwise No - * Bounded: Yes - * Configuration name: "akka.dispatch.BoundedStablePriorityMailbox" - * **BoundedControlAwareMailbox** - * Delivers messages that extend `akka.dispatch.ControlMessage` with higher priority - * Backed by two `java.util.concurrent.ConcurrentLinkedQueue` and blocking on enqueue if capacity has been reached - * Blocking: Yes if used with non-zero `mailbox-push-timeout-time`, otherwise No - * Bounded: Yes - * Configuration name: "akka.dispatch.BoundedControlAwareMailbox" - -## Mailbox configuration examples - -### PriorityMailbox - -How to create a PriorityMailbox: - -@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #prio-mailbox } - -And then add it to the configuration: - -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher-config } - -And then an 
example on how you would use it: - -@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #prio-dispatcher } - -It is also possible to configure a mailbox type directly like this: - -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config-java #mailbox-deployment-config } - -And then use it either from deployment like this: - -@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-config } - -Or code like this: - -@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-code } - -### ControlAwareMailbox - -A `ControlAwareMailbox` can be very useful if an actor needs to be able to receive control messages -immediately no matter how many other messages are already in its mailbox. - -It can be configured like this: - -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-config } - -Control messages need to extend the `ControlMessage` trait: - -@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-mailbox-messages } - -And then an example on how you would use it: - -@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-dispatcher } - -## Creating your own Mailbox type - -An example is worth a thousand quacks: - -@@snip [MyUnboundedMailbox.java]($code$/java/jdocs/dispatcher/MyUnboundedMailbox.java) { #mailbox-implementation-example } - -@@snip [MyUnboundedMessageQueueSemantics.java]($code$/java/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java) { #mailbox-implementation-example } - -And then you just specify the FQCN of your MailboxType as the value of the "mailbox-type" in the dispatcher -configuration, or the mailbox configuration. - -@@@ note - -Make sure to include a constructor which takes -`akka.actor.ActorSystem.Settings` and `com.typesafe.config.Config` -arguments, as this constructor is invoked reflectively to construct your -mailbox type. The config passed in as second argument is that section from -the configuration which describes the dispatcher or mailbox setting using -this mailbox type; the mailbox type will be instantiated once for each -dispatcher or mailbox setting using it. - -@@@ - -You can also use the mailbox as a requirement on the dispatcher like this: - -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #custom-mailbox-config-java } - -Or by defining the requirement on your actor class like this: - -@@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #require-mailbox-on-actor } - -## Special Semantics of `system.actorOf` - -In order to make `system.actorOf` both synchronous and non-blocking while -keeping the return type `ActorRef` (and the semantics that the returned -ref is fully functional), special handling takes place for this case. Behind -the scenes, a hollow kind of actor reference is constructed, which is sent to -the system’s guardian actor who actually creates the actor and its context and -puts those inside the reference. Until that has happened, messages sent to the -`ActorRef` will be queued locally, and only upon swapping the real -filling in will they be transferred into the real mailbox. Thus, - -```scala -final Props props = ... 
- -// this actor uses MyCustomMailbox, which is assumed to be a singleton -system.actorOf(props.withDispatcher("myCustomMailbox")).tell("bang", sender); -assert(MyCustomMailbox.getInstance().getLastEnqueued().equals("bang")); -``` - -will probably fail; you will have to allow for some time to pass and retry the -check à la `TestKit.awaitCond`. \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/mailboxes.md b/akka-docs/src/main/paradox/java/mailboxes.md new file mode 120000 index 0000000000..9050f2cda1 --- /dev/null +++ b/akka-docs/src/main/paradox/java/mailboxes.md @@ -0,0 +1 @@ +../scala/mailboxes.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/remoting-artery.md b/akka-docs/src/main/paradox/java/remoting-artery.md deleted file mode 100644 index 8f639ead2e..0000000000 --- a/akka-docs/src/main/paradox/java/remoting-artery.md +++ /dev/null @@ -1,793 +0,0 @@ -# Remoting (codename Artery) - -@@@ note - -This page describes the @ref:[may change](common/may-change.md) remoting subsystem, codenamed *Artery*, that will eventually -replace the old remoting implementation. For the current stable remoting system please refer to @ref:[Remoting](remoting.md). - -@@@ - -Remoting enables Actor systems on different hosts or JVMs to communicate with each other. By enabling remoting -the system will start listening on a provided network address and also gains the ability to connect to other -systems through the network. From the application's perspective there is no API difference between local or remote -systems: `ActorRef` instances that point to remote systems look exactly the same as local ones: they can be -sent messages to, watched, etc. -Every `ActorRef` contains hostname and port information and can be passed around even on the network. This means -that on a network every `ActorRef` is a unique identifier of an actor on that network. - -Remoting is not a server-client technology. All systems using remoting can contact any other system on the network -if they possess an `ActorRef` pointing to those systems. This means that every system that is remoting enabled -acts as a "server" to which arbitrary systems on the same network can connect. - -## What is new in Artery - -Artery is a reimplementation of the old remoting module aimed at improving performance and stability. It is mostly -source compatible with the old implementation and it is a drop-in replacement in many cases. Main features -of Artery compared to the previous implementation: - - * Based on [Aeron](https://github.com/real-logic/Aeron) (UDP) instead of TCP - * Focused on high-throughput, low-latency communication - * Isolation of internal control messages from user messages improving stability and reducing false failure detection -in case of heavy traffic by using a dedicated subchannel.
- * Mostly allocation-free operation - * Support for a separate subchannel for large messages to avoid interference with smaller messages - * Compression of actor paths on the wire to reduce overhead for smaller messages - * Support for faster serialization/deserialization using ByteBuffers directly - * Built-in Flight-Recorder to help debugging implementation issues without polluting users' logs with implementation -specific events - * Providing protocol stability across major Akka versions to support rolling updates of large-scale systems - -The main incompatible change from the previous implementation is that the protocol field of the string representation of an -`ActorRef` is always *akka* instead of the previously used *akka.tcp* or *akka.ssl.tcp*. Configuration properties -are also different. - -## Preparing your ActorSystem for Remoting - -Akka remoting is a separate jar file. Make sure that you have the following dependency in your project: - -@@@vars -``` -<dependency> -  <groupId>com.typesafe.akka</groupId> -  <artifactId>akka-remote_$scala.binary_version$</artifactId> -  <version>$akka.version$</version> -</dependency> -``` -@@@ - -To enable remote capabilities in your Akka project you should, at a minimum, add the following changes -to your `application.conf` file: - -``` -akka { - actor { - provider = remote - } - remote { - artery { - enabled = on - canonical.hostname = "127.0.0.1" - canonical.port = 25520 - } - } -} -``` - -As you can see in the example above there are four things you need to add to get started: - - * Change provider from `local` to `remote` - * Enable Artery to use it as the remoting implementation - * Add host name - the machine you want to run the actor system on; this host -name is exactly what is passed to remote systems in order to identify this -system and consequently used for connecting back to this system if need be, -hence set it to a reachable IP address or resolvable name in case you want to -communicate across the network. - * Add port number - the port the actor system should listen on, set to 0 to have it chosen automatically - -@@@ note - -The port number needs to be unique for each actor system on the same machine even if the actor -systems have different names. This is because each actor system has its own networking subsystem -listening for connections and handling messages so as not to interfere with other actor systems. - -@@@ - -The example above only illustrates the bare minimum of properties you have to add to enable remoting. -All settings are described in [Remote Configuration](#remote-configuration-artery). - -@@@ note - -Aeron requires a 64-bit JVM to work reliably. - -@@@ - -### Canonical address - -In order for remoting to work properly, where each system can send messages to any other system on the same network -(for example a system forwards a message to a third system, and the third replies directly to the sender system), -it is essential for every system to have a *unique, globally reachable* address and port. This address is part of the -unique name of the system and will be used by other systems to open a connection to it and send messages. This means -that if a host has multiple names (different DNS records pointing to the same IP address) then only one of these -can be *canonical*. If a message arrives at a system but it contains a different hostname than the expected canonical -name then the message will be dropped.
If multiple names for a system were allowed, then equality checks among -`ActorRef` instances would no longer be trusted and this would violate the fundamental assumption that -an actor has a globally unique reference on a given network. As a consequence, this also means that localhost addresses -(e.g. *127.0.0.1*) cannot be used in general (apart from local development) since they are not unique addresses in a -real network. - -In cases where Network Address Translation (NAT) is used or other network bridging is involved, it is important -to configure the system so that it understands that there is a difference between its externally visible, canonical -address and the host-port pair that is used to listen for connections. See [Akka behind NAT or in a Docker container](#remote-configuration-nat-artery) -for details. - -## Acquiring references to remote actors - -In order to communicate with an actor, it is necessary to have its `ActorRef`. In the local case it is usually -the creator of the actor (the caller of `actorOf()`) who gets the `ActorRef` for an actor that it can -then send to other actors. In other words: - - * An Actor can get a remote Actor's reference simply by receiving a message from it (as it's available as `getSender()` then), -or inside of a remote message (e.g. *PleaseReply(message: String, remoteActorRef: ActorRef)*) - -Alternatively, an actor can look up another actor located at a known path using -`ActorSelection`. These methods are available even in remoting enabled systems: - - * Remote Lookup : used to look up an actor on a remote node with `actorSelection(path)` - * Remote Creation : used to create an actor on a remote node with `actorOf(Props(...), actorName)` - -In the next sections the two alternatives are described in detail. - -### Looking up Remote Actors - -`actorSelection(path)` will obtain an `ActorSelection` to an Actor on a remote node, e.g.: - -``` -ActorSelection selection = - context.actorSelection("akka://actorSystemName@10.0.0.1:25520/user/actorName"); -``` - -As you can see from the example above the following pattern is used to find an actor on a remote node: - -``` -akka://<actor system name>@<hostname>:<port>/<actor path> -``` - -@@@ note - -Unlike with earlier remoting, the protocol field is always *akka* as pluggable transports are no longer supported. - -@@@ - -Once you have obtained a selection to the actor you can interact with it in the same way you would with a local actor, e.g.: - -``` -selection.tell("Pretty awesome feature", getSelf()); -``` - -To acquire an `ActorRef` for an `ActorSelection` you need to -send a message to the selection and use the `sender` reference of the reply from -the actor. There is a built-in `Identify` message that all Actors will understand -and automatically reply to with an `ActorIdentity` message containing the -`ActorRef`. This can also be done with the `resolveOne` method of -the `ActorSelection`, which returns a `Future` of the matching -`ActorRef`. - -For more details on how actor addresses and paths are formed and used, please refer to @ref:[Actor References, Paths and Addresses](general/addressing.md). - -@@@ note - -Message sends to actors that are actually in the sending actor system do not -get delivered via the remote actor ref provider. They're delivered directly, -by the local actor ref provider.
- -Aside from providing better performance, this also means that if the hostname -you configure remoting to listen as cannot actually be resolved from within -the very same actor system, such messages will (perhaps counterintuitively) -be delivered just fine. - -@@@ - -### Creating Actors Remotely - -If you want to use the creation functionality in Akka remoting you have to further amend the -`application.conf` file in the following way (only showing deployment section): - -``` -akka { - actor { - deployment { - /sampleActor { - remote = "akka://sampleActorSystem@127.0.0.1:2553" - } - } - } -} -``` - -The configuration above instructs Akka to react when an actor with path `/sampleActor` is created, i.e. -using `system.actorOf(Props(...), "sampleActor")`. This specific actor will not be directly instantiated, -but instead the remote daemon of the remote system will be asked to create the actor, -which in this sample corresponds to `sampleActorSystem@127.0.0.1:2553`. - -Once you have configured the properties above you would do the following in code: - -@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #sample-actor } - -The actor class `SampleActor` has to be available to the runtimes using it, i.e. the classloader of the -actor systems has to have a JAR containing the class. - -@@@ note - -In order to ensure serializability of `Props` when passing constructor -arguments to the actor being created, do not make the factory an inner class: -this will inherently capture a reference to its enclosing object, which in -most cases is not serializable. It is best to create a factory method in the -companion object of the actor’s class. - -Serializability of all Props can be tested by setting the configuration item -`akka.actor.serialize-creators=on`. Only Props whose `deploy` has -`LocalScope` are exempt from this check. - -@@@ - -You can use asterisks as wildcard matches for the actor paths, so you could specify: -`/*/sampleActor` and that would match all `sampleActor` on that level in the hierarchy. -You can also use wildcard in the last position to match all actors at a certain level: -`/someParent/*`. Non-wildcard matches always have higher priority to match than wildcards, so: -`/foo/bar` is considered **more specific** than `/foo/*` and only the highest priority match is used. -Please note that it **cannot** be used to partially match section, like this: `/foo*/bar`, `/f*o/bar` etc. - -### Programmatic Remote Deployment - -To allow dynamically deployed systems, it is also possible to include -deployment configuration in the `Props` which are used to create an -actor: this information is the equivalent of a deployment section from the -configuration file, and if both are given, the external configuration takes -precedence. - -With these imports: - -@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #import } - -and a remote address like this: - -@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #make-address-artery } - -you can advise the system to create a child on that remote node like so: - -@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #deploy } - -### Remote deployment whitelist - -As remote deployment can potentially be abused by both users and even attackers a whitelist feature -is available to guard the ActorSystem from deploying unexpected actors. 
Please note that remote deployment -is *not* remote code loading, the Actors class to be deployed onto a remote system needs to be present on that -remote system. This still however may pose a security risk, and one may want to restrict remote deployment to -only a specific set of known actors by enabling the whitelist feature. - -To enable remote deployment whitelisting set the `akka.remote.deployment.enable-whitelist` value to `on`. -The list of allowed classes has to be configured on the "remote" system, in other words on the system onto which -others will be attempting to remote deploy Actors. That system, locally, knows best which Actors it should or -should not allow others to remote deploy onto it. The full settings section may for example look like this: - -@@snip [RemoteDeploymentWhitelistSpec.scala]($akka$/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala) { #whitelist-config } - -Actor classes not included in the whitelist will not be allowed to be remote deployed onto this system. - - -## Remote Security - -An `ActorSystem` should not be exposed via Akka Remote (Artery) over plain Aeron/UDP to an untrusted network (e.g. internet). -It should be protected by network security, such as a firewall. There is currently no support for encryption with Artery -so if network security is not considered as enough protection the classic remoting with -@ref:[TLS and mutual authentication](remoting.md#remote-tls) should be used. - -Best practice is that Akka remoting nodes should only be accessible from the adjacent network. - -It is also security best-practice to [disable the Java serializer](#disable-java-serializer-java-artery) because of -its multiple [known attack surfaces](https://community.hpe.com/t5/Security-Research/The-perils-of-Java-deserialization/ba-p/6838995). - -### Untrusted Mode - -As soon as an actor system can connect to another remotely, it may in principle -send any possible message to any actor contained within that remote system. One -example may be sending a `PoisonPill` to the system guardian, shutting -that system down. This is not always desired, and it can be disabled with the -following setting: - -``` -akka.remote.artery.untrusted-mode = on -``` - -This disallows sending of system messages (actor life-cycle commands, -DeathWatch, etc.) and any message extending `PossiblyHarmful` to the -system on which this flag is set. Should a client send them nonetheless they -are dropped and logged (at DEBUG level in order to reduce the possibilities for -a denial of service attack). `PossiblyHarmful` covers the predefined -messages like `PoisonPill` and `Kill`, but it can also be added -as a marker trait to user-defined messages. - -@@@ warning - -Untrusted mode does not give full protection against attacks by itself. -It makes it slightly harder to perform malicious or unintended actions but -it should be complemented with [disabled Java serializer](#disable-java-serializer-java-artery). -Additional protection can be achieved when running in an untrusted network by -network security (e.g. firewalls). - -@@@ - -Messages sent with actor selection are by default discarded in untrusted mode, but -permission to receive actor selection messages can be granted to specific actors -defined in configuration: - -``` -akka.remote.artery.trusted-selection-paths = ["/user/receptionist", "/user/namingService"] -``` - -The actual message must still not be of type `PossiblyHarmful`. 
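For illustration, here is a minimal sketch of how an application-defined message can opt in to this filtering by extending the `PossiblyHarmful` marker mentioned above; the class and field names are made up for the example:

```java
import akka.actor.PossiblyHarmful;
import java.io.Serializable;

// Hypothetical administrative command. Because it implements the PossiblyHarmful
// marker, a receiving system running with akka.remote.artery.untrusted-mode = on
// will drop it when it arrives via the remoting layer, while local senders can
// still use it as a normal message.
public final class ShutdownWorker implements PossiblyHarmful, Serializable {
  public final String reason;

  public ShutdownWorker(String reason) {
    this.reason = reason;
  }
}
```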
- -In summary, the following operations are ignored by a system configured in -untrusted mode when incoming via the remoting layer: - - * remote deployment (which also means no remote supervision) - * remote DeathWatch - * `system.stop()`, `PoisonPill`, `Kill` - * sending any message which extends from the `PossiblyHarmful` marker -interface, which includes `Terminated` - * messages sent with actor selection, unless the destination is defined in `trusted-selection-paths`. - -@@@ note - -Enabling the untrusted mode does not remove the capability of the client to -freely choose the target of its message sends, which means that messages not -prohibited by the above rules can be sent to any actor in the remote system. -It is good practice for a client-facing system to only contain a well-defined -set of entry point actors, which then forward requests (possibly after -performing validation) to another actor system containing the actual worker -actors. If messaging between these two server-side systems is done using -local `ActorRef` (they can be exchanged safely between actor systems -within the same JVM), you can restrict the messages on this interface by -marking them `PossiblyHarmful` so that a client cannot forge them. - -@@@ - -## Quarantine - -Akka remoting uses Aeron as the underlying message transport. Aeron uses UDP and adds, -among other things, reliable delivery and session semantics, very similar to TCP. This means that -the order of the messages is preserved, which is needed for the @ref:[Actor message ordering guarantees](general/message-delivery-reliability.md#message-ordering). -Under normal circumstances all messages will be delivered, but there are cases when messages -may not be delivered to the destination: - - * during a network partition when the Aeron session is broken; this is automatically recovered once the partition is over - * when sending too many messages without flow control and thereby filling up the outbound send queue (`outbound-message-queue-size` config) - * if serialization or deserialization of a message fails (only that message will be dropped) - * if an unexpected exception occurs in the remoting infrastructure - -In short, Actor message delivery is “at-most-once” as described in @ref:[Message Delivery Reliability](general/message-delivery-reliability.md) - -Some messages in Akka are called system messages and those cannot be dropped because that would result -in an inconsistent state between the systems. Such messages are used for essentially two features: remote death -watch and remote deployment. These messages are delivered by Akka remoting with an “exactly-once” guarantee by -confirming each message and resending unconfirmed messages. If a system message still cannot be delivered, the -association with the destination system is irrecoverably failed, and Terminated is signaled for all watched -actors on the remote system. It is placed in a so-called quarantined state. Quarantine usually does not -happen if remote watch or remote deployment is not used. - -Each `ActorSystem` instance has a unique identifier (UID), which is important for differentiating between -incarnations of a system when it is restarted with the same hostname and port. It is the specific -incarnation (UID) that is quarantined. The only way to recover from this state is to restart one of the -actor systems. - -Messages that are sent to and received from a quarantined system will be dropped.
However, it is possible to -send messages with `actorSelection` to the address of a quarantined system, which is useful to probe if the -system has been restarted. - -An association will be quarantined when: - - * A cluster node is removed from the cluster membership. - * The remote failure detector triggers, i.e. remote watch is used. This is different when @ref:[Akka Cluster](cluster-usage.md) -is used. The unreachable observation by the cluster failure detector can go back to reachable if the network -partition heals. A cluster member is not quarantined when the failure detector triggers. - * Overflow of the system message delivery buffer, e.g. because of too many `watch` requests at the same time -(`system-message-buffer-size` config). - * An unexpected exception occurs in the control subchannel of the remoting infrastructure. - -The UID of the `ActorSystem` is exchanged in a two-way handshake when the first message is sent to -a destination. The handshake will be retried until the other system replies and no other messages will -pass through until the handshake is completed. If the handshake cannot be established within a timeout -(`handshake-timeout` config) the association is stopped (freeing up resources). Queued messages will be -dropped if the handshake cannot be established. It will not be quarantined, because the UID is unknown. -A new handshake attempt will start when the next message is sent to the destination. - -Handshake requests are actually also sent periodically to be able to establish a working connection -when the destination system has been restarted. - -### Watching Remote Actors - -Watching a remote actor is API-wise no different from watching a local actor, as described in -@ref:[Lifecycle Monitoring aka DeathWatch](actors.md#deathwatch). However, it is important to note that, unlike in the local case, remoting has to handle -the case when a remote actor does not terminate in a graceful way (which would send a system message to notify the watcher actor about -the event), but instead is hosted on a system which stopped abruptly (crashed). These situations are handled -by the built-in failure detector. - -### Failure Detector - -Under the hood remote death watch uses heartbeat messages and a failure detector to generate a `Terminated` -message from network failures and JVM crashes, in addition to graceful termination of the watched -actor. - -The heartbeat arrival times are interpreted by an implementation of -[The Phi Accrual Failure Detector](http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf). - -The suspicion level of failure is given by a value called *phi*. -The basic idea of the phi failure detector is to express the value of *phi* on a scale that -is dynamically adjusted to reflect current network conditions. - -The value of *phi* is calculated as: - -``` -phi = -log10(1 - F(timeSinceLastHeartbeat)) -``` - -where F is the cumulative distribution function of a normal distribution with mean -and standard deviation estimated from historical heartbeat inter-arrival times. - -In the [Remote Configuration](#remote-configuration-artery) you can adjust the `akka.remote.watch-failure-detector.threshold` -to define when a *phi* value is considered to be a failure. - -A low `threshold` is prone to generate many false positives but ensures -a quick detection in the event of a real crash. Conversely, a high `threshold` -generates fewer mistakes but needs more time to detect actual crashes. The -default `threshold` is 10 and is appropriate for most situations.
However in -cloud environments, such as Amazon EC2, the value could be increased to 12 in -order to account for network issues that sometimes occur on such platforms. - -The following chart illustrates how *phi* increase with increasing time since the -previous heartbeat. - -![phi1.png](../images/phi1.png) - -Phi is calculated from the mean and standard deviation of historical -inter arrival times. The previous chart is an example for standard deviation -of 200 ms. If the heartbeats arrive with less deviation the curve becomes steeper, -i.e. it is possible to determine failure more quickly. The curve looks like this for -a standard deviation of 100 ms. - -![phi2.png](../images/phi2.png) - -To be able to survive sudden abnormalities, such as garbage collection pauses and -transient network failures the failure detector is configured with a margin, -`akka.remote.watch-failure-detector.acceptable-heartbeat-pause`. You may want to -adjust the [Remote Configuration](#remote-configuration-artery) of this depending on you environment. -This is how the curve looks like for `acceptable-heartbeat-pause` configured to -3 seconds. - -![phi3.png](../images/phi3.png) - -## Serialization - -When using remoting for actors you must ensure that the `props` and `messages` used for -those actors are serializable. Failing to do so will cause the system to behave in an unintended way. - -For more information please see @ref:[Serialization](serialization.md). - - -### ByteBuffer based serialization - -Artery introduces a new serialization mechanism which allows the `ByteBufferSerializer` to directly write into a -shared `java.nio.ByteBuffer` instead of being forced to allocate and return an `Array[Byte]` for each serialized -message. For high-throughput messaging this API change can yield significant performance benefits, so we recommend -changing your serializers to use this new mechanism. - -This new API also plays well with new versions of Google Protocol Buffers and other serialization libraries, which gained -the ability to serialize directly into and from ByteBuffers. - -As the new feature only changes how bytes are read and written, and the rest of the serialization infrastructure -remained the same, we recommend reading the @ref:[Serialization](serialization.md) documentation first. - -Implementing an `akka.serialization.ByteBufferSerializer` works the same way as any other serializer, - -@@snip [ByteBufferSerializerDocTest.java]($code$/java/jdocs/actor/ByteBufferSerializerDocTest.java) { #ByteBufferSerializer-interface } - -Implementing a serializer for Artery is therefore as simple as implementing this interface, and binding the serializer -as usual (which is explained in @ref:[Serialization](serialization.md)). - -Implementations should typically extend `SerializerWithStringManifest` and in addition to the `ByteBuffer` based -`toBinary` and `fromBinary` methods also implement the array based `toBinary` and `fromBinary` methods. -The array based methods will be used when `ByteBuffer` is not used, e.g. in Akka Persistence. - -Note that the array based methods can be implemented by delegation like this: - -@@snip [ByteBufferSerializerDocTest.java]($code$/java/jdocs/actor/ByteBufferSerializerDocTest.java) { #bytebufserializer-with-manifest } - - -### Disabling the Java Serializer - -It is possible to completely disable Java Serialization for the entire Actor system. 
- -Java serialization is known to be slow and [prone to attacks](https://community.hpe.com/t5/Security-Research/The-perils-of-Java-deserialization/ba-p/6838995) -of various kinds - it never was designed for high throughput messaging after all. However, it is very -convenient to use, thus it remained the default serialization mechanism that Akka used to -serialize user messages as well as some of its internal messages in previous versions. -Since the release of Artery, Akka internals do not rely on Java serialization anymore (exceptions to that being `java.lang.Throwable` and "remote deployment"). - -@@@ note - -Akka does not use Java Serialization for any of its internal messages. -It is highly encouraged to disable java serialization, so please plan to do so at the earliest possibility you have in your project. - -One may think that network bandwidth and latency limit the performance of remote messaging, but serialization is a more typical bottleneck. - -@@@ - -For user messages, the default serializer, implemented using Java serialization, remains available and enabled. -We do however recommend to disable it entirely and utilise a proper serialization library instead in order effectively utilise -the improved performance and ability for rolling deployments using Artery. Libraries that we recommend to use include, -but are not limited to, [Kryo](https://github.com/EsotericSoftware/kryo) by using the [akka-kryo-serialization](https://github.com/romix/akka-kryo-serialization) library or [Google Protocol Buffers](https://developers.google.com/protocol-buffers/) if you want -more control over the schema evolution of your messages. - -In order to completely disable Java Serialization in your Actor system you need to add the following configuration to -your `application.conf`: - -```ruby -akka.actor.allow-java-serialization = off -``` - -This will completely disable the use of `akka.serialization.JavaSerialization` by the -Akka Serialization extension, instead `DisabledJavaSerializer` will -be inserted which will fail explicitly if attempts to use java serialization are made. - -The log messages emitted by such serializer SHOULD be be treated as potential -attacks which the serializer prevented, as they MAY indicate an external operator -attempting to send malicious messages intending to use java serialization as attack vector. -The attempts are logged with the SECURITY marker. - -Please note that this option does not stop you from manually invoking java serialization. - -Please note that this means that you will have to configure different serializers which will able to handle all of your -remote messages. Please refer to the @ref:[Serialization](serialization.md) documentation as well as [ByteBuffer based serialization](#remote-bytebuffer-serialization) to learn how to do this. - -## Routers with Remote Destinations - -It is absolutely feasible to combine remoting with @ref:[Routing](routing.md). - -A pool of remote deployed routees can be configured as: - -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-pool-artery } - -This configuration setting will clone the actor defined in the `Props` of the `remotePool` 10 -times and deploy it evenly distributed across the two given target nodes. 
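
Since the referenced `RouterDocSpec` snippet is not reproduced inline here, the deployment section it describes could look roughly like the following sketch (the actor path, system name, host addresses and port are placeholders, assuming an Artery-style `akka://` address):

```ruby
akka.actor.deployment {
  /parent/remotePool {
    router = round-robin-pool
    nr-of-instances = 10
    target.nodes = ["akka://app@10.0.0.2:25520", "akka://app@10.0.0.3:25520"]
  }
}
```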
- -A group of remote actors can be configured as: - -@@snip [RouterDocSpec.scala]($code$/scala/docs/routing/RouterDocSpec.scala) { #config-remote-round-robin-group-artery } - -This configuration setting will send messages to the defined remote actor paths. -It requires that you create the destination actors on the remote nodes with matching paths. -That is not done by the router. - - -## Remoting Sample - -You can download a ready to run @extref[remoting sample](ecs:akka-samples-remote-java) -together with a tutorial for a more hands-on experience. The source code of this sample can be found in the -@extref[Akka Samples Repository](samples:akka-sample-remote-java). - -## Performance tuning - -### Dedicated subchannel for large messages - -All the communication between user defined remote actors are isolated from the channel of Akka internal messages so -a large user message cannot block an urgent system message. While this provides good isolation for Akka services, all -user communications by default happen through a shared network connection (an Aeron stream). When some actors -send large messages this can cause other messages to suffer higher latency as they need to wait until the full -message has been transported on the shared channel (and hence, shared bottleneck). In these cases it is usually -helpful to separate actors that have different QoS requirements: large messages vs. low latency. - -Akka remoting provides a dedicated channel for large messages if configured. Since actor message ordering must -not be violated the channel is actually dedicated for *actors* instead of messages, to ensure all of the messages -arrive in send order. It is possible to assign actors on given paths to use this dedicated channel by using -path patterns that have to be specified in the actor system's configuration on both the sending and the receiving side: - -``` -akka.remote.artery.large-message-destinations = [ - "/user/largeMessageActor", - "/user/largeMessagesGroup/*", - "/user/anotherGroup/*/largeMesssages", - "/user/thirdGroup/**", -] -``` - -This means that all messages sent to the following actors will pass through the dedicated, large messages channel: - - * `/user/largeMessageActor` - * `/user/largeMessageActorGroup/actor1` - * `/user/largeMessageActorGroup/actor2` - * `/user/anotherGroup/actor1/largeMessages` - * `/user/anotherGroup/actor2/largeMessages` - * `/user/thirdGroup/actor3/` - * `/user/thirdGroup/actor4/actor5` - -Messages destined for actors not matching any of these patterns are sent using the default channel as before. - -### External, shared Aeron media driver - -The Aeron transport is running in a so called [media driver](https://github.com/real-logic/Aeron/wiki/Media-Driver-Operation). -By default, Akka starts the media driver embedded in the same JVM process as application. This is -convenient and simplifies operational concerns by only having one process to start and monitor. - -The media driver may use rather much CPU resources. If you run more than one Akka application JVM on the -same machine it can therefore be wise to share the media driver by running it as a separate process. - -The media driver has also different resource usage characteristics than a normal application and it can -therefore be more efficient and stable to run the media driver as a separate process. 
- -Given that Aeron jar files are in the classpath the standalone media driver can be started with: - -``` -java io.aeron.driver.MediaDriver -``` - -The needed classpath: - -``` -Agrona-0.5.4.jar:aeron-driver-1.0.1.jar:aeron-client-1.0.1.jar -``` - -You find those jar files on [maven central](http://search.maven.org/), or you can create a -package with your preferred build tool. - -You can pass [Aeron properties](https://github.com/real-logic/Aeron/wiki/Configuration-Options) as -command line *-D* system properties: - -``` --Daeron.dir=/dev/shm/aeron -``` - -You can also define Aeron properties in a file: - -``` -java io.aeron.driver.MediaDriver config/aeron.properties -``` - -An example of such a properties file: - -``` -aeron.mtu.length=16384 -aeron.socket.so_sndbuf=2097152 -aeron.socket.so_rcvbuf=2097152 -aeron.rcv.buffer.length=16384 -aeron.rcv.initial.window.length=2097152 -agrona.disable.bounds.checks=true - -aeron.threading.mode=SHARED_NETWORK - -# low latency settings -#aeron.threading.mode=DEDICATED -#aeron.sender.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy -#aeron.receiver.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy - -# use same director in akka.remote.artery.advanced.aeron-dir config -# of the Akka application -aeron.dir=/dev/shm/aeron -``` - -Read more about the media driver in the [Aeron documentation](https://github.com/real-logic/Aeron/wiki/Media-Driver-Operation). - -To use the external media driver from the Akka application you need to define the following two -configuration properties: - -``` -akka.remote.artery.advanced { - embedded-media-driver = off - aeron-dir = /dev/shm/aeron -} -``` - -The `aeron-dir` must match the directory you started the media driver with, i.e. the `aeron.dir` property. - -Several Akka applications can then be configured to use the same media driver by pointing to the -same directory. - -Note that if the media driver process is stopped the Akka applications that are using it will also be stopped. - -### Aeron Tuning - -See Aeron documentation about [Performance Testing](https://github.com/real-logic/Aeron/wiki/Performance-Testing). - -### Fine-tuning CPU usage latency tradeoff - -Artery has been designed for low latency and as a result it can be CPU hungry when the system is mostly idle. -This is not always desirable. It is possible to tune the tradeoff between CPU usage and latency with -the following configuration: - -``` -# Values can be from 1 to 10, where 10 strongly prefers low latency -# and 1 strongly prefers less CPU usage -akka.remote.artery.advanced.idle-cpu-level = 1 -``` - -By setting this value to a lower number, it tells Akka to do longer "sleeping" periods on its thread dedicated -for [spin-waiting](https://en.wikipedia.org/wiki/Busy_waiting) and hence reducing CPU load when there is no -immediate task to execute at the cost of a longer reaction time to an event when it actually happens. It is worth -to be noted though that during a continuously high-throughput period this setting makes not much difference -as the thread mostly has tasks to execute. This also means that under high throughput (but below maximum capacity) -the system might have less latency than at low message rates. 
- -## Internal Event Log for Debugging (Flight Recorder) - -@@@ note - -In this version ($akka.version$) the flight-recorder is disabled by default because there is no automatic -file name and path calculation implemented to make it possible to reuse the same file for every restart of -the same actor system without clashing with files produced by other systems (possibly running on the same machine). -Currently, you have to set the path and file names yourself to avoid creating an unbounded number -of files and enable flight recorder manually by adding *akka.remote.artery.advanced.flight-recorder.enabled=on* to -your configuration file. This a limitation of the current version and will not be necessary in the future. - -@@@ - -Emitting event information (logs) from internals is always a tradeoff. The events that are usable for -the Akka developers are usually too low level to be of any use for users and usually need to be fine-grained enough -to provide enough information to be able to debug issues in the internal implementation. This usually means that -these logs are hidden behind special flags and emitted at low log levels to not clutter the log output of the user -system. Unfortunately this means that during production or integration testing these flags are usually off and -events are not available when an actual failure happens - leaving maintainers in the dark about details of the event. -To solve this contradiction, remoting has an internal, high-performance event store for debug events which is always on. -This log and the events that it contains are highly specialized and not directly exposed to users, their primary purpose -is to help the maintainers of Akka to identify and solve issues discovered during daily usage. When you encounter -production issues involving remoting, you can include the flight recorder log file in your bug report to give us -more insight into the nature of the failure. - -There are various important features of this event log: - - * Flight Recorder produces a fixed size file completely encapsulating log rotation. This means that this -file will never grow in size and will not cause any unexpected disk space shortage in production. - * This file is crash resistant, i.e. its contents can be recovered even if the JVM hosting the `ActorSystem` -crashes unexpectedly. - * Very low overhead, specialized, binary logging that has no significant overhead and can be safely left enabled -for production systems. - -The location of the file can be controlled via the *akka.remote.artery.advanced.flight-recoder.destination* setting (see -@ref:[akka-remote (artery)](general/configuration.md#config-akka-remote-artery) for details). By default, a file with the *.afr* extension is produced in the temporary -directory of the operating system. In cases where the flight recorder casuses issues, it can be disabled by adding the -setting *akka.remote.artery.advanced.flight-recorder.enabled=off*, although this is not recommended. - - -## Remote Configuration - -There are lots of configuration properties that are related to remoting in Akka. We refer to the -@ref:[reference configuration](general/configuration.md#config-akka-remote-artery) for more information. 
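
As a starting point, a minimal `application.conf` enabling Artery remoting might look something like the sketch below (host name and port are placeholders; consult the reference configuration for the complete list of settings):

```ruby
akka {
  actor.provider = remote

  remote.artery {
    enabled = on
    canonical.hostname = "127.0.0.1"
    canonical.port = 25520
  }
}
```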
- -@@@ note - -Setting properties like the listening IP and port number programmatically is -best done by using something like the following: - -@@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #programmatic-artery } - -@@@ - - -### Akka behind NAT or in a Docker container - -In setups involving Network Address Translation (NAT), Load Balancers or Docker -containers the hostname and port pair that Akka binds to will be different than the "logical" -host name and port pair that is used to connect to the system from the outside. This requires -special configuration that sets both the logical and the bind pairs for remoting. - -```ruby -akka { - remote { - artery { - canonical.hostname = my.domain.com # external (logical) hostname - canonical.port = 8000 # external (logical) port - - bind.hostname = local.address # internal (bind) hostname - bind.port = 25520 # internal (bind) port - } - } -} -``` diff --git a/akka-docs/src/main/paradox/java/remoting-artery.md b/akka-docs/src/main/paradox/java/remoting-artery.md new file mode 120000 index 0000000000..c21485a8db --- /dev/null +++ b/akka-docs/src/main/paradox/java/remoting-artery.md @@ -0,0 +1 @@ +../scala/remoting-artery.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/index.md b/akka-docs/src/main/paradox/java/stream/index.md deleted file mode 100644 index f1fa4daddd..0000000000 --- a/akka-docs/src/main/paradox/java/stream/index.md +++ /dev/null @@ -1,25 +0,0 @@ -# Streams - -@@toc { depth=2 } - -@@@ index - -* [stream-introduction](stream-introduction.md) -* [stream-quickstart](stream-quickstart.md) -* [../general/stream/stream-design](../general/stream/stream-design.md) -* [stream-flows-and-basics](stream-flows-and-basics.md) -* [stream-graphs](stream-graphs.md) -* [stream-composition](stream-composition.md) -* [stream-rate](stream-rate.md) -* [stream-dynamic](stream-dynamic.md) -* [stream-customize](stream-customize.md) -* [stream-integrations](stream-integrations.md) -* [stream-error](stream-error.md) -* [stream-io](stream-io.md) -* [stream-parallelism](stream-parallelism.md) -* [stream-testkit](stream-testkit.md) -* [stages-overview](stages-overview.md) -* [stream-cookbook](stream-cookbook.md) -* [../general/stream/stream-configuration](../general/stream/stream-configuration.md) - -@@@ \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/index.md b/akka-docs/src/main/paradox/java/stream/index.md new file mode 120000 index 0000000000..a22c2ea515 --- /dev/null +++ b/akka-docs/src/main/paradox/java/stream/index.md @@ -0,0 +1 @@ +../../scala/stream/index.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/stream-cookbook.md b/akka-docs/src/main/paradox/java/stream/stream-cookbook.md deleted file mode 100644 index ae3808c1a5..0000000000 --- a/akka-docs/src/main/paradox/java/stream/stream-cookbook.md +++ /dev/null @@ -1,377 +0,0 @@ -# Streams Cookbook - -## Introduction - -This is a collection of patterns to demonstrate various usage of the Akka Streams API by solving small targeted -problems in the format of "recipes". The purpose of this page is to give inspiration and ideas how to approach -various small tasks involving streams. The recipes in this page can be used directly as-is, but they are most powerful as -starting points: customization of the code snippets is warmly encouraged. - -This part also serves as supplementary material for the main body of documentation. 
It is a good idea to have this page -open while reading the manual and look for examples demonstrating various streaming concepts -as they appear in the main body of documentation. - -If you need a quick reference of the available processing stages used in the recipes see @ref:[stages overview](stages-overview.md). - -## Working with Flows - -In this collection we show simple recipes that involve linear flows. The recipes in this section are rather -general, more targeted recipes are available as separate sections (@ref:[Buffers and working with rate](stream-rate.md), @ref:[Working with streaming IO](stream-io.md)). - -### Logging elements of a stream - -**Situation:** During development it is sometimes helpful to see what happens in a particular section of a stream. - -The simplest solution is to simply use a `map` operation and use `println` to print the elements received to the console. -While this recipe is rather simplistic, it is often suitable for a quick debug session. - -@@snip [RecipeLoggingElements.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #println-debug } - -Another approach to logging is to use `log()` operation which allows configuring logging for elements flowing through -the stream as well as completion and erroring. - -@@snip [RecipeLoggingElements.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #log-custom } - -### Flattening a stream of sequences - -**Situation:** A stream is given as a stream of sequence of elements, but a stream of elements needed instead, streaming -all the nested elements inside the sequences separately. - -The `mapConcat` operation can be used to implement a one-to-many transformation of elements using a mapper function -in the form of `In -> List`. In this case we want to map a `List` of elements to the elements in the -collection itself, so we can just call `mapConcat(l -> l)`. - -@@snip [RecipeFlattenList.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeFlattenList.java) { #flattening-lists } - -### Draining a stream to a strict collection - -**Situation:** A possibly unbounded sequence of elements is given as a stream, which needs to be collected into a Scala collection while ensuring boundedness - -A common situation when working with streams is one where we need to collect incoming elements into a Scala collection. -This operation is supported via `Sink.seq` which materializes into a `CompletionStage>`. - -The function `limit` or `take` should always be used in conjunction in order to guarantee stream boundedness, thus preventing the program from running out of memory. - -For example, this is best avoided: - -@@snip [RecipeSeq.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-unsafe } - -Rather, use `limit` or `take` to ensure that the resulting `List` will contain only up to `MAX_ALLOWED_SIZE` elements: - -@@snip [RecipeSeq.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-safe } - -### Calculating the digest of a ByteString stream - -**Situation:** A stream of bytes is given as a stream of `ByteString` s and we want to calculate the cryptographic digest -of the stream. - -This recipe uses a `GraphStage` to host a mutable `MessageDigest` class (part of the Java Cryptography -API) and update it with the bytes arriving from the stream. When the stream starts, the `onPull` handler of the -stage is called, which just bubbles up the `pull` event to its upstream. 
As a response to this pull, a ByteString -chunk will arrive (`onPush`) which we use to update the digest, then it will pull for the next chunk. - -Eventually the stream of `ByteString` s depletes and we get a notification about this event via `onUpstreamFinish`. -At this point we want to emit the digest value, but we cannot do it with `push` in this handler directly since there may -be no downstream demand. Instead we call `emit` which will temporarily replace the handlers, emit the provided value when -demand comes in and then reset the stage state. It will then complete the stage. - -@@snip [RecipeDigest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest } - -@@snip [RecipeDigest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest2 } - - -### Parsing lines from a stream of ByteStrings - -**Situation:** A stream of bytes is given as a stream of `ByteString` s containing lines terminated by line ending -characters (or, alternatively, containing binary frames delimited by a special delimiter byte sequence) which -needs to be parsed. - -The `Framing` helper class contains a convenience method to parse messages from a stream of `ByteString` s: - -@@snip [RecipeParseLines.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeParseLines.java) { #parse-lines } - -### Dealing with compressed data streams - -**Situation:** A gzipped stream of bytes is given as a stream of `ByteString` s, for example from a `FileIO` source. - -The `Compression` helper class contains convenience methods for decompressing data streams compressed with -Gzip or Deflate. - -@@snip [RecipeDecompress.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDecompress.java) { #decompress-gzip } - -### Implementing reduce-by-key - -**Situation:** Given a stream of elements, we want to calculate some aggregated value on different subgroups of the -elements. - -The "hello world" of reduce-by-key style operations is *wordcount* which we demonstrate below. Given a stream of words -we first create a new stream that groups the words according to the `i -> i` function, i.e. now -we have a stream of streams, where every substream will serve identical words. - -To count the words, we need to process the stream of streams (the actual groups -containing identical words). `groupBy` returns a `SubSource`, which -means that we transform the resulting substreams directly. In this case we use -the `reduce` combinator to aggregate the word itself and the number of its -occurrences within a `Pair`. Each substream will then -emit one final value—precisely such a pair—when the overall input completes. As -a last step we merge back these values from the substreams into one single -output stream. - -One noteworthy detail pertains to the `MAXIMUM_DISTINCT_WORDS` parameter: -this defines the breadth of the merge operation. Akka Streams is focused on -bounded resource consumption and the number of concurrently open inputs to the -merge operator describes the amount of resources needed by the merge itself. -Therefore only a finite number of substreams can be active at any given time. -If the `groupBy` operator encounters more keys than this number then the -stream cannot continue without violating its resource bound, in this case -`groupBy` will terminate with a failure. 
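
The referenced snippet shows the full Java recipe; as a rough orientation only, the same word-count idea expressed in the Scala DSL could look like the following sketch (the input words and the substream limit are made up for illustration):

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }

implicit val system = ActorSystem("wordcount")
implicit val materializer = ActorMaterializer()

val MaximumDistinctWords = 1000 // upper bound on concurrently open substreams

val words = Source(List("hello", "streams", "hello", "akka"))

val counts = words
  .groupBy(MaximumDistinctWords, identity)                // one substream per distinct word
  .map(word => word -> 1)
  .reduce((left, right) => (left._1, left._2 + right._2)) // sum the occurrences within each substream
  .mergeSubstreams                                        // merge the per-word results back into one stream
  .runWith(Sink.seq)                                      // Future[Seq[(String, Int)]]
```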
- -@@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #word-count } - -By extracting the parts specific to *wordcount* into - - * a `groupKey` function that defines the groups - * a `map` map each element to value that is used by the reduce on the substream - * a `reduce` function that does the actual reduction - -we get a generalized version below: - -@@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general } - -@@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general2 } - -@@@ note - -Please note that the reduce-by-key version we discussed above is sequential -in reading the overall input stream, in other words it is **NOT** a -parallelization pattern like MapReduce and similar frameworks. - -@@@ - -### Sorting elements to multiple groups with groupBy - -**Situation:** The `groupBy` operation strictly partitions incoming elements, each element belongs to exactly one group. -Sometimes we want to map elements into multiple groups simultaneously. - -To achieve the desired result, we attack the problem in two steps: - - * first, using a function `topicMapper` that gives a list of topics (groups) a message belongs to, we transform our -stream of `Message` to a stream of `Pair` where for each topic the message belongs to a separate pair -will be emitted. This is achieved by using `mapConcat` - * Then we take this new stream of message topic pairs (containing a separate pair for each topic a given message -belongs to) and feed it into groupBy, using the topic as the group key. - -@@snip [RecipeMultiGroupByTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeMultiGroupByTest.java) { #multi-groupby } - -## Working with Graphs - -In this collection we show recipes that use stream graph elements to achieve various goals. - -### Triggering the flow of elements programmatically - -**Situation:** Given a stream of elements we want to control the emission of those elements according to a trigger signal. -In other words, even if the stream would be able to flow (not being backpressured) we want to hold back elements until a -trigger signal arrives. - -This recipe solves the problem by simply zipping the stream of `Message` elements with the stream of `Trigger` -signals. Since `Zip` produces pairs, we simply map the output stream selecting the first element of the pair. - -@@snip [RecipeManualTrigger.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream } - -Alternatively, instead of using a `Zip`, and then using `map` to get the first element of the pairs, we can avoid -creating the pairs in the first place by using `ZipWith` which takes a two argument function to produce the output -element. If this function would return a pair of the two argument it would be exactly the behavior of `Zip` so -`ZipWith` is a generalization of zipping. - -@@snip [RecipeManualTrigger.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream-zipwith } - -### Balancing jobs to a fixed pool of workers - -**Situation:** Given a stream of jobs and a worker process expressed as a `Flow` create a pool of workers -that automatically balances incoming jobs to available workers, then merges the results. 
- -We will express our solution as a function that takes a worker flow and the number of workers to be allocated and gives -a flow that internally contains a pool of these workers. To achieve the desired result we will create a `Flow` -from a graph. - -The graph consists of a `Balance` node which is a special fan-out operation that tries to route elements to available -downstream consumers. In a `for` loop we wire all of our desired workers as outputs of this balancer element, then -we wire the outputs of these workers to a `Merge` element that will collect the results from the workers. - -To make the worker stages run in parallel we mark them as asynchronous with *async()*. - -@@snip [RecipeWorkerPool.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool } - -@@snip [RecipeWorkerPool.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool2 } - -## Working with rate - -This collection of recipes demonstrate various patterns where rate differences between upstream and downstream -needs to be handled by other strategies than simple backpressure. - -### Dropping elements - -**Situation:** Given a fast producer and a slow consumer, we want to drop elements if necessary to not slow down -the producer too much. - -This can be solved by using a versatile rate-transforming operation, `conflate`. Conflate can be thought as -a special `reduce` operation that collapses multiple upstream elements into one aggregate element if needed to keep -the speed of the upstream unaffected by the downstream. - -When the upstream is faster, the reducing process of the `conflate` starts. Our reducer function simply takes -the freshest element. This in a simple dropping operation. - -@@snip [RecipeSimpleDrop.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSimpleDrop.java) { #simple-drop } - -There is a version of `conflate` named `conflateWithSeed` that allows to express more complex aggregations, more -similar to a `fold`. - -### Dropping broadcast - -**Situation:** The default `Broadcast` graph element is properly backpressured, but that means that a slow downstream -consumer can hold back the other downstream consumers resulting in lowered throughput. In other words the rate of -`Broadcast` is the rate of its slowest downstream consumer. In certain cases it is desirable to allow faster consumers -to progress independently of their slower siblings by dropping elements if necessary. - -One solution to this problem is to append a `buffer` element in front of all of the downstream consumers -defining a dropping strategy instead of the default `Backpressure`. This allows small temporary rate differences -between the different consumers (the buffer smooths out small rate variances), but also allows faster consumers to -progress by dropping from the buffer of the slow consumers if necessary. - -@@snip [RecipeDroppyBroadcast.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast } - -@@snip [RecipeDroppyBroadcast.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast2 } - -### Collecting missed ticks - -**Situation:** Given a regular (stream) source of ticks, instead of trying to backpressure the producer of the ticks -we want to keep a counter of the missed ticks instead and pass it down when possible. - -We will use `conflateWithSeed` to solve the problem. 
Conflate takes two functions: - - * A seed function that produces the zero element for the folding process that happens when the upstream is faster than -the downstream. In our case the seed function is a constant function that returns 0 since there were no missed ticks -at that point. - * A fold function that is invoked when multiple upstream messages needs to be collapsed to an aggregate value due -to the insufficient processing rate of the downstream. Our folding function simply increments the currently stored -count of the missed ticks so far. - -As a result, we have a flow of `Int` where the number represents the missed ticks. A number 0 means that we were -able to consume the tick fast enough (i.e. zero means: 1 non-missed tick + 0 missed ticks) - -@@snip [RecipeMissedTicks.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeMissedTicks.java) { #missed-ticks } - -### Create a stream processor that repeats the last element seen - -**Situation:** Given a producer and consumer, where the rate of neither is known in advance, we want to ensure that none -of them is slowing down the other by dropping earlier unconsumed elements from the upstream if necessary, and repeating -the last value for the downstream if necessary. - -We have two options to implement this feature. In both cases we will use `GraphStage` to build our custom -element. In the first version we will use a provided initial value `initial` that will be used -to feed the downstream if no upstream element is ready yet. In the `onPush()` handler we just overwrite the -`currentValue` variable and immediately relieve the upstream by calling `pull()`. The downstream `onPull` handler -is very similar, we immediately relieve the downstream by emitting `currentValue`. - -@@snip [RecipeHold.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-1 } - -While it is relatively simple, the drawback of the first version is that it needs an arbitrary initial element which is not -always possible to provide. Hence, we create a second version where the downstream might need to wait in one single -case: if the very first element is not yet available. - -We introduce a boolean variable `waitingFirstValue` to denote whether the first element has been provided or not -(alternatively an `Optional` can be used for `currentValue` or if the element type is a subclass of Object -a null can be used with the same purpose). In the downstream `onPull()` handler the difference from the previous -version is that we check if we have received the first value and only emit if we have. This leads to that when the -first element comes in we must check if there possibly already was demand from downstream so that we in that case can -push the element directly. - -@@snip [RecipeHold.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-2 } - -### Globally limiting the rate of a set of streams - -**Situation:** Given a set of independent streams that we cannot merge, we want to globally limit the aggregate -throughput of the set of streams. - -One possible solution uses a shared actor as the global limiter combined with mapAsync to create a reusable -`Flow` that can be plugged into a stream to limit its rate. - -As the first step we define an actor that will do the accounting for the global rate limit. The actor maintains -a timer, a counter for pending permit tokens and a queue for possibly waiting participants. The actor has -an `open` and `closed` state. 
The actor is in the `open` state while it has still pending permits. Whenever a -request for permit arrives as a `WantToPass` message to the actor the number of available permits is decremented -and we notify the sender that it can pass by answering with a `MayPass` message. If the amount of permits reaches -zero, the actor transitions to the `closed` state. In this state requests are not immediately answered, instead the reference -of the sender is added to a queue. Once the timer for replenishing the pending permits fires by sending a `ReplenishTokens` -message, we increment the pending permits counter and send a reply to each of the waiting senders. If there are more -waiting senders than permits available we will stay in the `closed` state. - -@@snip [RecipeGlobalRateLimit.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-actor } - -To create a Flow that uses this global limiter actor we use the `mapAsync` function with the combination of the `ask` -pattern. We also define a timeout, so if a reply is not received during the configured maximum wait period the returned -future from `ask` will fail, which will fail the corresponding stream as well. - -@@snip [RecipeGlobalRateLimit.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-flow } - -@@@ note - -The global actor used for limiting introduces a global bottleneck. You might want to assign a dedicated dispatcher -for this actor. - -@@@ - -## Working with IO - -### Chunking up a stream of ByteStrings into limited size ByteStrings - -**Situation:** Given a stream of `ByteString` s we want to produce a stream of `ByteString` s containing the same bytes in -the same sequence, but capping the size of `ByteString` s. In other words we want to slice up `ByteString` s into smaller -chunks if they exceed a size threshold. - -This can be achieved with a single `GraphStage`. The main logic of our stage is in `emitChunk()` -which implements the following logic: - - * if the buffer is empty, and upstream is not closed we pull for more bytes, if it is closed we complete - * if the buffer is nonEmpty, we split it according to the `chunkSize`. This will give a next chunk that we will emit, -and an empty or nonempty remaining buffer. - -Both `onPush()` and `onPull()` calls `emitChunk()` the only difference is that the push handler also stores -the incoming chunk by appending to the end of the buffer. - -@@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker } - -@@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker2 } - -### Limit the number of bytes passing through a stream of ByteStrings - -**Situation:** Given a stream of `ByteString` s we want to fail the stream if more than a given maximum of bytes has been -consumed. - -This recipe uses a `GraphStage` to implement the desired feature. In the only handler we override, -`onPush()` we just update a counter and see if it gets larger than `maximumBytes`. If a violation happens -we signal failure, otherwise we forward the chunk we have received. 
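
The referenced snippet is the Java version; a sketch of such a limiter stage in the Scala DSL could look roughly like this (the class name and exception type are illustrative):

```scala
import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }
import akka.util.ByteString

class ByteLimiter(maximumBytes: Long) extends GraphStage[FlowShape[ByteString, ByteString]] {
  val in = Inlet[ByteString]("ByteLimiter.in")
  val out = Outlet[ByteString]("ByteLimiter.out")
  override val shape = FlowShape(in, out)

  override def createLogic(attributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) {
      private var count = 0L

      setHandlers(in, out, new InHandler with OutHandler {
        override def onPush(): Unit = {
          val chunk = grab(in)
          count += chunk.size
          if (count > maximumBytes)
            failStage(new IllegalStateException("Too many bytes")) // signal failure on violation
          else
            push(out, chunk) // otherwise forward the chunk unchanged
        }
        override def onPull(): Unit = pull(in)
      })
    }
}
```

Such a stage can then be wrapped as a reusable element with `Flow.fromGraph(new ByteLimiter(maximumBytes))` and placed into any `ByteString` stream.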
- -@@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter } - -@@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter2 } - -### Compact ByteStrings in a stream of ByteStrings - -**Situation:** After a long stream of transformations, due to their immutable, structural sharing nature `ByteString` s may -refer to multiple original ByteString instances unnecessarily retaining memory. As the final step of a transformation -chain we want to have clean copies that are no longer referencing the original `ByteString` s. - -The recipe is a simple use of map, calling the `compact()` method of the `ByteString` elements. This does -copying of the underlying arrays, so this should be the last element of a long chain if used. - -@@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #compacting-bytestrings } - -### Injecting keep-alive messages into a stream of ByteStrings - -**Situation:** Given a communication channel expressed as a stream of `ByteString` s we want to inject keep-alive messages -but only if this does not interfere with normal traffic. - -There is a built-in operation that allows to do this directly: - -@@snip [RecipeKeepAlive.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeKeepAlive.java) { #inject-keepalive } diff --git a/akka-docs/src/main/paradox/java/stream/stream-cookbook.md b/akka-docs/src/main/paradox/java/stream/stream-cookbook.md new file mode 120000 index 0000000000..3f4abcf620 --- /dev/null +++ b/akka-docs/src/main/paradox/java/stream/stream-cookbook.md @@ -0,0 +1 @@ +../../scala/stream/stream-cookbook.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/stream-graphs.md b/akka-docs/src/main/paradox/java/stream/stream-graphs.md deleted file mode 100644 index e94e3cba68..0000000000 --- a/akka-docs/src/main/paradox/java/stream/stream-graphs.md +++ /dev/null @@ -1,302 +0,0 @@ -# Working with Graphs - -In Akka Streams computation graphs are not expressed using a fluent DSL like linear computations are, instead they are -written in a more graph-resembling DSL which aims to make translating graph drawings (e.g. from notes taken -from design discussions, or illustrations in protocol specifications) to and from code simpler. In this section we'll -dive into the multiple ways of constructing and re-using graphs, as well as explain common pitfalls and how to avoid them. - -Graphs are needed whenever you want to perform any kind of fan-in ("multiple inputs") or fan-out ("multiple outputs") operations. -Considering linear Flows to be like roads, we can picture graph operations as junctions: multiple flows being connected at a single point. -Some graph operations which are common enough and fit the linear style of Flows, such as `concat` (which concatenates two -streams, such that the second one is consumed after the first one has completed), may have shorthand methods defined on -`Flow` or `Source` themselves, however you should keep in mind that those are also implemented as graph junctions. - - -## Constructing Graphs - -Graphs are built from simple Flows which serve as the linear connections within the graphs as well as junctions -which serve as fan-in and fan-out points for Flows. Thanks to the junctions having meaningful types based on their behaviour -and making them explicit elements these elements should be rather straightforward to use. 
- -Akka Streams currently provide these junctions (for a detailed list see @ref:[stages overview](stages-overview.md)): - - * **Fan-out** - - * `Broadcast` – *(1 input, N outputs)* given an input element emits to each output - * `Balance` – *(1 input, N outputs)* given an input element emits to one of its output ports - * `UnzipWith` – *(1 input, N outputs)* takes a function of 1 input that given a value for each input emits N output elements (where N <= 20) - * `UnZip` – *(1 input, 2 outputs)* splits a stream of `Pair` tuples into two streams, one of type `A` and one of type `B` - - * **Fan-in** - - * `Merge` – *(N inputs , 1 output)* picks randomly from inputs pushing them one by one to its output - * `MergePreferred` – like `Merge` but if elements are available on `preferred` port, it picks from it, otherwise randomly from `others` - * `MergePrioritized` – like `Merge` but if elements are available on all input ports, it picks from them randomly based on their `priority` - * `ZipWith` – *(N inputs, 1 output)* which takes a function of N inputs that given a value for each input emits 1 output element - * `Zip` – *(2 inputs, 1 output)* is a `ZipWith` specialised to zipping input streams of `A` and `B` into a `Pair(A,B)` tuple stream - * `Concat` – *(2 inputs, 1 output)* concatenates two streams (first consume one, then the second one) - -One of the goals of the GraphDSL DSL is to look similar to how one would draw a graph on a whiteboard, so that it is -simple to translate a design from whiteboard to code and be able to relate those two. Let's illustrate this by translating -the below hand drawn graph into Akka Streams: - -![simple-graph-example.png](../../images/simple-graph-example.png) - -Such graph is simple to translate to the Graph DSL since each linear element corresponds to a `Flow`, -and each circle corresponds to either a `Junction` or a `Source` or `Sink` if it is beginning -or ending a `Flow`. - -@@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #simple-graph-dsl } - -@@@ note - -Junction *reference equality* defines *graph node equality* (i.e. the same merge *instance* used in a GraphDSL -refers to the same location in the resulting graph). - -@@@ - -By looking at the snippets above, it should be apparent that the `builder` object is *mutable*. -The reason for this design choice is to enable simpler creation of complex graphs, which may even contain cycles. -Once the GraphDSL has been constructed though, the `RunnableGraph` instance *is immutable, thread-safe, and freely shareable*. -The same is true of all graph pieces—sources, sinks, and flows—once they are constructed. -This means that you can safely re-use one given Flow or junction in multiple places in a processing graph. - -We have seen examples of such re-use already above: the merge and broadcast junctions were imported -into the graph using `builder.add(...)`, an operation that will make a copy of the blueprint that -is passed to it and return the inlets and outlets of the resulting copy so that they can be wired up. -Another alternative is to pass existing graphs—of any shape—into the factory method that produces a -new graph. The difference between these approaches is that importing using `builder.add(...)` ignores the -materialized value of the imported graph while importing via the factory method allows its inclusion; -for more details see @ref:[Stream Materialization](../stream/stream-flows-and-basics.md#stream-materialization). 
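
For reference, the broadcast-and-merge shape discussed above can be wired up in the Scala DSL roughly as follows (the concrete source, sink and transformations are arbitrary placeholders):

```scala
import akka.NotUsed
import akka.stream.ClosedShape
import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source }

val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder: GraphDSL.Builder[NotUsed] =>
  import GraphDSL.Implicits._

  val in = Source(1 to 10)
  val out = Sink.ignore

  // junctions must be added to the builder before they can be wired
  val bcast = builder.add(Broadcast[Int](2))
  val merge = builder.add(Merge[Int](2))

  val f1, f2, f3, f4 = Flow[Int].map(_ + 10)

  in ~> f1 ~> bcast ~> f2 ~> merge ~> f3 ~> out
              bcast ~> f4 ~> merge

  ClosedShape
})
```

Running it (with a materializer in scope) is then just `graph.run()`.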
- -In the example below we prepare a graph that consists of two parallel streams, -in which we re-use the same instance of `Flow`, yet it will properly be -materialized as two connections between the corresponding Sources and Sinks: - -@@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-reusing-a-flow } - - -## Constructing and combining Partial Graphs - -Sometimes it is not possible (or needed) to construct the entire computation graph in one place, but instead construct -all of its different phases in different places and in the end connect them all into a complete graph and run it. - -This can be achieved by using the returned `Graph` from `GraphDSL.create()` rather than -passing it to `RunnableGraph.fromGraph()` to wrap it in a `RunnableGraph`.The reason of representing it as a different type is that a -`RunnableGraph` requires all ports to be connected, and if they are not -it will throw an exception at construction time, which helps to avoid simple -wiring errors while working with graphs. A partial graph however allows -you to return the set of yet to be connected ports from the code block that -performs the internal wiring. - -Let's imagine we want to provide users with a specialized element that given 3 inputs will pick -the greatest int value of each zipped triple. We'll want to expose 3 input ports (unconnected sources) and one output port -(unconnected sink). - -@@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #simple-partial-graph-dsl } - -As you can see, first we construct the partial graph that describes how to compute the maximum of two input streams, then -we reuse that twice while constructing the partial graph that extends this to three input streams, -then we import it (all of its nodes and connections) explicitly into the last graph in which all -the undefined elements are rewired to real sources and sinks. The graph can then be run and yields the expected result. - -@@@ warning - -Please note that `GraphDSL` is not able to provide compile time type-safety about whether or not all -elements have been properly connected—this validation is performed as a runtime check during the graph's instantiation. - -A partial graph also verifies that all ports are either connected or part of the returned `Shape`. - -@@@ - - -## Constructing Sources, Sinks and Flows from Partial Graphs - -Instead of treating a `Graph` as simply a collection of flows and junctions which may not yet all be -connected it is sometimes useful to expose such a complex graph as a simpler structure, -such as a `Source`, `Sink` or `Flow`. - -In fact, these concepts can be easily expressed as special cases of a partially connected graph: - - * `Source` is a partial graph with *exactly one* output, that is it returns a `SourceShape`. - * `Sink` is a partial graph with *exactly one* input, that is it returns a `SinkShape`. - * `Flow` is a partial graph with *exactly one* input and *exactly one* output, that is it returns a `FlowShape`. - -Being able to hide complex graphs inside of simple elements such as Sink / Source / Flow enables you to easily create one -complex element and from there on treat it as simple compound stage for linear computations. - -In order to create a Source from a graph the method `Source.fromGraph` is used, to use it we must have a -`Graph` with a `SourceShape`. This is constructed using `GraphDSL.create` and providing building a `SourceShape` -graph. 
The single outlet must be provided to the `SourceShape.of` method and will become “the sink that must -be attached before this Source can run”. - -Refer to the example below, in which we create a Source that zips together two numbers, to see this graph -construction in action: - -@@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-from-partial-graph-dsl } - -Similarly the same can be done for a `Sink` using `SinkShape.of` in which case the provided value must be an -`Inlet`. For defining a `Flow` we need to expose both an undefined source and sink: - -@@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #flow-from-partial-graph-dsl } - -## Combining Sources and Sinks with simplified API - -There is simplified API you can use to combine sources and sinks with junctions like: `Broadcast`, `Balance`, -`Merge` and `Concat` without the need for using the Graph DSL. The combine method takes care of constructing -the necessary graph underneath. In following example we combine two sources into one (fan-in): - -@@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-combine } - -The same can be done for a `Sink` but in this case it will be fan-out: - -@@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #sink-combine } - - -## Bidirectional Flows - -A graph topology that is often useful is that of two flows going in opposite -directions. Take for example a codec stage that serializes outgoing messages -and deserializes incoming octet streams. Another such stage could add a framing -protocol that attaches a length header to outgoing data and parses incoming -frames back into the original octet stream chunks. These two stages are meant -to be composed, applying one atop the other as part of a protocol stack. For -this purpose exists the special type `BidiFlow` which is a graph that -has exactly two open inlets and two open outlets. The corresponding shape is -called `BidiShape` and is defined like this: - -@@snip [Shape.scala]($akka$/akka-stream/src/main/scala/akka/stream/Shape.scala) { #bidi-shape } - -A bidirectional flow is defined just like a unidirectional `Flow` as -demonstrated for the codec mentioned above: - -@@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #codec } - -The first version resembles the partial graph constructor, while for the simple -case of a functional 1:1 transformation there is a concise convenience method -as shown on the last line. The implementation of the two functions is not -difficult either: - -@@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #codec-impl } - -In this way you could easily integrate any other serialization library that -turns an object into a sequence of bytes. - -The other stage that we talked about is a little more involved since reversing -a framing protocol means that any received chunk of bytes may correspond to -zero or more messages. This is best implemented using a `GraphStage` -(see also @ref:[Custom processing with GraphStage](stream-customize.md#graphstage)). 
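
As an aside, for the common case of a plain length-header protocol the streams library already ships a ready-made `BidiFlow`; a hand-written stage as shown in the snippet below gives full control, but a sketch like the following may already be sufficient (the maximum message length is an arbitrary example value):

```scala
import akka.NotUsed
import akka.stream.scaladsl.{ BidiFlow, Framing }
import akka.util.ByteString

// prepends a length field on the way out and re-assembles framed messages on the way in
val framing: BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] =
  Framing.simpleFramingProtocol(256 * 1024)
```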
- -@@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #framing } - -With these implementations we can build a protocol stack and test it: - -@@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #compose } - -This example demonstrates how `BidiFlow` subgraphs can be hooked -together and also turned around with the `.reversed()` method. The test -simulates both parties of a network communication protocol without actually -having to open a network connection—the flows can just be connected directly. - - -## Accessing the materialized value inside the Graph - -In certain cases it might be necessary to feed back the materialized value of a Graph (partial, closed or backing a -Source, Sink, Flow or BidiFlow). This is possible by using `builder.materializedValue` which gives an `Outlet` that -can be used in the graph as an ordinary source or outlet, and which will eventually emit the materialized value. -If the materialized value is needed at more than one place, it is possible to call `materializedValue` any number of -times to acquire the necessary number of outlets. - -@@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-matvalue } - -Be careful not to introduce a cycle where the materialized value actually contributes to the materialized value. -The following example demonstrates a case where the materialized `CompletionStage` of a fold is fed back to the fold itself. - -@@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-matvalue-cycle } - - -## Graph cycles, liveness and deadlocks - -Cycles in bounded stream topologies need special considerations to avoid potential deadlocks and other liveness issues. -This section shows several examples of problems that can arise from the presence of feedback arcs in stream processing -graphs. - -In the following examples runnable graphs are created but do not run because each have some issue and will deadlock after start. -`Source` variable is not defined as the nature and number of element does not matter for described problems. - -The first example demonstrates a graph that contains a naïve cycle. -The graph takes elements from the source, prints them, then broadcasts those elements -to a consumer (we just used `Sink.ignore` for now) and to a feedback arc that is merged back into the main -via a `Merge` junction. - -@@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #deadlocked } - -Running this we observe that after a few numbers have been printed, no more elements are logged to the console - -all processing stops after some time. After some investigation we observe that: - - * through merging from `source` we increase the number of elements flowing in the cycle - * by broadcasting back to the cycle we do not decrease the number of elements in the cycle - -Since Akka Streams (and Reactive Streams in general) guarantee bounded processing (see the "Buffering" section for more -details) it means that only a bounded number of elements are buffered over any time span. Since our cycle gains more and -more elements, eventually all of its internal buffers become full, backpressuring `source` forever. To be able -to process more elements from `source` elements would need to leave the cycle somehow. - -If we modify our feedback loop by replacing the `Merge` junction with a `MergePreferred` we can avoid the deadlock. 
-`MergePreferred` is unfair as it always tries to consume from a preferred input port if there are elements available -before trying the other lower priority input ports. Since we feed back through the preferred port it is always guaranteed -that the elements in the cycles can flow. - -@@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #unfair } - -If we run the example we see that the same sequence of numbers are printed -over and over again, but the processing does not stop. Hence, we avoided the deadlock, but `source` is still -back-pressured forever, because buffer space is never recovered: the only action we see is the circulation of a couple -of initial elements from `source`. - -@@@ note - -What we see here is that in certain cases we need to choose between boundedness and liveness. Our first example would -not deadlock if there would be an infinite buffer in the loop, or vice versa, if the elements in the cycle would -be balanced (as many elements are removed as many are injected) then there would be no deadlock. - -@@@ - -To make our cycle both live (not deadlocking) and fair we can introduce a dropping element on the feedback arc. In this -case we chose the `buffer()` operation giving it a dropping strategy `OverflowStrategy.dropHead`. - -@@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #dropping } - -If we run this example we see that - - * The flow of elements does not stop, there are always elements printed - * We see that some of the numbers are printed several times over time (due to the feedback loop) but on average -the numbers are increasing in the long term - -This example highlights that one solution to avoid deadlocks in the presence of potentially unbalanced cycles -(cycles where the number of circulating elements are unbounded) is to drop elements. An alternative would be to -define a larger buffer with `OverflowStrategy.fail` which would fail the stream instead of deadlocking it after -all buffer space has been consumed. - -As we discovered in the previous examples, the core problem was the unbalanced nature of the feedback loop. We -circumvented this issue by adding a dropping element, but now we want to build a cycle that is balanced from -the beginning instead. To achieve this we modify our first graph by replacing the `Merge` junction with a `ZipWith`. -Since `ZipWith` takes one element from `source` *and* from the feedback arc to inject one element into the cycle, -we maintain the balance of elements. - -@@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #zipping-dead } - -Still, when we try to run the example it turns out that no element is printed at all! After some investigation we -realize that: - - * In order to get the first element from `source` into the cycle we need an already existing element in the cycle - * In order to get an initial element in the cycle we need an element from `source` - -These two conditions are a typical "chicken-and-egg" problem. The solution is to inject an initial -element into the cycle that is independent from `source`. We do this by using a `Concat` junction on the backwards -arc that injects a single element using `Source.single`. - -@@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #zipping-live } - -When we run the above example we see that processing starts and never stops. 
The important takeaway from this example -is that balanced cycles often need an initial "kick-off" element to be injected into the cycle. diff --git a/akka-docs/src/main/paradox/java/stream/stream-graphs.md b/akka-docs/src/main/paradox/java/stream/stream-graphs.md new file mode 120000 index 0000000000..4a58fe91cf --- /dev/null +++ b/akka-docs/src/main/paradox/java/stream/stream-graphs.md @@ -0,0 +1 @@ +../../scala/stream/stream-graphs.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/stream/stream-parallelism.md b/akka-docs/src/main/paradox/java/stream/stream-parallelism.md deleted file mode 100644 index 7b7b51be94..0000000000 --- a/akka-docs/src/main/paradox/java/stream/stream-parallelism.md +++ /dev/null @@ -1,105 +0,0 @@ -# Pipelining and Parallelism - -Akka Streams processing stages (be it simple operators on Flows and Sources or graph junctions) are "fused" together -and executed sequentially by default. This avoids the overhead of events crossing asynchronous boundaries but -limits the flow to execute at most one stage at any given time. - -In many cases it is useful to be able to concurrently execute the stages of a flow, this is done by explicitly marking -them as asynchronous using the `async()` method. Each processing stage marked as asynchronous will run in a -dedicated actor internally, while all stages not marked asynchronous will run in one single actor. - -We will illustrate through the example of pancake cooking how streams can be used for various processing patterns, -exploiting the available parallelism on modern computers. The setting is the following: both Patrik and Roland -like to make pancakes, but they need to produce sufficient amount in a cooking session to make all of the children -happy. To increase their pancake production throughput they use two frying pans. How they organize their pancake -processing is markedly different. - -## Pipelining - -Roland uses the two frying pans in an asymmetric fashion. The first pan is only used to fry one side of the -pancake then the half-finished pancake is flipped into the second pan for the finishing fry on the other side. -Once the first frying pan becomes available it gets a new scoop of batter. As an effect, most of the time there -are two pancakes being cooked at the same time, one being cooked on its first side and the second being cooked to -completion. -This is how this setup would look like implemented as a stream: - -@@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #pipelining } - -The two `map` stages in sequence (encapsulated in the "frying pan" flows) will be executed in a pipelined way, -basically doing the same as Roland with his frying pans: - - 1. A `ScoopOfBatter` enters `fryingPan1` - 2. `fryingPan1` emits a HalfCookedPancake once `fryingPan2` becomes available - 3. `fryingPan2` takes the HalfCookedPancake - 4. at this point fryingPan1 already takes the next scoop, without waiting for fryingPan2 to finish - -The benefit of pipelining is that it can be applied to any sequence of processing steps that are otherwise not -parallelisable (for example because the result of a processing step depends on all the information from the previous -step). One drawback is that if the processing times of the stages are very different then some of the stages will not -be able to operate at full throughput because they will wait on a previous or subsequent stage most of the time. 
In the
-pancake example frying the second half of the pancake is usually faster than frying the first half, so `fryingPan2` will
-not be able to operate at full capacity [1].
-
-@@@ note
-
-Asynchronous stream processing stages have internal buffers to make communication between them more efficient.
-For more details about the behavior of these and how to add additional buffers refer to @ref:[Buffers and working with rate](stream-rate.md).
-
-@@@
-
-## Parallel processing
-
-Patrik uses the two frying pans symmetrically. He uses both pans to fully fry a pancake on both sides, then puts
-the results on a shared plate. Whenever a pan becomes empty, he takes the next scoop from the shared bowl of batter.
-In essence he parallelizes the same process over multiple pans. This is how this setup looks when implemented
-using streams:
-
-@@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #parallelism }
-
-The benefit of parallelizing is that it is easy to scale. In the pancake example
-it is easy to add a third frying pan with Patrik's method, but Roland cannot add a third frying pan,
-since that would require a third processing step, which is not practically possible in the case of frying pancakes.
-
-One drawback of the example code above is that it does not preserve the ordering of pancakes. This might be a problem
-if children like to track their "own" pancakes. In those cases the `Balance` and `Merge` stages should be replaced
-by strict round-robin balancing and merging stages that put in and take out pancakes in a strict order.
-
-A more detailed example of creating a worker pool can be found in the cookbook: @ref:[Balancing jobs to a fixed pool of workers](stream-cookbook.md#cookbook-balance).
-
-## Combining pipelining and parallel processing
-
-The two concurrency patterns that we demonstrated as means to increase throughput are not exclusive.
-In fact, it is rather simple to combine the two approaches and streams provide
-a nice unifying language to express and compose them.
-
-First, let's look at how we can parallelize pipelined processing stages. In the case of pancakes this means that we
-will employ two chefs, each working using Roland's pipelining method, but we use the two chefs in parallel, just like
-Patrik used the two frying pans. This is how it looks when expressed as streams:
-
-@@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #parallel-pipeline }
-
-The above pattern works well if there are many independent jobs that do not depend on the results of each other, but
-the jobs themselves need multiple processing steps where each step builds on the result of
-the previous one. In our case individual pancakes do not depend on each other, they can be cooked in parallel, on the
-other hand it is not possible to fry both sides of the same pancake at the same time, so the two sides have to be fried
-in sequence.
-
-It is also possible to organize parallelized stages into pipelines. This would mean employing four chefs:
-
- * the first two chefs prepare half-cooked pancakes from batter, in parallel, then put those on a large enough
-flat surface.
- * the second two chefs take these and fry their other side in their own pans, then they put the pancakes on a shared
-plate.
- -This is again straightforward to implement with the streams API: - -@@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #pipelined-parallel } - -This usage pattern is less common but might be usable if a certain step in the pipeline might take wildly different -times to finish different jobs. The reason is that there are more balance-merge steps in this pattern -compared to the parallel pipelines. This pattern rebalances after each step, while the previous pattern only balances -at the entry point of the pipeline. This only matters however if the processing time distribution has a large -deviation. - -> [1] Roland's reason for this seemingly suboptimal procedure is that he prefers the temperature of the second pan -to be slightly lower than the first in order to achieve a more homogeneous result. diff --git a/akka-docs/src/main/paradox/java/stream/stream-parallelism.md b/akka-docs/src/main/paradox/java/stream/stream-parallelism.md new file mode 120000 index 0000000000..27397364cc --- /dev/null +++ b/akka-docs/src/main/paradox/java/stream/stream-parallelism.md @@ -0,0 +1 @@ +../../scala/stream/stream-parallelism.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/typed-actors.md b/akka-docs/src/main/paradox/java/typed-actors.md deleted file mode 100644 index aa92beff82..0000000000 --- a/akka-docs/src/main/paradox/java/typed-actors.md +++ /dev/null @@ -1,209 +0,0 @@ -# Typed Actors - -Akka Typed Actors is an implementation of the [Active Objects](http://en.wikipedia.org/wiki/Active_object) pattern. -Essentially turning method invocations into asynchronous dispatch instead of synchronous that has been the default way since Smalltalk came out. - -Typed Actors consist of 2 "parts", a public interface and an implementation, and if you've done any work in "enterprise" Java, this will be very familiar to you. As with normal Actors you have an external API (the public interface instance) that will delegate method calls asynchronously to -a private instance of the implementation. - -The advantage of Typed Actors vs. Actors is that with TypedActors you have a static contract, and don't need to define your own messages, the downside is that it places some limitations on what you can do and what you can't, i.e. you can't use become/unbecome. - -Typed Actors are implemented using [JDK Proxies](http://docs.oracle.com/javase/6/jdocs/api/java/lang/reflect/Proxy.html) which provide a pretty easy-worked API to intercept method calls. - -@@@ note - -Just as with regular Akka Untyped Actors, Typed Actors process one call at a time. - -@@@ - -## When to use Typed Actors - -Typed actors are nice for bridging between actor systems (the “inside”) and -non-actor code (the “outside”), because they allow you to write normal -OO-looking code on the outside. Think of them like doors: their practicality -lies in interfacing between private sphere and the public, but you don’t want -that many doors inside your house, do you? For a longer discussion see [this -blog post](http://letitcrash.com/post/19074284309/when-to-use-typedactors). - -A bit more background: TypedActors can easily be abused as RPC, and that -is an abstraction which is [well-known](http://doc.akka.io/jdocs/misc/smli_tr-94-29.pdf) -to be leaky. Hence TypedActors are not what we think of first when we talk -about making highly scalable concurrent software easier to write correctly. -They have their niche, use them sparingly. 
- -## The tools of the trade - -Before we create our first Typed Actor we should first go through the tools that we have at our disposal, -it's located in `akka.actor.TypedActor`. - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-extension-tools } - -@@@ warning - -Same as not exposing `this` of an Akka Actor, it's important not to expose `this` of a Typed Actor, -instead you should pass the external proxy reference, which is obtained from within your Typed Actor as -`TypedActor.self()`, this is your external identity, as the `ActorRef` is the external identity of -an Akka Actor. - -@@@ - -## Creating Typed Actors - -To create a Typed Actor you need to have one or more interfaces, and one implementation. - -The following imports are assumed: - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #imports } - -Our example interface: - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } - -Our example implementation of that interface: - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } - -The most trivial way of creating a Typed Actor instance -of our `Squarer`: - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create1 } - -First type is the type of the proxy, the second type is the type of the implementation. -If you need to call a specific constructor you do it like this: - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create2 } - -Since you supply a `Props`, you can specify which dispatcher to use, what the default timeout should be used and more. -Now, our `Squarer` doesn't have any methods, so we'd better add those. - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } - -Alright, now we've got some methods we can call, but we need to implement those in `SquarerImpl`. - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } - -Excellent, now we have an interface and an implementation of that interface, -and we know how to create a Typed Actor from that, so let's look at calling these methods. - -## Method dispatch semantics - -Methods returning: - - * `void` will be dispatched with `fire-and-forget` semantics, exactly like `ActorRef.tell` - * `scala.concurrent.Future` will use `send-request-reply` semantics, exactly like `ActorRef.ask` - * `akka.japi.Option` will use `send-request-reply` semantics, but *will* block to wait for an answer, -and return `akka.japi.Option.None` if no answer was produced within the timeout, or `akka.japi.Option.Some` containing the result otherwise. -Any exception that was thrown during this call will be rethrown. - * Any other type of value will use `send-request-reply` semantics, but *will* block to wait for an answer, -throwing `java.util.concurrent.TimeoutException` if there was a timeout or rethrow any exception that was thrown during this call. -Note that due to the Java exception and reflection mechanisms, such a `TimeoutException` will be wrapped in a `java.lang.reflect.UndeclaredThrowableException` -unless the interface method explicitly declares the `TimeoutException` as a thrown checked exception. 
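
To make these dispatch rules concrete, here is a minimal sketch using the Scala `TypedActor` API (the Java API follows the same rules, with `akka.japi.Option` in place of `scala.Option`). The `Squarer`/`SquarerImpl` names mirror the interface discussed above; the method names and the use of `Future.successful` are illustrative only, and the blocking variants time out according to the `Props`/configuration of the proxy.

```scala
import scala.concurrent.Future
import akka.actor.{ ActorSystem, TypedActor, TypedProps }

// Each return type selects one of the dispatch modes described above.
trait Squarer {
  def squareDontCare(i: Int): Unit         // fire-and-forget, like ActorRef.tell
  def square(i: Int): Future[Int]          // non-blocking send-request-reply, like ask
  def squareNowPlease(i: Int): Option[Int] // blocking send-request-reply, None on timeout
  def squareNow(i: Int): Int               // blocking send-request-reply, throws on timeout
}

class SquarerImpl extends Squarer {
  def squareDontCare(i: Int): Unit = ()                      // nothing to return
  def square(i: Int): Future[Int] = Future.successful(i * i) // completes the caller's Future
  def squareNowPlease(i: Int): Option[Int] = Some(i * i)
  def squareNow(i: Int): Int = i * i
}

object SquarerExample extends App {
  val system = ActorSystem("typed-actor-example")
  // The proxy is typed as the interface; calls are dispatched to the actor behind it.
  val squarer: Squarer = TypedActor(system).typedActorOf(TypedProps[SquarerImpl]())

  squarer.squareDontCare(10)                       // returns immediately
  val eventually: Future[Int] = squarer.square(10) // asynchronous result
  println(squarer.squareNowPlease(10))             // blocks up to the configured timeout
  system.terminate()
}
```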
- -## Messages and immutability - -While Akka cannot enforce that the parameters to the methods of your Typed Actors are immutable, -we *strongly* recommend that parameters passed are immutable. - -### One-way message send - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-oneway } - -As simple as that! The method will be executed on another thread; asynchronously. - -### Request-reply message send - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-option } - -This will block for as long as the timeout that was set in the `Props` of the Typed Actor, -if needed. It will return `None` if a timeout occurs. - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-strict } - -This will block for as long as the timeout that was set in the `Props` of the Typed Actor, -if needed. It will throw a `java.util.concurrent.TimeoutException` if a timeout occurs. -Note that here, such a `TimeoutException` will be wrapped in a -`java.lang.reflect.UndeclaredThrowableException` by the Java reflection mechanism, -because the interface method does not explicitly declare the `TimeoutException` as a thrown checked exception. -To get the `TimeoutException` directly, declare `throws java.util.concurrent.TimeoutException` at the -interface method. - -### Request-reply-with-future message send - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-future } - -This call is asynchronous, and the Future returned can be used for asynchronous composition. - -## Stopping Typed Actors - -Since Akka's Typed Actors are backed by Akka Actors they must be stopped when they aren't needed anymore. - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-stop } - -This asynchronously stops the Typed Actor associated with the specified proxy ASAP. - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-poisonpill } - -This asynchronously stops the Typed Actor associated with the specified proxy -after it's done with all calls that were made prior to this call. - -## Typed Actor Hierarchies - -Since you can obtain a contextual Typed Actor Extension by passing in an `ActorContext` -you can create child Typed Actors by invoking `typedActorOf(..)` on that. - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-hierarchy } - -You can also create a child Typed Actor in regular Akka Actors by giving the `AbstractActor.ActorContext` -as an input parameter to TypedActor.get(…). - -## Supervisor Strategy - -By having your Typed Actor implementation class implement `TypedActor.Supervisor` -you can define the strategy to use for supervising child actors, as described in -@ref:[supervision](general/supervision.md) and @ref:[Fault Tolerance](fault-tolerance.md). - -## Receive arbitrary messages - -If your implementation class of your TypedActor extends `akka.actor.TypedActor.Receiver`, -all messages that are not `MethodCall` instances will be passed into the `onReceive`-method. - -This allows you to react to DeathWatch `Terminated`-messages and other types of messages, -e.g. when interfacing with untyped actors. 
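
As a rough illustration (the `Watcher` interface and its implementation below are made up for this example), an implementation class can mix in `TypedActor.Receiver` to handle such non-`MethodCall` messages, for instance `Terminated` notifications from DeathWatch:

```scala
import akka.actor.{ ActorRef, Terminated, TypedActor }

trait Watcher {
  def watchIt(target: ActorRef): Unit
}

class WatcherImpl extends Watcher with TypedActor.Receiver {
  def watchIt(target: ActorRef): Unit =
    TypedActor.context.watch(target) // the underlying ActorContext is available inside a method call

  // Called for every message that is not a MethodCall instance.
  def onReceive(message: Any, sender: ActorRef): Unit = message match {
    case Terminated(ref) => println(s"watched actor $ref terminated")
    case other           => println(s"ignoring raw message: $other")
  }
}
```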
- -## Lifecycle callbacks - -By having your Typed Actor implementation class implement any and all of the following: - - * `TypedActor.PreStart` - * `TypedActor.PostStop` - * `TypedActor.PreRestart` - * `TypedActor.PostRestart` - -You can hook into the lifecycle of your Typed Actor. - -## Proxying - -You can use the `typedActorOf` that takes a TypedProps and an ActorRef to proxy the given ActorRef as a TypedActor. -This is usable if you want to communicate remotely with TypedActors on other machines, just pass the `ActorRef` to `typedActorOf`. - -## Lookup & Remoting - -Since `TypedActors` are backed by `Akka Actors`, you can use `typedActorOf` to proxy `ActorRefs` potentially residing on remote nodes. - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-remote } - -## Typed Router pattern - -Sometimes you want to spread messages between multiple actors. The easiest way to achieve this in Akka is to use a @ref:[Router](routing.md), -which can implement a specific routing logic, such as `smallest-mailbox` or `consistent-hashing` etc. - -Routers are not provided directly for typed actors, but it is really easy to leverage an untyped router and use a typed proxy in front of it. -To showcase this let's create typed actors that assign themselves some random `id`, so we know that in fact, the router has sent the message to different actors: - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-router-types } - -In order to round robin among a few instances of such actors, you can simply create a plain untyped router, -and then facade it with a `TypedActor` like shown in the example below. This works because typed actors of course -communicate using the same mechanisms as normal actors, and methods calls on them get transformed into message sends of `MethodCall` messages. - -@@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-router } diff --git a/akka-docs/src/main/paradox/java/typed-actors.md b/akka-docs/src/main/paradox/java/typed-actors.md new file mode 120000 index 0000000000..12add5ae3b --- /dev/null +++ b/akka-docs/src/main/paradox/java/typed-actors.md @@ -0,0 +1 @@ +../scala/typed-actors.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/java/typed.md b/akka-docs/src/main/paradox/java/typed.md deleted file mode 100644 index 76d333aa6e..0000000000 --- a/akka-docs/src/main/paradox/java/typed.md +++ /dev/null @@ -1,335 +0,0 @@ -# Akka Typed - -@@@ warning - -This module is currently marked as @ref:[may change](common/may-change.md) in the sense - of being the subject of active research. This means that API or semantics can - change without warning or deprecation period and it is not recommended to use - this module in production just yet—you have been warned. - -@@@ - -## Dependency - -Akka Typed APIs are bundled in the `akka-typed` artifact. 
-Make sure that you have the following dependency in your project: - -sbt -: @@@vars - ``` - "com.typesafe.akka" %% "akka-typed" % "$akka.version$" - ``` - @@@ - -gradle -: @@@vars - ``` - dependencies { - compile group: 'com.typesafe.akka', name: 'akka-typed_2.11', version: '$akka.version$' - } - ``` - @@@ - -maven -: @@@vars - ``` - - com.typesafe.akka - akka-typed_$scala.binary_version$ - $akka.version$ - - ``` - @@@ - -## Introduction - -As discussed in @ref:[Actor Systems](general/actor-systems.md) (and following chapters) Actors are about -sending messages between independent units of computation, but how does that -look like? In all of the following these imports are assumed: - -Scala -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #imports } - -Java -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #imports } - -With these in place we can define our first Actor, and of course it will say -hello! - -Scala -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world-actor } - -Java -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world-actor } - -This small piece of code defines two message types, one for commanding the -Actor to greet someone and one that the Actor will use to confirm that it has -done so. The `Greet` type contains not only the information of whom to -greet, it also holds an `ActorRef` that the sender of the message -supplies so that the `HelloWorld` Actor can send back the confirmation -message. - -The behavior of the Actor is defined as the `greeter` value with the help -of the `immutable` behavior constructor. This constructor is called -immutable because the behavior instance doesn't have or close over any mutable -state. Processing the next message may result in a new behavior that can -potentially be different from this one. State is updated by returning a new -behavior that holds the new immutable state. In this case we don't need to -update any state, so we return `Same`. - -The type of the messages handled by this behavior is declared to be of class -`Greet`, which implies that the supplied function’s `msg` argument is -also typed as such. This is why we can access the `whom` and `replyTo` -members without needing to use a pattern match. - -On the last line we see the `HelloWorld` Actor send a message to another -Actor, which is done using the `!` operator (pronounced “tell”). Since the -`replyTo` address is declared to be of type `ActorRef` the -compiler will only permit us to send messages of this type, other usage will -not be accepted. - -The accepted message types of an Actor together with all reply types defines -the protocol spoken by this Actor; in this case it is a simple request–reply -protocol but Actors can model arbitrarily complex protocols when needed. The -protocol is bundled together with the behavior that implements it in a nicely -wrapped scope—the `HelloWorld` class. - -Now we want to try out this Actor, so we must start an ActorSystem to host it: - -Scala -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #hello-world } - -Java -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world } - -We start an Actor system from the defined `greeter` behavior. 
- -As Carl Hewitt said, one Actor is no Actor—it would be quite lonely with -nobody to talk to. In this sense the example is a little cruel because we only -give the `HelloWorld` Actor a fake person to talk to—the “ask” pattern -can be used to send a message such that the reply fulfills a `CompletionStage`. - -Note that the `CompletionStage` that is returned by the “ask” operation is -properly typed already, no type checks or casts needed. This is possible due to -the type information that is part of the message protocol: the `ask` operator -takes as argument a function that pass an `ActorRef`, which is the -`replyTo` parameter of the `Greet` message, which means that when sending -the reply message to that `ActorRef` the message that fulfills the -`CompletionStage` can only be of type `Greeted`. - -We use this here to send the `Greet` command to the Actor and when the -reply comes back we will print it out and tell the actor system to shut down and -the program ends. - -This shows that there are aspects of Actor messaging that can be type-checked -by the compiler, but this ability is not unlimited, there are bounds to what we -can statically express. Before we go on with a more complex (and realistic) -example we make a small detour to highlight some of the theory behind this. - -## A Little Bit of Theory - -The [Actor Model](http://en.wikipedia.org/wiki/Actor_model) as defined by -Hewitt, Bishop and Steiger in 1973 is a computational model that expresses -exactly what it means for computation to be distributed. The processing -units—Actors—can only communicate by exchanging messages and upon reception of a -message an Actor can do the following three fundamental actions: - - 1. send a finite number of messages to Actors it knows - 2. create a finite number of new Actors - 3. designate the behavior to be applied to the next message - -The Akka Typed project expresses these actions using behaviors and addresses. -Messages can be sent to an address and behind this façade there is a behavior -that receives the message and acts upon it. The binding between address and -behavior can change over time as per the third point above, but that is not -visible on the outside. - -With this preamble we can get to the unique property of this project, namely -that it introduces static type checking to Actor interactions: addresses are -parameterized and only messages that are of the specified type can be sent to -them. The association between an address and its type parameter must be made -when the address (and its Actor) is created. For this purpose each behavior is -also parameterized with the type of messages it is able to process. Since the -behavior can change behind the address façade, designating the next behavior is -a constrained operation: the successor must handle the same type of messages as -its predecessor. This is necessary in order to not invalidate the addresses -that refer to this Actor. - -What this enables is that whenever a message is sent to an Actor we can -statically ensure that the type of the message is one that the Actor declares -to handle—we can avoid the mistake of sending completely pointless messages. -What we cannot statically ensure, though, is that the behavior behind the -address will be in a given state when our message is received. The fundamental -reason is that the association between address and behavior is a dynamic -runtime property, the compiler cannot know it while it translates the source -code. 
- -This is the same as for normal Java objects with internal variables: when -compiling the program we cannot know what their value will be, and if the -result of a method call depends on those variables then the outcome is -uncertain to a degree—we can only be certain that the returned value is of a -given type. - -We have seen above that the return type of an Actor command is described by the -type of reply-to address that is contained within the message. This allows a -conversation to be described in terms of its types: the reply will be of type -A, but it might also contain an address of type B, which then allows the other -Actor to continue the conversation by sending a message of type B to this new -address. While we cannot statically express the “current” state of an Actor, we -can express the current state of a protocol between two Actors, since that is -just given by the last message type that was received or sent. - -In the next section we demonstrate this on a more realistic example. - -## A More Complex Example - -Consider an Actor that runs a chat room: client Actors may connect by sending -a message that contains their screen name and then they can post messages. The -chat room Actor will disseminate all posted messages to all currently connected -client Actors. The protocol definition could look like the following: - -Scala -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-protocol } - -Java -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-protocol } - -Initially the client Actors only get access to an `ActorRef` -which allows them to make the first step. Once a client’s session has been -established it gets a `SessionGranted` message that contains a `handle` to -unlock the next protocol step, posting messages. The `PostMessage` -command will need to be sent to this particular address that represents the -session that has been added to the chat room. The other aspect of a session is -that the client has revealed its own address, via the `replyTo` argument, so that subsequent -`MessagePosted` events can be sent to it. - -This illustrates how Actors can express more than just the equivalent of method -calls on Java objects. The declared message types and their contents describe a -full protocol that can involve multiple Actors and that can evolve over -multiple steps. The implementation of the chat room protocol would be as simple -as the following: - -Scala -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-behavior } - -Java -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-behavior } - -The core of this behavior is stateful, the chat room itself does not change -into something else when sessions are established, but we introduce a variable -that tracks the opened sessions. Note that by using a method parameter a `var` -is not needed. When a new `GetSession` command comes in we add that client to the -list that is in the returned behavior. Then we also need to create the session’s -`ActorRef` that will be used to post messages. In this case we want to -create a very simple Actor that just repackages the `PostMessage` -command into a `PostSessionMessage` command which also includes the -screen name. 
Such a wrapper Actor can be created by using the -`spawnAdapter` method on the `ActorContext`, so that we can then -go on to reply to the client with the `SessionGranted` result. - -The behavior that we declare here can handle both subtypes of `Command`. -`GetSession` has been explained already and the -`PostSessionMessage` commands coming from the wrapper Actors will -trigger the dissemination of the contained chat room message to all connected -clients. But we do not want to give the ability to send -`PostSessionMessage` commands to arbitrary clients, we reserve that -right to the wrappers we create—otherwise clients could pose as completely -different screen names (imagine the `GetSession` protocol to include -authentication information to further secure this). Therefore `PostSessionMessage` -has `private` visibility and can't be created outside the actor. - -If we did not care about securing the correspondence between a session and a -screen name then we could change the protocol such that `PostMessage` is -removed and all clients just get an `ActorRef` to -send to. In this case no wrapper would be needed and we could just use -`ctx.getSelf()`. The type-checks work out in that case because -`ActorRef` is contravariant in its type parameter, meaning that we -can use a `ActorRef` wherever an -`ActorRef` is needed—this makes sense because the -former simply speaks more languages than the latter. The opposite would be -problematic, so passing an `ActorRef` where -`ActorRef` is required will lead to a type error. - -### Trying it out - -In order to see this chat room in action we need to write a client Actor that can use it: - -Scala -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-gabbler } - -Java -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-gabbler } - -From this behavior we can create an Actor that will accept a chat room session, -post a message, wait to see it published, and then terminate. The last step -requires the ability to change behavior, we need to transition from the normal -running behavior into the terminated state. This is why here we do not return -`same`, as above, but another special value `stopped`. - -Now to try things out we must start both a chat room and a gabbler and of -course we do this inside an Actor system. Since there can be only one guardian -supervisor we could either start the chat room from the gabbler (which we don’t -want—it complicates its logic) or the gabbler from the chat room (which is -nonsensical) or we start both of them from a third Actor—our only sensible -choice: - -Scala -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala) { #chatroom-main } - -Java -: @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-main } - -In good tradition we call the `main` Actor what it is, it directly -corresponds to the `main` method in a traditional Java application. This -Actor will perform its job on its own accord, we do not need to send messages -from the outside, so we declare it to be of type `Void`. Actors receive not -only external messages, they also are notified of certain system events, -so-called Signals. In order to get access to those we choose to implement this -particular one using the `immutable` behavior decorator. 
The -provided `onSignal` function will be invoked for signals (subclasses of `Signal`) -or the `onMessage` function for user messages. - -This particular `main` Actor is created using `Actor.deferred`, which is like a factory for a behavior. -Creation of the behavior instance is deferred until the actor is started, as opposed to `Actor.immutable` -that creates the behavior instance immediately before the actor is running. The factory function in -`deferred` pass the `ActorContext` as parameter and that can for example be used for spawning child actors. -This `main` Actor creates the chat room and the gabbler and the session between them is initiated, and when the -gabbler is finished we will receive the `Terminated` event due to having -called `ctx.watch` for it. This allows us to shut down the Actor system: when -the main Actor terminates there is nothing more to do. - -## Status of this Project and Relation to Akka Actors - -Akka Typed is the result of many years of research and previous attempts -(including Typed Channels in the 2.2.x series) and it is on its way to -stabilization, but maturing such a profound change to the core concept of Akka -will take a long time. We expect that this module will stay marked -@ref:[may change](common/may-change.md) for multiple major releases of Akka and the -plain `akka.actor.Actor` will not be deprecated or go away anytime soon. - -Being a research project also entails that the reference documentation is not -as detailed as it will be for a final version, please refer to the API -documentation for greater depth and finer detail. - -### Main Differences - -The most prominent difference is the removal of the `sender()` functionality. -This turned out to be the Achilles heel of the Typed Channels project, it is -the feature that makes its type signatures and macros too complex to be viable. -The solution chosen in Akka Typed is to explicitly include the properly typed -reply-to address in the message, which both burdens the user with this task but -also places this aspect of protocol design where it belongs. - -The other prominent difference is the removal of the `Actor` trait. In -order to avoid closing over unstable references from different execution -contexts (e.g. Future transformations) we turned all remaining methods that -were on this trait into messages: the behavior receives the -`ActorContext` as an argument during processing and the lifecycle hooks -have been converted into Signals. - -A side-effect of this is that behaviors can now be tested in isolation without -having to be packaged into an Actor, tests can run fully synchronously without -having to worry about timeouts and spurious failures. Another side-effect is -that behaviors can nicely be composed and decorated, see `tap`, or -`widened` combinators; nothing about these is special or internal, new -combinators can be written as external libraries or tailor-made for each project. diff --git a/akka-docs/src/main/paradox/java/typed.md b/akka-docs/src/main/paradox/java/typed.md new file mode 120000 index 0000000000..62300f4d39 --- /dev/null +++ b/akka-docs/src/main/paradox/java/typed.md @@ -0,0 +1 @@ +../scala/typed.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/actordsl.md b/akka-docs/src/main/paradox/scala/actordsl.md deleted file mode 100644 index 9892c895d8..0000000000 --- a/akka-docs/src/main/paradox/scala/actordsl.md +++ /dev/null @@ -1,82 +0,0 @@ -# Actor DSL - -@@@ warning - -Actor DSL is deprecated and will be removed in the near future. 
-Use plain `system.actorOf` or `context.actorOf` instead. - -@@@ - -## The Actor DSL - -Simple actors—for example one-off workers or even when trying things out in the -REPL—can be created more concisely using the `Act` trait. The supporting -infrastructure is bundled in the following import: - -@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #import } - -This import is assumed for all code samples throughout this section. The -implicit actor system serves as `ActorRefFactory` for all examples -below. To define a simple actor, the following is sufficient: - -@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #simple-actor } - -Here, `actor` takes the role of either `system.actorOf` or -`context.actorOf`, depending on which context it is called in: it takes an -implicit `ActorRefFactory`, which within an actor is available in the -form of the `implicit val context: ActorContext`. Outside of an actor, you’ll -have to either declare an implicit `ActorSystem`, or you can give the -factory explicitly (see further below). - -The two possible ways of issuing a `context.become` (replacing or adding the -new behavior) are offered separately to enable a clutter-free notation of -nested receives: - -@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #becomeStacked } - -Please note that calling `unbecome` more often than `becomeStacked` results -in the original behavior being installed, which in case of the `Act` -trait is the empty behavior (the outer `become` just replaces it during -construction). - -### Life-cycle management - -Life-cycle hooks are also exposed as DSL elements (see @ref:[Start Hook](actors.md#start-hook) and @ref:[Stop Hook](actors.md#stop-hook)), where later invocations of the methods shown below will replace the contents of the respective hooks: - -@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #simple-start-stop } - -The above is enough if the logical life-cycle of the actor matches the restart -cycles (i.e. `whenStopping` is executed before a restart and `whenStarting` -afterwards). If that is not desired, use the following two hooks (see @ref:[Restart Hooks](actors.md#restart-hook)): - -@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #failing-actor } - -It is also possible to create nested actors, i.e. grand-children, like this: - -@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #nested-actor } - -@@@ note - -In some cases it will be necessary to explicitly pass the -`ActorRefFactory` to the `actor()` method (you will notice when -the compiler tells you about ambiguous implicits). 
-
-@@@
-
-The grand-child will be supervised by the child; the supervisor strategy for
-this relationship can also be configured using a DSL element (supervision
-directives are part of the `Act` trait):
-
-@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #supervise-with }
-
-### Actor with `Stash`
-
-Last but not least there is a little bit of convenience magic built-in, which
-detects if the runtime class of the statically given actor subtype extends the
-`RequiresMessageQueue` trait via the `Stash` trait (this is a
-complicated way of saying that `new Act with Stash` would not work because its
-runtime erased type is just an anonymous subtype of `Act`). The purpose is to
-automatically use the appropriate deque-based mailbox type required by `Stash`.
-If you want to use this magic, simply extend `ActWithStash`:
-
-@@snip [ActorDSLSpec.scala]($akka$/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala) { #act-with-stash }
\ No newline at end of file
diff --git a/akka-docs/src/main/paradox/scala/actors.md b/akka-docs/src/main/paradox/scala/actors.md
index bfac206982..0d389ca5b0 100644
--- a/akka-docs/src/main/paradox/scala/actors.md
+++ b/akka-docs/src/main/paradox/scala/actors.md
@@ -383,9 +383,21 @@ A path in an actor system represents a "place" which might be occupied
 by a living actor. Initially (apart from system initialized actors) a path is
 empty. When `actorOf()` is called it assigns an *incarnation* of the actor
 described by the passed `Props` to the given path. An actor incarnation is
-identified by the path *and a UID*. A restart only swaps the `Actor`
+identified by the path *and a UID*.
+
+It is worth noting the difference between:
+
+* restart
+* stop, followed by re-creation of the actor
+
+as explained below.
+
+A restart only swaps the `Actor`
 instance defined by the `Props` but the incarnation and hence the UID remains
 the same.
+As long as the incarnation is the same, you can keep using the same `ActorRef`.
+Restart is handled by the [Supervision Strategy](fault-tolerance.md#creating-a-supervisor-strategy) of the actor's parent actor,
+and there is more discussion about [what restart means](../general/supervision.md#what-restarting-means).
 
 The lifecycle of an incarnation ends when the actor is stopped. At that
 point the appropriate lifecycle events are called and watching actors
@@ -598,7 +610,7 @@ You can also acquire an `ActorRef` for an `ActorSelection` with
 the `resolveOne` method of the `ActorSelection`. It returns a `Future`
 of the matching `ActorRef` if such an actor exists. @java[(see also
 @ref:[Java 8 Compatibility](java8-compat.md) for Java compatibility).] It is completed with
-failure [[akka.actor.ActorNotFound]] if no such actor exists or the identification
+failure `akka.actor.ActorNotFound` if no such actor exists or the identification
 didn't complete within the supplied `timeout`.
 
 Remote actor addresses may also be looked up, if @ref:[remoting](remoting.md) is enabled:
@@ -733,8 +745,12 @@ is available in the `akka.pattern.PatternsCS` object.
 
 @@@ warning
 
-To complete the future with an exception you need send a Failure message to the sender.
-This is *not done automatically* when an actor throws an exception while processing a message.
+To complete the future with an exception you need to send an `akka.actor.Status.Failure` message to the sender.
+This is *not done automatically* when an actor throws an exception while processing a message. 
+ +Please note that Scala's `Try` sub types `scala.util.Failure` and `scala.util.Success` are not treated +specially, and would complete the ask Future with the given value - only the `akka.actor.Status` messages +are treated specially by the ask pattern. @@@ @@ -822,10 +838,10 @@ You can build such behavior with a builder named `ReceiveBuilder`. Here is an ex @@@ -@Scala +Scala : @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #imports1 #my-actor } -@Java +Java : @@snip [MyActor.java]($code$/java/jdocs/actor/MyActor.java) { #imports #my-actor } @@@ div { .group-java } @@ -851,7 +867,7 @@ That has benefits such as: The `Receive` can be implemented in other ways than using the `ReceiveBuilder` since it in the end is just a wrapper around a Scala `PartialFunction`. In Java, you can implement `PartialFunction` by extending `AbstractPartialFunction`. For example, one could implement an adapter -to [Javaslang Pattern Matching DSL](http://www.javaslang.io/javaslang-jdocs/#_pattern_matching). +to [Vavr Pattern Matching DSL](http://www.vavr.io/vavr-docs/#_pattern_matching). If the validation of the `ReceiveBuilder` match logic turns out to be a bottleneck for some of your actors you can consider to implement it at lower level by extending `UntypedAbstractActor` instead @@ -905,6 +921,29 @@ Messages marked with `NotInfluenceReceiveTimeout` will not reset the timer. This `ReceiveTimeout` should be fired by external inactivity but not influenced by internal activity, e.g. scheduled tick messages. + + +## Timers, scheduled messages + +Messages can be scheduled to be sent at a later point by using the @ref:[Scheduler](scheduler.md) directly, +but when scheduling periodic or single messages in an actor to itself it's more convenient and safe +to use the support for named timers. The lifecycle of scheduled messages can be difficult to manage +when the actor is restarted and that is taken care of by the timers. + +Scala +: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/TimerDocSpec.scala) { #timers } + +Java +: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/TimerDocTest.java) { #timers } + +Each timer has a key and can be replaced or cancelled. It's guaranteed that a message from the +previous incarnation of the timer with the same key is not received, even though it might already +be enqueued in the mailbox when it was cancelled or the new timer was started. + +The timers are bound to the lifecycle of the actor that owns it, and thus are cancelled +automatically when it is restarted or stopped. Note that the `TimerScheduler` is not thread-safe, +i.e. it must only be used within the actor that owns it. 
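
As a minimal sketch of what this looks like in code (the actor and message names below are invented for illustration; the real snippets are in the referenced files), an actor mixes in `akka.actor.Timers` and drives itself with messages scheduled under a key:

```scala
import scala.concurrent.duration._
import akka.actor.{ Actor, Props, Timers }

object TickingActor {
  private case object TickKey // the timer key, used to replace or cancel the timer
  private case object Tick    // the message the timer sends to the actor itself
  def props: Props = Props(new TickingActor)
}

class TickingActor extends Actor with Timers {
  import TickingActor._

  // A periodic timer; startSingleTimer schedules a one-off message instead.
  timers.startPeriodicTimer(TickKey, Tick, 1.second)

  private var ticks = 0

  def receive = {
    case Tick =>
      ticks += 1
      if (ticks >= 5)
        timers.cancel(TickKey) // no Tick from the cancelled timer will be delivered afterwards
  }
}
```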
+ ## Stopping actors @@ -981,16 +1020,10 @@ Java termination of several actors: Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #gracefulStop } +: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #gracefulStop} Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #import-gracefulStop #gracefulStop } - -Scala -: @@snip [ActorDocSpec.scala]($code$/scala/docs/actor/ActorDocSpec.scala) { #gracefulStop-actor } - -Java -: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #gracefulStop-actor } +: @@snip [ActorDocTest.java]($code$/java/jdocs/actor/ActorDocTest.java) { #gracefulStop} When `gracefulStop()` returns successfully, the actor’s `postStop()` hook will have been executed: there exists a happens-before edge between the end of @@ -1309,15 +1342,15 @@ The rich lifecycle hooks of Actors provide a useful toolkit to implement various lifetime of an `ActorRef`, an actor can potentially go through several restarts, where the old instance is replaced by a fresh one, invisibly to the outside observer who only sees the `ActorRef`. -One may think about the new instances as "incarnations". Initialization might be necessary for every incarnation -of an actor, but sometimes one needs initialization to happen only at the birth of the first instance when the +Initialization might be necessary every time an actor is instantiated, +but sometimes one needs initialization to happen only at the birth of the first instance when the `ActorRef` is created. The following sections provide patterns for different initialization needs. ### Initialization via constructor Using the constructor for initialization has various benefits. First of all, it makes it possible to use `val` fields to store any state that does not change during the life of the actor instance, making the implementation of the actor more robust. -The constructor is invoked for every incarnation of the actor, therefore the internals of the actor can always assume +The constructor is invoked when an actor instance is created calling `actorOf` and also on restart, therefore the internals of the actor can always assume that proper initialization happened. This is also the drawback of this approach, as there are cases when one would like to avoid reinitializing internals on restart. For example, it is often useful to preserve child actors across restarts. The following section provides a pattern for this case. @@ -1326,11 +1359,11 @@ restarts. The following section provides a pattern for this case. The method `preStart()` of an actor is only called once directly during the initialization of the first instance, that is, at creation of its `ActorRef`. In the case of restarts, `preStart()` is called from `postRestart()`, therefore -if not overridden, `preStart()` is called on every incarnation. However, by overriding `postRestart()` one can disable +if not overridden, `preStart()` is called on every restart. However, by overriding `postRestart()` one can disable this behavior, and ensure that there is only one call to `preStart()`. One useful usage of this pattern is to disable creation of new `ActorRefs` for children during restarts. This can be -achieved by overriding `preRestart()`: +achieved by overriding `preRestart()`. 
Below is the default implementation of these lifecycle hooks: Scala : @@snip [InitializationDocSpec.scala]($code$/scala/docs/actor/InitializationDocSpec.scala) { #preStartInit } diff --git a/akka-docs/src/main/paradox/scala/additional/books.md b/akka-docs/src/main/paradox/scala/additional/books.md index 90ab4367be..b1d6c61bd9 100644 --- a/akka-docs/src/main/paradox/scala/additional/books.md +++ b/akka-docs/src/main/paradox/scala/additional/books.md @@ -15,5 +15,7 @@ ## Videos + * [Effective Akka HTTP](https://www.youtube.com/watch?v=uxQta776jJI), by Johannes Rudolph, Reactive Systems Meetup Hamburg, November 2016 + * [Zen of Akka](https://www.youtube.com/watch?v=vgFoKOxrTzg) - an overview of good and bad practices in Akka, by Konrad Malawski, ScalaDays New York, June 2016 * [Learning Akka Videos](https://www.packtpub.com/application-development/learning-akka-video), by Salma Khater, PACKT Publishing, ISBN: 9781784391836, January 2016 * [Building Microservice with AKKA HTTP (Video)](https://www.packtpub.com/application-development/building-microservice-akka-http-video), by Tomasz Lelek, PACKT Publishing, ISBN: 9781788298582, March 2017 diff --git a/akka-docs/src/main/paradox/scala/additional/osgi.md b/akka-docs/src/main/paradox/scala/additional/osgi.md index a8c38f6e88..d359d7189a 100644 --- a/akka-docs/src/main/paradox/scala/additional/osgi.md +++ b/akka-docs/src/main/paradox/scala/additional/osgi.md @@ -70,7 +70,7 @@ in an application composed of multiple JARs to reside under a single package nam might scan all classes from `com.example.plugins` for specific service implementations with that package existing in several contributed JARs. While it is possible to support overlapping packages with complex manifest headers, it's much better to use non-overlapping -package spaces and facilities such as @ref[Akka Cluster](../common/cluster.md) +package spaces and facilities such as @ref:[Akka Cluster](../common/cluster.md) for service discovery. Stylistically, many organizations opt to use the root package path as the name of the bundle distribution file. diff --git a/akka-docs/src/main/paradox/scala/camel.md b/akka-docs/src/main/paradox/scala/camel.md index 446aed45a6..efb4c6eb67 100644 --- a/akka-docs/src/main/paradox/scala/camel.md +++ b/akka-docs/src/main/paradox/scala/camel.md @@ -24,31 +24,57 @@ APIs. The [camel-extra](http://code.google.com/p/camel-extra/) project provides ### Consumer -Usage of Camel's integration components in Akka is essentially a -one-liner. Here's an example. +Here's an example of using Camel's integration components in Akka. -@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #Consumer-mina } +Scala +: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #Consumer-mina } + +Java +: @@snip [MyEndpoint.java]($code$/java/jdocs/camel/MyEndpoint.java) { #Consumer-mina } The above example exposes an actor over a TCP endpoint via Apache -Camel's [Mina component](http://camel.apache.org/mina2.html). The actor implements the endpointUri method to define +Camel's [Mina component](http://camel.apache.org/mina2.html). The actor implements the @scala[`endpointUri`]@java[`getEndpointUri`] method to define an endpoint from which it can receive messages. After starting the actor, TCP clients can immediately send messages to and receive responses from that actor. If the message exchange should go over HTTP (via Camel's Jetty -component, only the actor's endpointUri method must be changed. 
+component), the actor's @scala[`endpointUri`]@java[`getEndpointUri`] method should return a different URI, for instance `jetty:http://localhost:8877/example`. + +@@@ div { .group-scala } @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #Consumer } +@@@ + +@@@ div { .group-java } + +In the above case an extra constructor is added that can set the endpoint URI, which would result in +the `getEndpointUri` returning the URI that was set using this constructor. + +@@@ + ### Producer Actors can also trigger message exchanges with external systems i.e. produce to Camel endpoints. -@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #imports #Producer } +Scala +: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #imports #Producer } + +Java +: @@snip [Orders.java]($code$/java/jdocs/camel/Orders.java) { #Producer } In the above example, any message sent to this actor will be sent to -the JMS queue `orders`. Producer actors may choose from the same set of Camel +the JMS queue @scala[`orders`]@java[`Orders`]. Producer actors may choose from the same set of Camel components as Consumer actors do. +@@@ div { .group-java } + +Below an example of how to send a message to the `Orders` producer. + +@@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #TellProducer } + +@@@ + ### CamelMessage The number of Camel components is constantly increasing. The akka-camel module @@ -69,68 +95,93 @@ You can also create a CamelMessage yourself with the appropriate body and header The akka-camel module is implemented as an Akka Extension, the `CamelExtension` object. Extensions will only be loaded once per `ActorSystem`, which will be managed by Akka. -The `CamelExtension` object provides access to the @extref[Camel](github:akka-camel/src/main/scala/akka/camel/Camel.scala) trait. -The @extref[Camel](github:akka-camel/src/main/scala/akka/camel/Camel.scala) trait in turn provides access to two important Apache Camel objects, the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) and the `ProducerTemplate`. +The `CamelExtension` object provides access to the @extref[Camel](github:akka-camel/src/main/scala/akka/camel/Camel.scala) @scala[trait]@java[interface]. +The @extref[Camel](github:akka-camel/src/main/scala/akka/camel/Camel.scala) @scala[trait]@java[interface] in turn provides access to two important Apache Camel objects, the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) and the `ProducerTemplate`. Below you can see how you can get access to these Apache Camel objects. -@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelExtension } +Scala +: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelExtension } + +Java +: @@snip [CamelExtensionTest.java]($code$/java/jdocs/camel/CamelExtensionTest.java) { #CamelExtension } One `CamelExtension` is only loaded once for every one `ActorSystem`, which makes it safe to call the `CamelExtension` at any point in your code to get to the Apache Camel objects associated with it. There is one [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) and one `ProducerTemplate` for every one `ActorSystem` that uses a `CamelExtension`. 
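
For readers who cannot see the referenced snippets, a minimal sketch of that lookup might look like the following (the system name is arbitrary):

```scala
import akka.actor.ActorSystem
import akka.camel.CamelExtension

object CamelAccessExample extends App {
  val system = ActorSystem("camel-example")

  // The same Camel extension instance is returned for a given ActorSystem.
  val camel = CamelExtension(system)
  val camelContext = camel.context      // the CamelContext tied to this system
  val producerTemplate = camel.template // the ProducerTemplate tied to this system

  system.terminate()
}
```
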
By Default, a new [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) is created when the `CamelExtension` starts. If you want to inject your own context instead, -you can extend the @extref[ContextProvider](github:akka-camel/src/main/scala/akka/camel/ContextProvider.scala) trait and add the FQCN of your implementation in the config, as the value of the "akka.camel.context-provider". -This interface define a single method `getContext` used to load the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java). +you can @scala[extend]@java[implement] the @extref[ContextProvider](github:akka-camel/src/main/scala/akka/camel/ContextProvider.scala) @scala[trait]@java[interface] and add the FQCN of your implementation in the config, as the value of the "akka.camel.context-provider". +This interface define a single method `getContext()` used to load the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java). Below an example on how to add the ActiveMQ component to the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java), which is required when you would like to use the ActiveMQ component. -@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelExtensionAddComponent } +Scala +: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelExtensionAddComponent } + +Java +: @@snip [CamelExtensionTest.java]($code$/java/jdocs/camel/CamelExtensionTest.java) { #CamelExtensionAddComponent } The [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) joins the lifecycle of the `ActorSystem` and `CamelExtension` it is associated with; the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) is started when the `CamelExtension` is created, and it is shut down when the associated `ActorSystem` is shut down. The same is true for the `ProducerTemplate`. -The `CamelExtension` is used by both *Producer* and *Consumer* actors to interact with Apache Camel internally. -You can access the `CamelExtension` inside a *Producer* or a *Consumer* using the `camel` definition, or get straight at the *CamelContext* using the `camelContext` definition. -Actors are created and started asynchronously. When a *Consumer* actor is created, the *Consumer* is published at its Camel endpoint (more precisely, the route is added to the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) from the [Endpoint](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Endpoint.java) to the actor). -When a *Producer* actor is created, a [SendProcessor](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java) and [Endpoint](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Endpoint.java) are created so that the Producer can send messages to it. +The `CamelExtension` is used by both `Producer` and `Consumer` actors to interact with Apache Camel internally. 
+You can access the `CamelExtension` inside a `Producer` or a `Consumer` using the `camel` @scala[definition]@java[method], or get straight at the `CamelContext` +using the @scala[`camelContext` definition]@java[`getCamelContext` method or to the `ProducerTemplate` using the `getProducerTemplate` method]. +Actors are created and started asynchronously. When a `Consumer` actor is created, the `Consumer` is published at its Camel endpoint (more precisely, the route is added to the [CamelContext](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java) from the [Endpoint](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Endpoint.java) to the actor). +When a `Producer` actor is created, a [SendProcessor](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java) and [Endpoint](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Endpoint.java) are created so that the Producer can send messages to it. Publication is done asynchronously; setting up an endpoint may still be in progress after you have requested the actor to be created. Some Camel components can take a while to startup, and in some cases you might want to know when the endpoints are activated and ready to be used. -The @extref[Camel](github:akka-camel/src/main/scala/akka/camel/Camel.scala) trait allows you to find out when the endpoint is activated or deactivated. +The @extref[Camel](github:akka-camel/src/main/scala/akka/camel/Camel.scala) @scala[trait]@java[interface] allows you to find out when the endpoint is activated or deactivated. -@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelActivation } +Scala +: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelActivation } + +Java +: @@snip [ActivationTestBase.java]($code$/java/jdocs/camel/ActivationTestBase.java) { #CamelActivation } The above code shows that you can get a `Future` to the activation of the route from the endpoint to the actor, or you can wait in a blocking fashion on the activation of the route. An `ActivationTimeoutException` is thrown if the endpoint could not be activated within the specified timeout. Deactivation works in a similar fashion: -@@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelDeactivation } +Scala +: @@snip [Introduction.scala]($code$/scala/docs/camel/Introduction.scala) { #CamelDeactivation } + +Java +: @@snip [ActivationTestBase.java]($code$/java/jdocs/camel/ActivationTestBase.java) { #CamelDeactivation } Deactivation of a Consumer or a Producer actor happens when the actor is terminated. For a Consumer, the route to the actor is stopped. For a Producer, the [SendProcessor](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java) is stopped. A `DeActivationTimeoutException` is thrown if the associated camel objects could not be deactivated within the specified timeout. ## Consumer Actors -For objects to receive messages, they must mixin the @extref[Consumer](github:akka-camel/src/main/scala/akka/camel/Consumer.scala) -trait. 
For example, the following actor class (Consumer1) implements the -endpointUri method, which is declared in the Consumer trait, in order to receive +For objects to receive messages, they must @scala[mixin the @extref[Consumer](github:akka-camel/src/main/scala/akka/camel/Consumer.scala) trait]@java[inherit from the @extref[UntypedConsumerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala) class]. +For example, the following actor class (Consumer1) implements the +@scala[`endpointUri`]@java[`getEndpointUri`] method, which is declared in the @scala[`Consumer` trait]@java[@extref[UntypedConsumerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala) class], in order to receive messages from the `file:data/input/actor` Camel endpoint. -@@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer1 } +Scala +: @@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer1 } + +Java +: @@snip [Consumer1.java]($code$/java/jdocs/camel/Consumer1.java) { #Consumer1 } Whenever a file is put into the data/input/actor directory, its content is picked up by the Camel [file component](http://camel.apache.org/file2.html) and sent as message to the actor. Messages consumed by actors from Camel endpoints are of type -@extref[CamelMessage](github:akka-camel/src/main/scala/akka/camel/CamelMessage.scala). These are immutable representations of Camel messages. +[CamelMessage](#camelmessage). These are immutable representations of Camel messages. Here's another example that sets the endpointUri to `jetty:http://localhost:8877/camel/default`. It causes Camel's Jetty component to start an embedded [Jetty](http://www.eclipse.org/jetty/) server, accepting HTTP connections from localhost on port 8877. -@@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer2 } +Scala +: @@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer2 } + +Java +: @@snip [Consumer2.java]($code$/java/jdocs/camel/Consumer2.java) { #Consumer2 } After starting the actor, clients can send messages to that actor by POSTing to `http://localhost:8877/camel/default`. The actor sends a response by using the -sender *!* method. For returning a message body and headers to the HTTP -client the response type should be @extref[CamelMessage](github:akka-camel/src/main/scala/akka/camel/CamelMessage.scala). For any other response type, a +sender @scala[`!`]@java[`getSender().tell`] method. For returning a message body and headers to the HTTP +client the response type should be [CamelMessage](#camelmessage). For any other response type, a new CamelMessage object is created by akka-camel with the actor response as message body. @@ -152,7 +203,11 @@ In this case, consumer actors must reply either with a special akka.camel.Ack message (positive acknowledgement) or a akka.actor.Status.Failure (negative acknowledgement). -@@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer3 } +Scala +: @@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer3 } + +Java +: @@snip [Consumer3.java]($code$/java/jdocs/camel/Consumer3.java) { #Consumer3 } ### Consumer timeout @@ -164,78 +219,119 @@ way which is described in the documentation of the individual Camel components. Another option is to configure timeouts on the level of consumer actors. 
Two-way communications between a Camel endpoint and an actor are -initiated by sending the request message to the actor with the @extref[ask](github:akka-actor/src/main/scala/akka/pattern/AskSupport.scala) pattern +initiated by sending the request message to the actor with the @scala[@extref[ask](github:akka-actor/src/main/scala/akka/pattern/AskSupport.scala)]@java[@extref[ask](github:akka-actor/src/main/scala/akka/pattern/Patterns.scala)] pattern and the actor replies to the endpoint when the response is ready. The ask request to the actor can timeout, which will result in the [Exchange](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Exchange.java) failing with a TimeoutException set on the failure of the [Exchange](https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Exchange.java). The timeout on the consumer actor can be overridden with the `replyTimeout`, as shown below. -@@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer4 } +Scala +: @@snip [Consumers.scala]($code$/scala/docs/camel/Consumers.scala) { #Consumer4 } + +Java +: @@snip [Consumer4.java]($code$/java/jdocs/camel/Consumer4.java) { #Consumer4 } ## Producer Actors -For sending messages to Camel endpoints, actors need to mixin the @extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala) trait and implement the endpointUri method. +For sending messages to Camel endpoints, actors need to @scala[mixin the @extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala) trait] +@java[inherit from the @extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala) class] and implement the `getEndpointUri` method. -@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Producer1 } +Scala +: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Producer1 } -Producer1 inherits a default implementation of the receive method from the -Producer trait. To customize a producer actor's default behavior you must override the @extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala).transformResponse and -@extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala).transformOutgoingMessage methods. This is explained later in more detail. -Producer Actors cannot override the default @extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala).receive method. +Java +: @@snip [Producer1.java]($code$/java/jdocs/camel/Producer1.java) { #Producer1 } -Any message sent to a @extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala) actor will be sent to +Producer1 inherits a default implementation of the @scala[`receive`]@java[`onReceive`] method from the +@scala[Producer trait]@java[@extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala)] class. 
To customize a producer actor's default behavior you must override the +@scala[@extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala).transformResponse]@java[@extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala).onTransformResponse] and +@scala[@extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala).transformOutgoingMessage methods]@java[@extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala).onTransformOutgoingMessage methods]. This is explained later in more detail. +Producer Actors cannot override the @scala[default @extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala).receive]@java[@extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala).onReceive] method. + +Any message sent to a @scala[@extref[`Producer`](github:akka-camel/src/main/scala/akka/camel/Producer.scala)]@java[Producer] actor will be sent to the associated Camel endpoint, in the above example to -`http://localhost:8080/news`. The @extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala) always sends messages asynchronously. Response messages (if supported by the +`http://localhost:8080/news`. The @scala[@extref[`Producer`](github:akka-camel/src/main/scala/akka/camel/Producer.scala)]@java[@extref[`UntypedProducerActor`](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala)] always sends messages asynchronously. Response messages (if supported by the configured endpoint) will, by default, be returned to the original sender. The following example uses the ask pattern to send a message to a Producer actor and waits for a response. -@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #AskProducer } +Scala +: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #AskProducer } -The future contains the response CamelMessage, or an `AkkaCamelException` when an error occurred, which contains the headers of the response. +Java +: @@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #AskProducer } + +The future contains the response `CamelMessage`, or an `AkkaCamelException` when an error occurred, which contains the headers of the response. ### Custom Processing Instead of replying to the initial sender, producer actors can implement custom -response processing by overriding the routeResponse method. In the following example, the response +response processing by overriding the @scala[`routeResponse`]@java[`onRouteResponse`] method. In the following example, the response message is forwarded to a target actor instead of being replied to the original sender. -@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #RouteResponse } +Scala +: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #RouteResponse } + +Java +: @@snip [ResponseReceiver.java]($code$/java/jdocs/camel/ResponseReceiver.java) { #RouteResponse } + @@snip [Forwarder.java]($code$/java/jdocs/camel/Forwarder.java) { #RouteResponse } + @@snip [OnRouteResponseTestBase.java]($code$/java/jdocs/camel/OnRouteResponseTestBase.java) { #RouteResponse } Before producing messages to endpoints, producer actors can pre-process them by -overriding the @extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala).transformOutgoingMessage method. 
+overriding the @scala[@extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala).transformOutgoingMessage]
+@java[@extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala).onTransformOutgoingMessage] method.

-@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #TransformOutgoingMessage }
+Scala
+: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #TransformOutgoingMessage }
+
+Java
+: @@snip [Transformer.java]($code$/java/jdocs/camel/Transformer.java) { #TransformOutgoingMessage }

### Producer configuration options

The interaction of producer actors with Camel endpoints can be configured to be
one-way or two-way (by initiating in-only or in-out message exchanges,
respectively). By default, the producer initiates an in-out message exchange
-with the endpoint. For initiating an in-only exchange, producer actors have to override the oneway method to return true.
+with the endpoint. For initiating an in-only exchange, producer actors have to override the @scala[`oneway`]@java[`isOneway`] method to return true.

-@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Oneway }
+Scala
+: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Oneway }
+
+Java
+: @@snip [OnewaySender.java]($code$/java/jdocs/camel/OnewaySender.java) { #Oneway }

### Message correlation

To correlate request with response messages, applications can set the
-*Message.MessageExchangeId* message header.
+`Message.MessageExchangeId` message header.

-@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Correlate }
+Scala
+: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #Correlate }
+
+Java
+: @@snip [ProducerTestBase.java]($code$/java/jdocs/camel/ProducerTestBase.java) { #Correlate }

### ProducerTemplate

-The @extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala) trait is a very
+The @scala[@extref[Producer](github:akka-camel/src/main/scala/akka/camel/Producer.scala) trait]@java[@extref[UntypedProducerActor](github:akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala) class] is a very
convenient way for actors to produce messages to Camel endpoints.

Actors may also use a Camel `ProducerTemplate` for producing messages to endpoints.

-@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #ProducerTemplate }
+Scala
+: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #ProducerTemplate }
+
+Java
+: @@snip [MyActor.java]($code$/java/jdocs/camel/MyActor.java) { #ProducerTemplate }

For initiating a two-way message exchange, one of the
`ProducerTemplate.request*` methods must be used.

-@@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #RequestProducerTemplate }
+Scala
+: @@snip [Producers.scala]($code$/scala/docs/camel/Producers.scala) { #RequestProducerTemplate }
+
+Java
+: @@snip [RequestBodyActor.java]($code$/java/jdocs/camel/RequestBodyActor.java) { #RequestProducerTemplate }

## Asynchronous routing

@@ -244,14 +340,14 @@ In-out message exchanges between endpoints and actors are
designed to be asynchronous. This is the case for both, consumer and producer actors.
- * A consumer endpoint sends request messages to its consumer actor using the `!` -(tell) operator and the actor returns responses with `sender !` once they are + * A consumer endpoint sends request messages to its consumer actor using the @scala[`!` (tell) operator ]@java[`tell` method] +and the actor returns responses with @scala[`sender !`]@java[`getSender().tell`] once they are ready. * A producer actor sends request messages to its endpoint using Camel's asynchronous routing engine. Asynchronous responses are wrapped and added to the producer actor's mailbox for later processing. By default, response messages are returned to the initial sender but this can be overridden by Producer -implementations (see also description of the `routeResponse` method +implementations (see also description of the @scala[`routeResponse`]@java[`onRouteResponse`] method in [Custom Processing](#camel-custom-processing)). However, asynchronous two-way message exchanges, without allocating a thread for @@ -291,13 +387,13 @@ most use cases, some applications may require more specialized routes to actors. The akka-camel module provides two mechanisms for customizing routes to actors, which will be explained in this section. These are: - * Usage of [camel components](#camel-components-2) to access actors. + * Usage of [Akka Camel components](#camel-components) to access actors. Any Camel route can use these components to access Akka actors. * [Intercepting route construction](#camel-intercepting-route-construction) to actors. This option gives you the ability to change routes that have already been added to Camel. Consumer actors have a hook into the route definition process which can be used to change the route. - + ### Akka Camel components Akka actors can be accessed from Camel routes using the actor Camel component. This component can be used to @@ -333,13 +429,22 @@ akka://some-system/user/myconsumer?autoAck=false&replyTimeout=100+millis ``` In the following example, a custom route to an actor is created, using the -actor's path. the Akka camel package contains an implicit `toActorRouteDefinition` that allows for a route to +actor's path. + +The Akka camel package contains an implicit `toActorRouteDefinition` that allows for a route to reference an `ActorRef` directly as shown in the below example, The route starts from a [Jetty](http://www.eclipse.org/jetty/) endpoint and ends at the target actor. -@@snip [CustomRoute.scala]($code$/scala/docs/camel/CustomRoute.scala) { #CustomRoute } +Scala +: @@snip [CustomRoute.scala]($code$/scala/docs/camel/CustomRoute.scala) { #CustomRoute } -When a message is received on the jetty endpoint, it is routed to the Responder actor, which in return replies back to the client of +Java +: @@snip [Responder.java]($code$/java/jdocs/camel/Responder.java) { #CustomRoute } + @@snip [CustomRouteBuilder.java]($code$/java/jdocs/camel/CustomRouteBuilder.java) { #CustomRoute } + @@snip [CustomRouteTestBase.java]($code$/java/jdocs/camel/CustomRouteTestBase.java) { #CustomRoute } + +@java[The `CamelPath.toCamelUri` converts the `ActorRef` to the Camel actor component URI format which points to the actor endpoint as described above.] +When a message is received on the jetty endpoint, it is routed to the `Responder` actor, which in return replies back to the client of the HTTP request. 
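For readers who do not have the referenced sources at hand, here is a rough Scala sketch of such a custom route. The endpoint URI and actor names are illustrative, it assumes the implicit `toActorRouteDefinition` from `akka.camel._` is in scope as described above, and `Responder` stands for the consumer actor referenced by the snippets:

```scala
import akka.actor.{ ActorRef, ActorSystem, Props }
import akka.camel._ // CamelExtension plus the implicit toActorRouteDefinition
import org.apache.camel.builder.RouteBuilder

// A route from an embedded Jetty endpoint directly to an ActorRef.
class CustomRouteBuilder(responder: ActorRef) extends RouteBuilder {
  def configure(): Unit =
    from("jetty:http://localhost:8877/camel/custom").to(responder)
}

val system = ActorSystem("some-system")
val responder = system.actorOf(Props[Responder], name = "TestResponder") // Responder as in the snippet
CamelExtension(system).context.addRoutes(new CustomRouteBuilder(responder))
```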
@@ -354,7 +459,11 @@ For example, an extension could be a custom error handler that redelivers messag The following examples demonstrate how to extend a route to a consumer actor for handling exceptions thrown by that actor. -@@snip [CustomRoute.scala]($code$/scala/docs/camel/CustomRoute.scala) { #ErrorThrowingConsumer } +Scala +: @@snip [CustomRoute.scala]($code$/scala/docs/camel/CustomRoute.scala) { #ErrorThrowingConsumer } + +Java +: @@snip [ErrorThrowingConsumer.java]($code$/java/jdocs/camel/ErrorThrowingConsumer.java) { #ErrorThrowingConsumer } The above ErrorThrowingConsumer sends the Failure back to the sender in preRestart because the Exception that is thrown in the actor would @@ -369,7 +478,7 @@ returned by the end method. See the [org.apache.camel.model](https://svn.apache. details). After executing the route definition handler, akka-camel finally calls a to(targetActorUri) on the returned ProcessorDefinition to complete the route to the consumer actor (where targetActorUri is the actor component URI as described in [Access to actors](#access-to-actors)). -If the actor cannot be found, a *ActorNotRegisteredException* is thrown. +If the actor cannot be found, a `ActorNotRegisteredException` is thrown. *) Before passing the RouteDefinition instance to the route definition handler, akka-camel may make some further modifications to it. @@ -377,7 +486,7 @@ akka-camel may make some further modifications to it. ## Examples -The sample named @extref[Akka Camel Samples with Scala](ecs:akka-samples-camel-scala) (@extref[source code](samples:akka-sample-camel-scala)) +The sample named @scala[@extref[Akka Camel Samples with Scala](ecs:akka-samples-camel-scala)(@extref[source code](samples:akka-sample-camel-scala))]@java[@extref[Akka Camel Samples with Java](ecs:akka-samples-camel-java)(@extref[source code](samples:akka-sample-camel-java))] contains 3 samples: * Asynchronous routing and transformation - This example demonstrates how to implement consumer and diff --git a/akka-docs/src/main/paradox/scala/cluster-metrics.md b/akka-docs/src/main/paradox/scala/cluster-metrics.md index 45ea7cb6d1..9781d7d188 100644 --- a/akka-docs/src/main/paradox/scala/cluster-metrics.md +++ b/akka-docs/src/main/paradox/scala/cluster-metrics.md @@ -185,7 +185,7 @@ akka.actor.deployment { routees.paths = ["/user/factorialBackend"] cluster { enabled = on - use-role = backend + use-roles = ["backend"] allow-local-routees = off } } diff --git a/akka-docs/src/main/paradox/scala/cluster-sharding.md b/akka-docs/src/main/paradox/scala/cluster-sharding.md index 535132e925..0055663751 100644 --- a/akka-docs/src/main/paradox/scala/cluster-sharding.md +++ b/akka-docs/src/main/paradox/scala/cluster-sharding.md @@ -39,7 +39,7 @@ Scala : @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #proxy-dc } Java -: @@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-actor } +: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #counter-actor } The above actor uses event sourcing and the support provided in @scala[`PersistentActor`] @java[`AbstractPersistentActor`] to store its state. 
It does not have to be a persistent actor, but in case of failure or migration of entities between nodes it must be able to recover @@ -56,7 +56,7 @@ Scala : @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-start } Java -: @@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-start } +: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #counter-start } The @scala[`extractEntityId` and `extractShardId` are two] @java[`messageExtractor` defines] application specific @scala[functions] @java[methods] to extract the entity identifier and the shard identifier from incoming messages. @@ -65,7 +65,7 @@ Scala : @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-extractor } Java -: @@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-extractor } +: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #counter-extractor } This example illustrates two different ways to define the entity identifier in the messages: @@ -74,7 +74,7 @@ This example illustrates two different ways to define the entity identifier in t sent to the entity actor is wrapped in the envelope. Note how these two messages types are handled in the @scala[`extractEntityId` function] @java[`entityId` and `entityMessage` methods] shown above. -The message sent to the entity actor is @scala[the second part of the tuple returned by the `extractEntityId`] @[what `entityMessage` returns] and that makes it possible to unwrap envelopes +The message sent to the entity actor is @scala[the second part of the tuple returned by the `extractEntityId`] @java[what `entityMessage` returns] and that makes it possible to unwrap envelopes if needed. A shard is a group of entities that will be managed together. The grouping is defined by the @@ -103,14 +103,14 @@ Scala : @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-usage } Java -: @@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-usage } +: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #counter-usage } @@@ div { .group-scala } A more comprehensive sample is available in the tutorial named [Akka Cluster Sharding with Scala!](https://github.com/typesafehub/activator-akka-cluster-sharding-scala). -@@@ +@@@ ## How it works @@ -188,7 +188,7 @@ must be to begin the rebalancing. This strategy can be replaced by an applicatio implementation. The state of shard locations in the `ShardCoordinator` is persistent (durable) with -@ref:[Distributed Data](distributed-data.md) or @ref:[Persistence](persistence.md) to survive failures. When a crashed or +@ref:[Distributed Data](distributed-data.md) or @ref:[Persistence](persistence.md) to survive failures. When a crashed or unreachable coordinator node has been removed (via down) from the cluster a new `ShardCoordinator` singleton actor will take over and the state is recovered. 
During such a failure period shards with known location are still available, while messages for new (unknown) shards @@ -298,7 +298,7 @@ Scala : @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #extractShardId-StartEntity } Java -: @@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #extractShardId-StartEntity } +: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #extractShardId-StartEntity } When configured to remember entities, whenever a `Shard` is rebalanced onto another node or recovers after a crash it will recreate all the entities which were previously @@ -308,7 +308,7 @@ restarted after the entity restart backoff specified in the configuration. When [Distributed Data mode](#cluster-sharding-mode) is used the identifiers of the entities are stored in @ref:[Durable Storage](distributed-data.md#ddata-durable) of Distributed Data. You may want to change the -configuration of the akka.cluster.sharding.distributed-data.durable.lmdb.dir`, since +configuration of the `akka.cluster.sharding.distributed-data.durable.lmdb.dir`, since the default directory contains the remote port of the actor system. If using a dynamically assigned port (0) it will be different each time and the previously stored data will not be loaded. @@ -335,7 +335,7 @@ Scala : @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #supervisor } Java -: @@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #supervisor } +: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #supervisor } You start such a supervisor in the same way as if it was the entity actor. @@ -343,13 +343,13 @@ Scala : @@snip [ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #counter-supervisor-start } Java -: @@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #counter-supervisor-start } +: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #counter-supervisor-start } Note that stopped entities will be started again when a new message is targeted to the entity. ## Graceful Shutdown -You can send the @scala[`ShardRegion.GracefulShutdown`] @java[`ShardRegion.gracefulShutdownInstance`] message +You can send the @scala[`ShardRegion.GracefulShutdown`] @java[`ShardRegion.gracefulShutdownInstance`] message to the `ShardRegion` actor to handoff all shards that are hosted by that `ShardRegion` and then the `ShardRegion` actor will be stopped. You can `watch` the `ShardRegion` actor to know when it is completed. During this period other regions will buffer messages for those shards in the same way as when a rebalance is @@ -442,7 +442,7 @@ if needed. @@snip [reference.conf]($akka$/akka-cluster-sharding/src/main/resources/reference.conf) { #sharding-ext-config } Custom shard allocation strategy can be defined in an optional parameter to -`ClusterSharding.start`. See the API documentation of @scala[`ShardAllocationStrategy`] @java[`AbstractShardAllocationStrategy`] for details +`ClusterSharding.start`. 
See the API documentation of @scala[`ShardAllocationStrategy`] @java[`AbstractShardAllocationStrategy`] for details of how to implement a custom shard allocation strategy. ## Inspecting cluster sharding state @@ -467,5 +467,5 @@ When doing rolling upgrades special care must be taken to not change any of the * the `extractShardId` function * the role that the shard regions run on * the persistence mode - - If any one of these needs a change it will require a full cluster restart. \ No newline at end of file + + If any one of these needs a change it will require a full cluster restart. diff --git a/akka-docs/src/main/paradox/scala/cluster-usage.md b/akka-docs/src/main/paradox/scala/cluster-usage.md index 5d74d26169..d9467b3540 100644 --- a/akka-docs/src/main/paradox/scala/cluster-usage.md +++ b/akka-docs/src/main/paradox/scala/cluster-usage.md @@ -6,14 +6,21 @@ For introduction to the Akka Cluster concepts please see @ref:[Cluster Specifica The Akka cluster is a separate jar file. Make sure that you have the following dependency in your project: -Scala +sbt : @@@vars ``` "com.typesafe.akka" %% "akka-cluster" % "$akka.version$" ``` @@@ -Java +gradle +: @@@vars + ``` + compile group: 'com.typesafe.akka', name: 'akka-cluster_$scala.binary_version$', version: '$akka.version$' + ``` + @@@ + +maven : @@@vars ``` @@ -599,7 +606,7 @@ akka.actor.deployment { cluster { enabled = on allow-local-routees = on - use-role = compute + use-roles = ["compute"] } } } @@ -615,7 +622,7 @@ the router will try to use them as soon as the member status is changed to 'Up'. The actor paths without address information that are defined in `routees.paths` are used for selecting the actors to which the messages will be forwarded to by the router. Messages will be forwarded to the routees using @ref:[ActorSelection](actors.md#actorselection), so the same delivery semantics should be expected. -It is possible to limit the lookup of routees to member nodes tagged with a certain role by specifying `use-role`. +It is possible to limit the lookup of routees to member nodes tagged with a particular set of roles by specifying `use-roles`. `max-total-nr-of-instances` defines total number of routees in the cluster. By default `max-total-nr-of-instances` is set to a high value (10000) that will result in new routees added to the router when nodes join the cluster. @@ -686,7 +693,7 @@ akka.actor.deployment { cluster { enabled = on allow-local-routees = on - use-role = compute + use-roles = ["compute"] } } } @@ -715,14 +722,14 @@ akka.actor.deployment { enabled = on max-nr-of-instances-per-node = 3 allow-local-routees = on - use-role = compute + use-roles = ["compute"] } } } ``` -It is possible to limit the deployment of routees to member nodes tagged with a certain role by -specifying `use-role`. +It is possible to limit the deployment of routees to member nodes tagged with a particular set of roles by +specifying `use-roles`. `max-total-nr-of-instances` defines total number of routees in the cluster, but the number of routees per node, `max-nr-of-instances-per-node`, will not be exceeded. By default `max-total-nr-of-instances` @@ -790,7 +797,7 @@ akka.actor.deployment { enabled = on max-nr-of-instances-per-node = 3 allow-local-routees = on - use-role = compute + use-roles = ["compute"] } } } @@ -869,7 +876,7 @@ the actor system for a specific role. This can also be used to grab the `akka.ac ## How to Test Currently testing with the `sbt-multi-jvm` plugin is only documented for Scala. 
-Go to the corresponding @ref[Scala page](../scala/cluster-usage.md#how-to-test) for details. +Go to the corresponding @ref:[Scala page](../scala/cluster-usage.md#how-to-test) for details. @@@ diff --git a/akka-docs/src/main/paradox/scala/common/binary-compatibility-rules.md b/akka-docs/src/main/paradox/scala/common/binary-compatibility-rules.md index fedc14ca3e..7eaaff7241 100644 --- a/akka-docs/src/main/paradox/scala/common/binary-compatibility-rules.md +++ b/akka-docs/src/main/paradox/scala/common/binary-compatibility-rules.md @@ -40,6 +40,8 @@ OK: 3.1.n --> 3.2.0 ... ### Cases where binary compatibility is not retained +If a security vulnerability is reported in Akka or a transient dependency of Akka and it cannot be solved without breaking binary compatibility then fixing the security issue is more important. In such cases binary compatibility might not be retained when releasing a minor version. Such exception is always noted in the release announcement. + Some modules are excluded from the binary compatibility guarantees, such as: * `*-testkit` modules - since these are to be used only in tests, which usually are re-compiled and run on demand diff --git a/akka-docs/src/main/paradox/scala/dispatchers.md b/akka-docs/src/main/paradox/scala/dispatchers.md index 4931946d6d..f60c659101 100644 --- a/akka-docs/src/main/paradox/scala/dispatchers.md +++ b/akka-docs/src/main/paradox/scala/dispatchers.md @@ -143,6 +143,13 @@ Another example that uses the thread pool based on the number of cores (e.g. for @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) {#my-thread-pool-dispatcher-config } +A different kind of dispatcher that uses an affinity pool may increase throughput in cases where there is relatively small +number of actors that maintain some internal state. The affinity pool tries its best to ensure that an actor is always +scheduled to run on the same thread. This actor to thread pinning aims to decrease CPU cache misses which can result +in significant throughput improvement. + +@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #affinity-pool-dispatcher-config } + Configuring a `PinnedDispatcher`: @@ -344,7 +351,7 @@ they were still served on the default dispatcher. This is the recommended way of dealing with any kind of blocking in reactive applications. -For a similar discussion specific about Akka HTTP refer to, @extref:[Handling blocking operations in Akka HTTP](akka.http:java/http/handling-blocking-operations-in-akka-http-routes.html#solution-dedicated-dispatcher-for-blocking-operations). +For a similar discussion specific about Akka HTTP refer to, @scala[@extref[Handling blocking operations in Akka HTTP](akka.http:scala/http/handling-blocking-operations-in-akka-http-routes.html#handling-blocking-operations-in-akka-http)]@java[@extref[Handling blocking operations in Akka HTTP](akka.http:java/http/handling-blocking-operations-in-akka-http-routes.html#handling-blocking-operations-in-akka-http)]. ### Available solutions to blocking operations diff --git a/akka-docs/src/main/paradox/scala/distributed-data.md b/akka-docs/src/main/paradox/scala/distributed-data.md index 3c22efbdfb..8192e14b0d 100644 --- a/akka-docs/src/main/paradox/scala/distributed-data.md +++ b/akka-docs/src/main/paradox/scala/distributed-data.md @@ -41,7 +41,11 @@ Below is an example of an actor that schedules tick messages to itself and for e adds or removes elements from a `ORSet` (observed-remove set). It also subscribes to changes of this. 
-@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #data-bot } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #data-bot } + +Java +: @@snip [DataBot.java]($code$/java/jdocs/ddata/DataBot.java) { #data-bot } ### Update @@ -55,7 +59,7 @@ will then be replicated according to the given consistency level. The `modify` function is called by the `Replicator` actor and must therefore be a pure function that only uses the data parameter and stable fields from enclosing scope. It must -for example not access `sender()` reference of an enclosing actor. +for example not access the sender (@scala[`sender()`]@java[`getSender()`]) reference of an enclosing actor. `Update` is intended to only be sent from an actor running in same local @@ -66,7 +70,7 @@ for example not access `sender()` reference of an enclosing actor. You supply a write consistency level which has the following meaning: - * `WriteLocal` the value will immediately only be written to the local replica, + * @scala[`WriteLocal`]@java[`writeLocal`] the value will immediately only be written to the local replica, and later disseminated with gossip * `WriteTo(n)` the value will immediately be written to at least `n` replicas, including the local replica @@ -83,7 +87,11 @@ are prefered over unreachable nodes. Note that `WriteMajority` has a `minCap` parameter that is useful to specify to achieve better safety for small clusters. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update } As reply of the `Update` a `Replicator.UpdateSuccess` is sent to the sender of the `Update` if the value was successfully replicated according to the supplied consistency @@ -92,9 +100,18 @@ sent back. Note that a `Replicator.UpdateTimeout` reply does not mean that the u or was rolled back. It may still have been replicated to some nodes, and will eventually be replicated to all nodes with the gossip protocol. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-response1 } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-response1 } -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-response2 } +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update-response1 } + + +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-response2 } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update-response2 } You will always see your own writes. For example if you send two `Update` messages changing the value of the same `key`, the `modify` function of the second message will @@ -105,7 +122,11 @@ does not care about, but is included in the reply messages. This is a convenient way to pass contextual information (e.g. original sender) without having to use `ask` or maintain local correlation data structures. 
-@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-request-context } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #update-request-context } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #update-request-context } ### Get @@ -113,7 +134,7 @@ or maintain local correlation data structures. To retrieve the current value of a data you send `Replicator.Get` message to the `Replicator`. You supply a consistency level which has the following meaning: - * `ReadLocal` the value will only be read from the local replica + * @scala[`ReadLocal`]@java[`readLocal`] the value will only be read from the local replica * `ReadFrom(n)` the value will be read and merged from `n` replicas, including the local replica * `ReadMajority` the value will be read and merged from a majority of replicas, i.e. @@ -124,16 +145,29 @@ at least **N/2 + 1** replicas, where N is the number of nodes in the cluster Note that `ReadMajority` has a `minCap` parameter that is useful to specify to achieve better safety for small clusters. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get } As reply of the `Get` a `Replicator.GetSuccess` is sent to the sender of the `Get` if the value was successfully retrieved according to the supplied consistency level within the supplied timeout. Otherwise a `Replicator.GetFailure` is sent. If the key does not exist the reply will be `Replicator.NotFound`. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-response1 } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-response1 } -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-response2 } +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get-response1 } + + +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-response2 } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get-response2 } You will always read your own writes. For example if you send a `Update` message followed by a `Get` of the same `key` the `Get` will retrieve the change that was @@ -145,17 +179,21 @@ In the `Get` message you can pass an optional request context in the same way as `Update` message, described above. For example the original sender can be passed and replied to after receiving and transforming `GetSuccess`. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-request-context } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #get-request-context } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #get-request-context } ### Consistency The consistency level that is supplied in the [Update](#replicator-update) and [Get](#replicator-get) specifies per request how many replicas that must respond successfully to a write and read request. 
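As an illustrative aside (not taken from the referenced snippets), these consistency levels are ordinary values that carry a timeout and are passed along with `Update` and `Get` requests:

```scala
import akka.cluster.ddata.Replicator._
import scala.concurrent.duration._

// Illustrative values only.
val writeTwo      = WriteTo(n = 2, timeout = 3.seconds)
val writeMajority = WriteMajority(timeout = 5.seconds)
val readMajority  = ReadMajority(timeout = 5.seconds)
val readLocal     = ReadLocal
```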
-For low latency reads you use `ReadLocal` with the risk of retrieving stale data, i.e. updates +For low latency reads you use @scala[`ReadLocal`]@java[`readLocal`] with the risk of retrieving stale data, i.e. updates from other nodes might not be visible yet. -When using `WriteLocal` the update is only written to the local replica and then disseminated +When using @scala[`WriteLocal`]@java[`writeLocal`] the update is only written to the local replica and then disseminated in the background with the gossip protocol, which can take few seconds to spread to all nodes. `WriteAll` and `ReadAll` is the strongest consistency level, but also the slowest and with @@ -197,11 +235,25 @@ the total size of the cluster. Here is an example of using `WriteMajority` and `ReadMajority`: -@@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #read-write-majority } +Scala +: @@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #read-write-majority } -@@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #get-cart } +Java +: @@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #read-write-majority } -@@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #add-item } + +Scala +: @@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #get-cart } + +Java +: @@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #get-cart } + + +Scala +: @@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #add-item } + +Java +: @@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #add-item } In some rare cases, when performing an `Update` it is needed to first try to fetch latest data from other nodes. That can be done by first sending a `Get` with `ReadMajority` and then continue with @@ -213,7 +265,11 @@ performed (hence the name observed-removed set). The following example illustrates how to do that: -@@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #remove-item } +Scala +: @@snip [ShoppingCart.scala]($code$/scala/docs/ddata/ShoppingCart.scala) { #remove-item } + +Java +: @@snip [ShoppingCart.java]($code$/java/jdocs/ddata/ShoppingCart.java) { #remove-item } @@@ warning @@ -238,7 +294,11 @@ immediately. The subscriber is automatically removed if the subscriber is terminated. A subscriber can also be deregistered with the `Replicator.Unsubscribe` message. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #subscribe } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #subscribe } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #subscribe } ### Delete @@ -259,7 +319,11 @@ In the *Delete* message you can pass an optional request context in the same way *Update* message, described above. For example the original sender can be passed and replied to after receiving and transforming *DeleteSuccess*. 
-@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #delete } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #delete } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #delete } @@@ warning @@ -300,10 +364,10 @@ akka.cluster.distributed-data.delta-crdt.enabled=off ## Data Types -The data types must be convergent (stateful) CRDTs and implement the `ReplicatedData` trait, +The data types must be convergent (stateful) CRDTs and implement the @scala[`ReplicatedData` trait]@java[`AbstractReplicatedData` interface], i.e. they provide a monotonic merge function and the state changes always converge. -You can use your own custom `ReplicatedData` or `DeltaReplicatedData` types, and several types are provided +You can use your own custom @scala[`ReplicatedData` or `DeltaReplicatedData`]@java[`AbstractReplicatedData` or `AbstractDeltaReplicatedData`] types, and several types are provided by this package, such as: * Counters: `GCounter`, `PNCounter` @@ -325,7 +389,11 @@ It is tracking the increments (P) separate from the decrements (N). Both P and N as two internal `GCounter`. Merge is handled by merging the internal P and N counters. The value of the counter is the value of the P counter minus the value of the N counter. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #pncounter } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #pncounter } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #pncounter } `GCounter` and `PNCounter` have support for [delta-CRDT](#delta-crdt) and don't need causal delivery of deltas. @@ -335,7 +403,11 @@ When the counters are placed in a `PNCounterMap` as opposed to placing them as s values they are guaranteed to be replicated together as one unit, which is sometimes necessary for related data. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #pncountermap } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #pncountermap } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #pncountermap } ### Sets @@ -343,7 +415,11 @@ If you only need to add elements to a set and not remove elements the `GSet` (gr the data type to use. The elements can be any type of values that can be serialized. Merge is simply the union of the two sets. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #gset } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #gset } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #gset } `GSet` has support for [delta-CRDT](#delta-crdt) and it doesn't require causal delivery of deltas. @@ -356,7 +432,11 @@ The version for the node that added the element is also tracked for each element called "birth dot". The version vector and the dots are used by the `merge` function to track causality of the operations and resolve concurrent updates. 
-@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #orset } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #orset } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #orset } `ORSet` has support for [delta-CRDT](#delta-crdt) and it requires causal delivery of deltas. @@ -400,7 +480,11 @@ There is ongoing work aimed at removing necessity of creation of the aforementio that despite having the same Scala type, `ORMultiMap.emptyWithValueDeltas` is not compatible with 'vanilla' `ORMultiMap`, because of different replication mechanism. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #ormultimap } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #ormultimap } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #ormultimap } When a data entry is changed the full state of that entry is replicated to other nodes, i.e. when you update a map the whole map is replicated. Therefore, instead of using one `ORMap` @@ -419,7 +503,11 @@ in the below section about `LWWRegister`. `Flag` is a data type for a boolean value that is initialized to `false` and can be switched to `true`. Thereafter it cannot be changed. `true` wins over `false` in merge. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #flag } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #flag } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #flag } `LWWRegister` (last writer wins register) can hold any (serializable) value. @@ -430,13 +518,21 @@ value is not important for concurrent updates occurring within the clock skew. Merge takes the register updated by the node with lowest address (`UniqueAddress` is ordered) if the timestamps are exactly the same. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #lwwregister } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #lwwregister } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #lwwregister } Instead of using timestamps based on `System.currentTimeMillis()` time it is possible to use a timestamp value based on something else, for example an increasing version number from a database record that is used for optimistic concurrency control. -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #lwwregister-custom-clock } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #lwwregister-custom-clock } + +Java +: @@snip [DistributedDataDocTest.java]($code$/java/jdocs/ddata/DistributedDataDocTest.java) { #lwwregister-custom-clock } For first-write-wins semantics you can use the `LWWRegister#reverseClock` instead of the `LWWRegister#defaultClock`. @@ -451,7 +547,7 @@ changing and writing the value with `WriteMajority` (or more). ### Custom Data Type You can rather easily implement your own data types. The only requirement is that it implements -the `merge` function of the `ReplicatedData` trait. 
+the @scala[`merge`]@java[`mergeData`] function of the @scala[`ReplicatedData`]@java[`AbstractReplicatedData`] trait. A nice property of stateful CRDTs is that they typically compose nicely, i.e. you can combine several smaller data types to build richer data structures. For example, the `PNCounter` is composed of @@ -461,11 +557,15 @@ Here is s simple implementation of a custom `TwoPhaseSet` that is using two inte to keep track of addition and removals. A `TwoPhaseSet` is a set where an element may be added and removed, but never added again thereafter. -@@snip [TwoPhaseSet.scala]($code$/scala/docs/ddata/TwoPhaseSet.scala) { #twophaseset } +Scala +: @@snip [TwoPhaseSet.scala]($code$/scala/docs/ddata/TwoPhaseSet.scala) { #twophaseset } + +Java +: @@snip [TwoPhaseSet.java]($code$/java/jdocs/ddata/TwoPhaseSet.java) { #twophaseset } Data types should be immutable, i.e. "modifying" methods should return a new instance. -Implement the additional methods of `DeltaReplicatedData` if it has support for delta-CRDT replication. +Implement the additional methods of @scala[`DeltaReplicatedData`]@java[`AbstractDeltaReplicatedData`] if it has support for delta-CRDT replication. #### Serialization @@ -485,19 +585,31 @@ This is a protobuf representation of the above `TwoPhaseSet`: The serializer for the `TwoPhaseSet`: -@@snip [TwoPhaseSetSerializer.scala]($code$/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala) { #serializer } +Scala +: @@snip [TwoPhaseSetSerializer.scala]($code$/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala) { #serializer } + +Java +: @@snip [TwoPhaseSetSerializer.java]($code$/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer.java) { #serializer } Note that the elements of the sets are sorted so the SHA-1 digests are the same for the same elements. You register the serializer in configuration: -@@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #serializer-config } +Scala +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #serializer-config } + +Java +: @@snip [DistributedDataDocSpec.scala]($code$/scala/docs/ddata/DistributedDataDocSpec.scala) { #japi-serializer-config } Using compression can sometimes be a good idea to reduce the data size. Gzip compression is -provided by the `akka.cluster.ddata.protobuf.SerializationSupport` trait: +provided by the @scala[`akka.cluster.ddata.protobuf.SerializationSupport` trait]@java[`akka.cluster.ddata.protobuf.AbstractSerializationSupport` interface]: -@@snip [TwoPhaseSetSerializer.scala]($code$/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala) { #compression } +Scala +: @@snip [TwoPhaseSetSerializer.scala]($code$/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala) { #compression } + +Java +: @@snip [TwoPhaseSetSerializerWithCompression.java]($code$/java/jdocs/ddata/protobuf/TwoPhaseSetSerializerWithCompression.java) { #compression } The two embedded `GSet` can be serialized as illustrated above, but in general when composing new data types from the existing built in types it is better to make use of the existing @@ -510,7 +622,11 @@ by the `SerializationSupport` trait to serialize and deserialize the `GSet` inst works with any type that has a registered Akka serializer. 
This is how such an serializer would look like for the `TwoPhaseSet`: -@@snip [TwoPhaseSetSerializer2.scala]($code$/scala/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala) { #serializer } +Scala +: @@snip [TwoPhaseSetSerializer2.scala]($code$/scala/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala) { #serializer } + +Java +: @@snip [TwoPhaseSetSerializer2.java]($code$/java/jdocs/ddata/protobuf/TwoPhaseSetSerializer2.java) { #serializer } ### Durable Storage @@ -536,14 +652,15 @@ All entries can be made durable by specifying: akka.cluster.distributed-data.durable.keys = ["*"] ``` -[LMDB](https://symas.com/products/lightning-memory-mapped-database/) is the default storage implementation. It is +@scala[[LMDB](https://symas.com/products/lightning-memory-mapped-database/)]@java[[LMDB](https://github.com/lmdbjava/lmdbjava/)] is the default storage implementation. It is possible to replace that with another implementation by implementing the actor protocol described in `akka.cluster.ddata.DurableStore` and defining the `akka.cluster.distributed-data.durable.store-actor-class` property for the new implementation. The location of the files for the data is configured with: -``` +Scala +: ``` # Directory of LMDB file. There are two options: # 1. A relative or absolute path to a directory that ends with 'ddata' # the full name of the directory will contain name of the ActorSystem @@ -553,6 +670,18 @@ The location of the files for the data is configured with: akka.cluster.distributed-data.durable.lmdb.dir = "ddata" ``` +Java +: ``` +# Directory of LMDB file. There are two options: +# 1. A relative or absolute path to a directory that ends with 'ddata' +# the full name of the directory will contain name of the ActorSystem +# and its remote port. +# 2. Otherwise the path is used as is, as a relative or absolute path to +# a directory. +akka.cluster.distributed-data.durable.lmdb.dir = "ddata" +``` + + When running in production you may want to configure the directory to a specific path (alt 2), since the default directory contains the remote port of the actor system to make the name unique. If using a dynamically assigned @@ -599,7 +728,7 @@ API documentation of the `Replicator` for details. 
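Because the snippets above are included by reference, here is a small, self-contained sketch of how an application might talk to the `Replicator` for a durable entry. The key name, data type, and consistency levels are illustrative assumptions, not part of the documented samples:

```scala
import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.ddata._
import akka.cluster.ddata.Replicator._

val system = ActorSystem("ddata-sketch")
implicit val node = Cluster(system)
val replicator = DistributedData(system).replicator

// Hypothetical key; the entry becomes durable if it matches
// akka.cluster.distributed-data.durable.keys
val CartKey = ORSetKey[String]("shopping-cart")

// Updates and reads are normally sent from inside an actor so that the
// UpdateSuccess / GetSuccess replies arrive in its mailbox.
replicator ! Update(CartKey, ORSet.empty[String], WriteLocal)(_ + "apples")
replicator ! Get(CartKey, ReadLocal)
```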
## Samples Several interesting samples are included and described in the -tutorial named @extref[Akka Distributed Data Samples with Scala](ecs:akka-samples-distributed-data-scala) (@extref[source code](samples:akka-sample-distributed-data-scala)) +tutorial named @scala[@extref[Akka Distributed Data Samples with Scala](ecs:akka-samples-distributed-data-scala) (@extref[source code](samples:akka-sample-distributed-data-scala))]@java[@extref[Akka Distributed Data Samples with Java](ecs:akka-samples-distributed-data-java) (@extref[source code](samples:akka-sample-distributed-data-java))] * Low Latency Voting Service * Highly Available Shopping Cart @@ -650,7 +779,7 @@ sbt "com.typesafe.akka" %% "akka-distributed-data" % "$akka.version$" ``` @@@ - + Maven : @@@vars ``` @@ -666,4 +795,4 @@ Maven The `DistributedData` extension can be configured with the following properties: -@@snip [reference.conf]($akka$/akka-distributed-data/src/main/resources/reference.conf) { #distributed-data } \ No newline at end of file +@@snip [reference.conf]($akka$/akka-distributed-data/src/main/resources/reference.conf) { #distributed-data } diff --git a/akka-docs/src/main/paradox/scala/distributed-pub-sub.md b/akka-docs/src/main/paradox/scala/distributed-pub-sub.md index 75b58b95c2..55e18c5b92 100644 --- a/akka-docs/src/main/paradox/scala/distributed-pub-sub.md +++ b/akka-docs/src/main/paradox/scala/distributed-pub-sub.md @@ -30,9 +30,13 @@ any other node. There a two different modes of message delivery, explained in the sections [Publish](#distributed-pub-sub-publish) and [Send](#distributed-pub-sub-send) below. +@@@ div { .group-scala } + A more comprehensive sample is available in the tutorial named [Akka Clustered PubSub with Scala!](https://github.com/typesafehub/activator-akka-clustering). +@@@ + ## Publish @@ -43,7 +47,7 @@ Actors are registered to a named topic. This enables many subscribers on each no The message will be delivered to all subscribers of the topic. For efficiency the message is sent over the wire only once per node (that has a matching topic), -and then delivered to all subscribers of the local topic representation. (See more in ) +and then delivered to all subscribers of the local topic representation. You register actors to the local mediator with `DistributedPubSubMediator.Subscribe`. Successful `Subscribe` and `Unsubscribe` is acknowledged with @@ -59,20 +63,36 @@ can explicitly remove entries with `DistributedPubSubMediator.Unsubscribe`. An example of a subscriber actor: -@@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #subscriber } +Scala +: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #subscriber } + +Java +: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #subscriber } Subscriber actors can be started on several nodes in the cluster, and all will receive messages published to the "content" topic. 
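For orientation, a condensed sketch of such a subscriber together with a matching publisher follows; the actor and topic names are illustrative, and the tabbed snippets below remain the authoritative examples.

```scala
import akka.actor.{ Actor, ActorLogging }
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{ Publish, Subscribe, SubscribeAck }

// Subscribes itself to the "content" topic and logs whatever is published there.
class ContentSubscriber extends Actor with ActorLogging {
  val mediator = DistributedPubSub(context.system).mediator
  mediator ! Subscribe("content", self)

  def receive = {
    case SubscribeAck(Subscribe("content", None, `self`)) =>
      log.info("subscribed to 'content'")
    case msg: String =>
      log.info("received: {}", msg)
  }
}

// Publishes to the same topic; can run on any node in the cluster.
class ContentPublisher extends Actor {
  val mediator = DistributedPubSub(context.system).mediator
  def receive = {
    case text: String => mediator ! Publish("content", text.toUpperCase)
  }
}
```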
-@@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #start-subscribers } +Scala +: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #start-subscribers } + +Java +: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #start-subscribers } A simple actor that publishes to this "content" topic: -@@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #publisher } +Scala +: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #publisher } + +Java +: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #publisher } It can publish messages to the topic from anywhere in the cluster: -@@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #publish-message } +Scala +: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #publish-message } + +Java +: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #publish-message } ### Topic Groups @@ -109,7 +129,7 @@ cluster aware router where the routees dynamically can register themselves. The message will be delivered to one recipient with a matching path, if any such exists in the registry. If several entries match the path because it has been registered on several nodes the message will be sent via the supplied `RoutingLogic` (default random) -to one destination. The sender() of the message can specify that local affinity is preferred, +to one destination. The sender of the message can specify that local affinity is preferred, i.e. the message is sent to an actor in the same local actor system as the used mediator actor, if any such exists, otherwise route to any other matching entry. @@ -128,20 +148,36 @@ can explicitly remove entries with `DistributedPubSubMediator.Remove`. An example of a destination actor: -@@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #send-destination } +Scala +: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #send-destination } + +Java +: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #send-destination } Destination actors can be started on several nodes in the cluster, and all will receive messages sent to the path (without address information). 
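Similarly, a condensed sketch of point-to-point delivery with `Put` and `Send`; the actor names and the path are illustrative assumptions, and the tabbed snippets below remain the authoritative examples.

```scala
import akka.actor.{ Actor, ActorLogging }
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{ Put, Send }

// Registers itself in the mediator's registry under its own actor path.
class Destination extends Actor with ActorLogging {
  val mediator = DistributedPubSub(context.system).mediator
  mediator ! Put(self)

  def receive = {
    case msg: String => log.info("received: {}", msg)
  }
}

// From anywhere in the cluster, send to the registered path (no address information).
class Sender extends Actor {
  val mediator = DistributedPubSub(context.system).mediator
  def receive = {
    case text: String =>
      mediator ! Send(path = "/user/destination", msg = text, localAffinity = true)
  }
}
```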
-@@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #start-send-destinations } +Scala +: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #start-send-destinations } + +Java +: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #start-send-destinations } A simple actor that sends to the path: -@@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #sender } +Scala +: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #sender } + +Java +: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #sender } It can send messages to the path from anywhere in the cluster: -@@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #send-message } +Scala +: @@snip [DistributedPubSubMediatorSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala) { #send-message } + +Java +: @@snip [DistributedPubSubMediatorTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/pubsub/DistributedPubSubMediatorTest.java) { #send-message } It is also possible to broadcast messages to the actors that have been registered with `Put`. Send `DistributedPubSubMediator.SendToAll` message to the local mediator and the wrapped message @@ -202,4 +238,4 @@ Maven $akka.version$ ``` - @@@ \ No newline at end of file + @@@ diff --git a/akka-docs/src/main/paradox/scala/fault-tolerance.md b/akka-docs/src/main/paradox/scala/fault-tolerance.md index ed9d70b8c3..36e0e6c842 100644 --- a/akka-docs/src/main/paradox/scala/fault-tolerance.md +++ b/akka-docs/src/main/paradox/scala/fault-tolerance.md @@ -16,14 +16,6 @@ Read the following source code. The inlined comments explain the different piece the fault handling and why they are added. It is also highly recommended to run this sample as it is easy to follow the log output to understand what is happening at runtime. -@@toc { depth=1 } - -@@@ index - -* [fault-tolerance-sample](fault-tolerance-sample.md) - -@@@ - ## Creating a Supervisor Strategy The following sections explain the fault handling mechanism and alternatives diff --git a/akka-docs/src/main/paradox/scala/futures.md b/akka-docs/src/main/paradox/scala/futures.md index bb18b5fea1..22496e03ff 100644 --- a/akka-docs/src/main/paradox/scala/futures.md +++ b/akka-docs/src/main/paradox/scala/futures.md @@ -6,14 +6,27 @@ In the Scala Standard Library, a [Future](http://en.wikipedia.org/wiki/Futures_a used to retrieve the result of some concurrent operation. This result can be accessed synchronously (blocking) or asynchronously (non-blocking). +@@@ div { .group-java } + +To be able to use this from Java, Akka provides a java friendly interface +in `akka.dispatch.Futures`. + +See also @ref:[Java 8 Compatibility](java8-compat.md) for Java compatibility. 
+ +@@@ + ## Execution Contexts In order to execute callbacks and operations, Futures need something called an `ExecutionContext`, which is very similar to a `java.util.concurrent.Executor`. if you have an `ActorSystem` in scope, it will use its default dispatcher as the `ExecutionContext`, or you can use the factory methods provided -by the `ExecutionContext` companion object to wrap `Executors` and `ExecutorServices`, or even create your own. +by the @scala[`ExecutionContext` companion object]@java[`ExecutionContexts` class] to wrap `Executors` and `ExecutorServices`, or even create your own. -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #diy-execution-context } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #diy-execution-context } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports1 #diy-execution-context } ### Within Actors @@ -22,24 +35,45 @@ dispatcher doubles as an `ExecutionContext`. If the nature of the Future calls invoked by the actor matches or is compatible with the activities of that actor (e.g. all CPU bound and no latency requirements), then it may be easiest to reuse the dispatcher for running the Futures by importing -`context.dispatcher`. +@scala[`context.dispatcher`]@java[`getContext().dispatcher()`]. -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #context-dispatcher } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #context-dispatcher } -## Use With Actors +Java +: @@snip [ActorWithFuture.java]($code$/java/jdocs/future/ActorWithFuture.java) { #context-dispatcher } -There are generally two ways of getting a reply from an `Actor`: the first is by a sent message (`actor ! msg`), -which only works if the original sender was an `Actor`) and the second is through a `Future`. +## Use with Actors -Using an `Actor`'s `?` method to send a message will return a `Future`: +There are generally two ways of getting a reply from an @scala[`Actor`]@java[`AbstractActor`]: the first is by a sent message (@scala[`actor ! msg`]@java[`actorRef.tell(msg, sender)`]), +which only works if the original sender was an @scala[`Actor`]@java[`AbstractActor`]) and the second is through a `Future`. -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #ask-blocking } +Using @scala[an `Actor`'s `?`]@java[the `ActorRef`'s `ask`] method to send a message will return a `Future`. +To wait for and retrieve the actual result the simplest method is: -This will cause the current thread to block and wait for the `Actor` to 'complete' the `Future` with it's reply. +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #ask-blocking } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports1 #ask-blocking } + +This will cause the current thread to block and wait for the @scala[`Actor`]@java[`AbstractActor`] to 'complete' the `Future` with it's reply. Blocking is discouraged though as it will cause performance problems. The blocking operations are located in `Await.result` and `Await.ready` to make it easy to spot where blocking occurs. Alternatives to blocking are discussed further within this documentation. Also note that the `Future` returned by -an `Actor` is a `Future[Any]` since an `Actor` is dynamic. That is why the `asInstanceOf` is used in the above sample. 
+an @scala[`Actor`]@java[`AbstractActor`] is a @scala[`Future[Any]`]@java[`Future`] since an @scala[`Actor`]@java[`AbstractActor`] is dynamic. +That is why the @scala[`asInstanceOf`]@java[cast to `String`] is used in the above sample. + +@@@ warning + +`Await.result` and `Await.ready` are provided for exceptional situations where you **must** block, +a good rule of thumb is to only use them if you know why you **must** block. For all other cases, use +asynchronous composition as described below. + +@@@ + +@@@ div { .group-scala } + When using non-blocking it is better to use the `mapTo` method to safely try to cast a `Future` to an expected type: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #map-to } @@ -47,34 +81,64 @@ When using non-blocking it is better to use the `mapTo` method to safely try to The `mapTo` method will return a new `Future` that contains the result if the cast was successful, or a `ClassCastException` if not. Handling `Exception`s will be discussed further within this documentation. +@@@ + To send the result of a `Future` to an `Actor`, you can use the `pipe` construct: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #pipe-to } +scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #pipe-to } + +java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #pipe-to } ## Use Directly -A common use case within Akka is to have some computation performed concurrently without needing the extra utility of an `Actor`. -If you find yourself creating a pool of `Actor`s for the sole reason of performing a calculation in parallel, +A common use case within Akka is to have some computation performed concurrently without needing the extra utility of an @scala[`Actor`]@java[`AbstractActor`]. +If you find yourself creating a pool of @scala[`Actor`s]@java[`AbstractActor`s] for the sole reason of performing a calculation in parallel, there is an easier (and faster) way: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #future-eval } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #future-eval } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports2 #future-eval } In the above code the block passed to `Future` will be executed by the default `Dispatcher`, with the return value of the block used to complete the `Future` (in this case, the result would be the string: "HelloWorld"). -Unlike a `Future` that is returned from an `Actor`, this `Future` is properly typed, -and we also avoid the overhead of managing an `Actor`. +Unlike a `Future` that is returned from an @scala[`Actor`]@java[`AbstractActor`], this `Future` is properly typed, +and we also avoid the overhead of managing an @scala[`Actor`]@java[`AbstractActor`]. 
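As a runnable counterpart to the snippet references above, a minimal sketch of evaluating work directly in a `Future` might look like this; the pool size and the use of `Await` are only for demonstration.

```scala
import java.util.concurrent.Executors
import scala.concurrent.{ Await, ExecutionContext, Future }
import scala.concurrent.duration._

// A dedicated ExecutionContext; in an Akka application you would typically
// use a configured dispatcher instead.
implicit val ec: ExecutionContext =
  ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(4))

val future: Future[String] = Future {
  "Hello" + " " + "World"
}

// Blocking here only to show the value; prefer map/onComplete in real code.
val result = Await.result(future, 3.seconds)
```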
-You can also create already completed Futures using the `Future` companion, which can be either successes: +You can also create already completed Futures using the @scala[`Future` companion]@java[`Futures` class], which can be either successes: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #successful } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #successful } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #successful } Or failures: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #failed } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #failed } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #failed } It is also possible to create an empty `Promise`, to be filled later, and obtain the corresponding `Future`: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #promise } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #promise } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #promise } + +@@@ div { .group-java } + +For these examples `PrintResult` is defined as follows: + +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #print-result } + +@@@ ## Functional Futures @@ -83,19 +147,28 @@ These allow you to create 'pipelines' or 'streams' that the result will travel t ### Future is a Monad -The first method for working with `Future` functionally is `map`. This method takes a `Function` +The first method for working with `Future` functionally is `map`. This method takes a @scala[`Function`]@java[`Mapper`] which performs some operation on the result of the `Future`, and returning a new result. The return value of the `map` method is another `Future` that will contain the new result: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #map } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #map } -In this example we are joining two strings together within a `Future`. Instead of waiting for this to complete, +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports2 #map } + +In this example we are joining two strings together within a `Future`. Instead of waiting for @scala[`this`]@java[`f1`] to complete, we apply our function that calculates the length of the string using the `map` method. -Now we have a second `Future` that will eventually contain an `Int`. -When our original `Future` completes, it will also apply our function and complete the second `Future` with its result. +Now we have a second `Future`, `f2`, that will eventually contain an @scala[`Int`]@java[`Integer`]. +When our original `Future`, `f1`, completes, it will also apply our function and complete the second `Future` with its result. When we finally get the result, it will contain the number 10. Our original `Future` still contains the string "HelloWorld" and is unaffected by the `map`. +Something to note when using these methods: passed work is always dispatched on the provided `ExecutionContext`. Even if +the `Future` has already been completed, when one of these methods is called. 
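A small self-contained sketch of the `map` usage described above (mirroring, but not identical to, the referenced snippets):

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

val f1: Future[String] = Future { "HelloWorld" }

// The function passed to map is dispatched on the ExecutionContext,
// even if f1 has already completed by the time map is called.
val f2: Future[Int] = f1.map(_.length)

f2.foreach(n => println(n)) // eventually prints 10
```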
+ +@@@ div { .group-scala } + The `map` method is fine if we are modifying a single `Future`, but if 2 or more `Future`s are involved `map` will not allow you to combine them together: @@ -123,8 +196,12 @@ each step of the for comprehension is run sequentially. This will happen on sepa there isn't much benefit over running the calculations all within a single `Future`. The real benefit comes when the `Future`s are created first, and then combining them together. +@@@ + ### Composing Futures +@@@ div { .group-scala } + The example for comprehension above is an example of composing `Future`s. A common use case for this is combining the replies of several `Actor`s into a single calculation without resorting to calling `Await.result` or `Await.ready` to block for each result. @@ -132,14 +209,6 @@ First an example of using `Await.result`: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #composing-wrong } -@@@ warning - -`Await.result` and `Await.ready` are provided for exceptional situations where you **must** block, -a good rule of thumb is to only use them if you know why you **must** block. For all other cases, use -asynchronous composition as described below. - -@@@ - Here we wait for the results from the first 2 `Actor`s before sending that result to the third `Actor`. We called `Await.result` 3 times, which caused our little program to block 3 times before getting our final result. Now compare that to this example: @@ -155,16 +224,36 @@ The `sequence` and `traverse` helper methods can make it easier to handle more c Both of these methods are ways of turning, for a subclass `T` of `Traversable`, `T[Future[A]]` into a `Future[T[A]]`. For example: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #sequence-ask } +@@@ -To better explain what happened in the example, `Future.sequence` is taking the `List[Future[Int]]` -and turning it into a `Future[List[Int]]`. We can then use `map` to work with the `List[Int]` directly, -and we find the sum of the `List`. +@@@ div { .group-java } -The `traverse` method is similar to `sequence`, but it takes a `T[A]` and a function `A => Future[B]` to return a `Future[T[B]]`, -where `T` is again a subclass of Traversable. For example, to use `traverse` to sum the first 100 odd numbers: +It is very often desirable to be able to combine different Futures with each other, +below are some examples on how that can be done in a non-blocking fashion. -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #traverse } +@@@ + +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #sequence-ask } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports3 #sequence } + +To better explain what happened in the example, `Future.sequence` is taking the @scala[`List[Future[Int]]`]@java[`Iterable>`] +and turning it into a @scala[`Future[List[Int]]`]@java[`Future>`]. We can then use `map` to work with the @scala[`List[Int]`]@java[`Iterable`] directly, +and we aggregate the sum of the @scala[`List`]@java[`Iterable`]. + +The `traverse` method is similar to `sequence`, but it takes a sequence of `A` and applies a function @scala[`A => Future[B]`]@java[from `A` to `Future`] +@scala[to return a `Future[T[B]]` where `T` is again a subclass of Traversable. 
For example, to use `traverse` to sum the first 100 odd numbers:] +@java[and returns a `Future>`, enabling parallel map over the sequence, if you use `Futures.future` to create the `Future`.] + +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #traverse } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports4 #traverse } + +@@@ div { .group-scala } This is the same result as this example: @@ -172,12 +261,19 @@ This is the same result as this example: But it may be faster to use `traverse` as it doesn't have to create an intermediate `List[Future[Int]]`. -Then there's a method that's called `fold` that takes a start-value, a sequence of `Future`s and a function -from the type of the start-value and the type of the futures and returns something with the same type as the start-value, -and then applies the function to all elements in the sequence of futures, asynchronously, -the execution will start when the last of the Futures is completed. +@@@ -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #fold } +Then there's a method that's called `fold` that takes a start-value, +a sequence of `Future`s and a function from the type of the start-value, a timeout, +and the type of the futures and returns something with the same type as the start-value, +and then applies the function to all elements in the sequence of futures, non-blockingly, +the execution will be started when the last of the Futures is completed. + +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #fold } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports5 #fold } That's all it takes! @@ -185,7 +281,11 @@ If the sequence passed to `fold` is empty, it will return the start-value, in th In some cases you don't have a start-value and you're able to use the value of the first completing `Future` in the sequence as the start-value, you can use `reduce`, it works like this: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #reduce } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #reduce } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports6 #reduce } Same as with `fold`, the execution will be done asynchronously when the last of the `Future` is completed, you can also parallelize it by chunking your futures into sub-sequences and reduce them, and then reduce the reduced results again. @@ -193,13 +293,27 @@ you can also parallelize it by chunking your futures into sub-sequences and redu ## Callbacks Sometimes you just want to listen to a `Future` being completed, and react to that not by creating a new `Future`, but by side-effecting. -For this Scala supports `onComplete`, `onSuccess` and `onFailure`, of which the last two are specializations of the first. +For this `Future` supports `onComplete`, `onSuccess` and `onFailure`, of which the last two are specializations of the first. 
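For orientation, a minimal sketch of registering such a callback is shown here; the tabbed snippets below are the documented examples.

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{ Failure, Success }

val f: Future[Int] = Future { 6 * 7 }

// onComplete receives a Try, so both outcomes are handled in one place.
f.onComplete {
  case Success(result)    => println(s"got $result")
  case Failure(exception) => println(s"computation failed: $exception")
}
```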
-@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onSuccess } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onSuccess } -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onFailure } +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onSuccess } -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onComplete } + +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onFailure } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onFailure } + + +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #onComplete } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #onComplete } ## Define Ordering @@ -209,31 +323,47 @@ But there's a solution and it's name is `andThen`. It creates a new `Future` wit the specified callback, a `Future` that will have the same result as the `Future` it's called on, which allows for ordering like in the following sample: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #and-then } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #and-then } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #and-then } ## Auxiliary Methods `Future` `fallbackTo` combines 2 Futures into a new `Future`, and will hold the successful value of the second `Future` if the first `Future` fails. -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #fallback-to } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #fallback-to } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #fallback-to } You can also combine two Futures into a new `Future` that will hold a tuple of the two Futures successful results, using the `zip` operation. -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #zip } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #zip } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #zip } ## Exceptions Since the result of a `Future` is created concurrently to the rest of the program, exceptions must be handled differently. -It doesn't matter if an `Actor` or the dispatcher is completing the `Future`, +It doesn't matter if an @scala[`Actor`]@java[`AbstractActor`] or the dispatcher is completing the `Future`, if an `Exception` is caught the `Future` will contain it instead of a valid result. If a `Future` does contain an `Exception`, calling `Await.result` will cause it to be thrown again so it can be handled properly. It is also possible to handle an `Exception` by returning a different result. This is done with the `recover` method. For example: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #recover } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #recover } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #recover } In this example, if the actor replied with a `akka.actor.Status.Failure` containing the `ArithmeticException`, our `Future` would have a result of 0. 
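The same idea in a self-contained sketch, using a local computation instead of an actor reply:

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

val f: Future[Int] =
  Future { 6 / 0 }.recover {
    case _: ArithmeticException => 0 // substitute a default value on this failure
  }
// f eventually completes successfully with 0
```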
The `recover` method works very similarly to the standard try/catch blocks, @@ -243,10 +373,116 @@ it will behave as if we hadn't used the `recover` method. You can also use the `recoverWith` method, which has the same relationship to `recover` as `flatMap` has to `map`, and is use like this: -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #try-recover } +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #try-recover } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #try-recover } ## After `akka.pattern.after` makes it easy to complete a `Future` with a value or exception after a timeout. -@@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #after } \ No newline at end of file +Scala +: @@snip [FutureDocSpec.scala]($code$/scala/docs/future/FutureDocSpec.scala) { #after } + +Java +: @@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #imports7 #after } + +@@@ div { .group-java } + +## Java 8, CompletionStage and CompletableFuture + +Starting with Akka 2.4.2 we have begun to introduce Java 8 `java.util.concurrent.CompletionStage` in Java APIs. +It's a `scala.concurrent.Future` counterpart in Java; conversion from `scala.concurrent.Future` is done using +`scala-java8-compat` library. + +Unlike `scala.concurrent.Future` which has async methods only, `CompletionStage` has *async* and *non-async* methods. + +The `scala-java8-compat` library returns its own implementation of `CompletionStage` which delegates all *non-async* +methods to their *async* counterparts. The implementation extends standard Java `CompletableFuture`. +Java 8 `CompletableFuture` creates a new instance of `CompletableFuture` for any new stage, +which means `scala-java8-compat` implementation is not used after the first mapping method. + +@@@ + +@@@ note { .group-java } + +After adding any additional computation stage to `CompletionStage` returned by `scala-java8-compat` +(e.g. `CompletionStage` instances returned by Akka) it falls back to standard behaviour of Java `CompletableFuture`. + +@@@ + +@@@ div { .group-java } + +Actions supplied for dependent completions of *non-async* methods may be performed by the thread +that completes the current `CompletableFuture`, or by any other caller of a completion method. + +All *async* methods without an explicit Executor are performed using the `ForkJoinPool.commonPool()` executor. + +@@@ + +@@@ div { .group-java } + +### Non-async methods + +When non-async methods are applied on a not yet completed `CompletionStage`, they are completed by +the thread which completes initial `CompletionStage`: + +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-completion-thread } + +In this example Scala `Future` is converted to `CompletionStage` just like Akka does. +The completion is delayed: we are calling `thenApply` multiple times on a not yet complete `CompletionStage`, then +complete the `Future`. + +First `thenApply` is actually performed on `scala-java8-compat` instance and computational stage (lambda) execution +is delegated to default Java `thenApplyAsync` which is executed on `ForkJoinPool.commonPool()`. + +Second and third `thenApply` methods are executed on Java 8 `CompletableFuture` instance which executes computational +stages on the thread which completed the first stage. 
It is never executed on a thread of Scala `Future` because +default `thenApply` breaks the chain and executes on `ForkJoinPool.commonPool()`. + +In the next example `thenApply` methods are executed on an already completed `Future`/`CompletionStage`: + +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-main-thread } + +First `thenApply` is still executed on `ForkJoinPool.commonPool()` (because it is actually `thenApplyAsync` +which is always executed on global Java pool). + +Then we wait for stages to complete so second and third `thenApply` are executed on completed `CompletionStage`, +and stages are executed on the current thread - the thread which called second and third `thenApply`. + +@@@ + +@@@ div { .group-java } + +### Async methods + +As mentioned above, default *async* methods are always executed on `ForkJoinPool.commonPool()`: + +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-async-default } + +`CompletionStage` also has *async* methods which take `Executor` as a second parameter, just like `Future`: + +@@snip [FutureDocTest.java]($code$/java/jdocs/future/FutureDocTest.java) { #apply-async-executor } + +This example is behaving like `Future`: every stage is executed on an explicitly specified `Executor`. + +@@@ + +@@@ note { .group-java } + +When in doubt, async methods with explicit executor should be used. Always async methods with a dedicated +executor/dispatcher for long-running or blocking computations, such as IO operations. + +@@@ + +@@@ div { .group-java } + +See also: + + * [CompletionStage](https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html) + * [CompletableFuture](https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html) + * [scala-java8-compat](https://github.com/scala/scala-java8-compat) + +@@@ diff --git a/akka-docs/src/main/paradox/scala/general/configuration.md b/akka-docs/src/main/paradox/scala/general/configuration.md index beb6613dff..ad85e67297 100644 --- a/akka-docs/src/main/paradox/scala/general/configuration.md +++ b/akka-docs/src/main/paradox/scala/general/configuration.md @@ -55,7 +55,7 @@ to `application`—may be overridden using the `config.resource` property @@@ note -If you are writing an Akka application, keep you configuration in +If you are writing an Akka application, keep your configuration in `application.conf` at the root of the class path. If you are writing an Akka-based library, keep its configuration in `reference.conf` at the root of the JAR file. diff --git a/akka-docs/src/main/paradox/scala/guide/actors-intro.md b/akka-docs/src/main/paradox/scala/guide/actors-intro.md index 8da4deaa46..4271327531 100644 --- a/akka-docs/src/main/paradox/scala/guide/actors-intro.md +++ b/akka-docs/src/main/paradox/scala/guide/actors-intro.md @@ -1,179 +1,25 @@ -# What problems does the actor model solve? +# How the Actor Model Meets the Needs of Modern, Distributed Systems -Akka uses the actor model to overcome the limitations of traditional object-oriented programming models and meet the -unique challenges of highly distributed systems. To fully understand why the actor model is necessary, it helps to -identify mismatches between traditional approaches to programming and the realities of concurrent and distributed -computing. +As described in the previous topic, common programming practices do not properly +address the needs of demanding modern systems. Thankfully, we +don't need to scrap everything we know. 
Instead, the actor model addresses these +shortcomings in a principled way, allowing systems to behave in a way that +better matches our mental model. The actor model abstraction +allows you to think about your code in terms of communication, not unlike the +exchanges that occur between people in a large organization. -### The illusion of encapsulation - -Object-oriented programming (OOP) is a widely-accepted, familiar programming model. One of its core pillars is -_encapsulation_. Encapsulation dictates that the internal data of an object is not accessible directly from the outside; -it can only be modified by invoking a set of curated methods. The object is responsible for exposing safe operations -that protect the invariant nature of its encapsulated data. - -For example, operations on an ordered binary tree implementation must not allow violation of the tree ordering -invariant. Callers expect the ordering to be intact and when querying the tree for a certain piece of -data, they need to be able to rely on this constraint. - -When we analyze OOP runtime behavior, we sometimes draw a message sequence chart showing the interactions of -method calls. For example: - -![sequence chart](diagrams/seq_chart.png) - -Unfortunately, the above diagram does not accurately represent the _lifelines_ of the instances during execution. -In reality, a _thread_ executes all these calls, and the enforcement of invariants occurs on the same thread from -which the method was called. Updating the diagram with the thread of execution, it looks like this: - -![sequence chart with thread](diagrams/seq_chart_thread.png) - -The significance of this clarification becomes clear when you try to model what happens with _multiple threads_. -Suddenly, our neatly drawn diagram becomes inadequate. We can try to illustrate multiple threads accessing -the same instance: - -![sequence chart with threads interacting](diagrams/seq_chart_multi_thread.png) - -There is a section of execution where two threads enter the same method. Unfortunately, the encapsulation model -of objects does not guarantee anything about what happens in that section. Instructions of the two invocations -can be interleaved in arbitrary ways which eliminate any hope for keeping the invariants intact without some -type of coordination between two threads. Now, imagine this issue compounded by the existence of many threads. - -The common approach to solving this problem is to add a lock around these methods. While this ensures that at most -one thread will enter the method at any given time, this is a very costly strategy: - - * Locks _seriously limit_ concurrency, they are very costly on modern CPU architectures, - requiring heavy-lifting from the operating system to suspend the thread and restore it later. - * The caller thread is now blocked, so it cannot do any other meaningful work. Even in desktop applications this is - unacceptable, we want to keep user-facing parts of applications (its UI) to be responsive even when a - long background job is running. In the backend, blocking is outright wasteful. - One might think that this can be compensated by launching new threads, but threads are also a costly abstraction. - * Locks introduce a new menace: deadlocks. - -These realities result in a no-win situation: - - * Without sufficient locks, the state gets corrupted. - * With many locks in place, performance suffers and very easily leads to deadlocks. - -Additionally, locks only really work well locally. 
When it comes to coordinating across multiple machines, -the only alternative is distributed locks. Unfortunately, distributed locks are several magnitudes less efficient -than local locks and usually impose a hard limit on scaling out. Distributed lock protocols require several -communication round-trips over the network across multiple machines, so latency goes through the roof. - -In Object Oriented languages we rarely think about threads or linear execution paths in general. -We often envision a system as a network of object instances that react to method calls, modify their internal state, -then communicate with each other via method calls driving the whole application state forward: - -![network of interacting objects](diagrams/object_graph.png) - -However, in a multi-threaded distributed environment, what actually happens is that threads "traverse" this network of object instances by following method calls. -As a result, threads are what really drive execution: - -![network of interactive objects traversed by threads](diagrams/object_graph_snakes.png) - -**In summary**: - - * **Objects can only guarantee encapsulation (protection of invariants) in the face of single-threaded access, - multi-thread execution almost always leads to corrupted internal state. Every invariant can be violated by - having two contending threads in the same code segment.** - * **While locks seem to be the natural remedy to uphold encapsulation with multiple threads, in practice they - are inefficient and easily lead to deadlocks in any application of real-world scale.** - * **Locks work locally, attempts to make them distributed exist, but offer limited potential for scaling out.** - -### The illusion of shared memory on modern computer architectures - -Programming models of the 80'-90's conceptualize that writing to a variable means writing to a memory location directly -(which somewhat muddies the water that local variables might exist only in registers). On modern architectures - -if we simplify things a bit - CPUs are writing to [cache lines](https://en.wikipedia.org/wiki/CPU_cache) -instead of writing to memory directly. Most of these caches are local to the CPU core, that is, writes by one core -are not visible by another. In order to make local changes visible to another core, and hence to another thread, -the cache line needs to be shipped to the other core's cache. - -On the JVM, we have to explicitly denote memory locations to be shared across threads by using _volatile_ markers -or `Atomic` wrappers. Otherwise, we can access them only in a locked section. Why don't we just mark all variables as -volatile? Because shipping cache lines across cores is a very costly operation! Doing so would implicitly stall the cores -involved from doing additional work, and result in bottlenecks on the cache coherence protocol (the protocol CPUs -use to transfer cache lines between main memory and other CPUs). -The result is magnitudes of slowdown. - -Even for developers aware of this situation, figuring out which memory locations should be marked as volatile, -or which atomic structures to use is a dark art. - -**In summary**: - - * **There is no real shared memory anymore, CPU cores pass chunks of data (cache lines) explicitly to each other - just as computers on a network do. Inter-CPU communication and network communication have more in common than many realize. 
Passing messages is the norm now be it across CPUs or networked computers.** - * **Instead of hiding the message passing aspect through variables marked as shared or using atomic data structures, - a more disciplined and principled approach is to keep state local to a concurrent entity and propagate data or events - between concurrent entities explicitly via messages.** - -### The illusion of a call stack - -Today, we often take call stacks for granted. But, they were invented in an era where concurrent programming -was not as important because multi-CPU systems were not common. Call stacks do not cross threads and hence, -do not model asynchronous call chains. - -The problem arises when a thread intends to delegate a task to the "background". In practice, this really means -delegating to another thread. This cannot be a simple method/function call because calls are strictly local to the -thread. What usually happens, is that the "caller" puts an object into a memory location shared by a worker thread -("callee"), which in turn, picks it up in some event loop. This allows the "caller" thread to move on and do other tasks. - -The first issue is, how can the "caller" be notified of the completion of the task? But a more serious issue arises -when a task fails with an exception. Where does the exception propagate to? It will propagate to the exception handler -of the worker thread completely ignoring who the actual "caller" was: - -![exceptions cannot propagate between different threads](diagrams/exception_prop.png) - -This is a serious problem. How does the worker thread deal with the situation? It likely cannot fix the issue as it is -usually oblivious of the purpose of the failed task. The "caller" thread needs to be notified somehow, -but there is no call stack to unwind with an exception. Failure notification can only be done via a side-channel, -for example putting an error code where the "caller" thread otherwise expects the result once ready. -If this notification is not in place, the "caller" never gets notified of a failure and the task is lost! -**This is surprisingly similar to how networked systems work where messages/requests can get lost/fail without any -notification.** - -This bad situation gets worse when things go really wrong and a worker backed by a thread encounters a bug and ends -up in an unrecoverable situation. For example, an internal exception caused by a bug bubbles up to the root of -the thread and makes the thread shut down. This immediately raises the question, who should restart the normal operation -of the service hosted by the thread, and how should it be restored to a known-good state? At first glance, -this might seem manageable, but we are suddenly faced by a new, unexpected phenomena: the actual task, -that the thread was currently working on, is no longer in the shared memory location where tasks are taken from -(usually a queue). In fact, due to the exception reaching to the top, unwinding all of the call stack, -the task state is fully lost! **We have lost a message even though this is local communication with no networking -involved (where message losses are to be expected).** - -**In summary:** - - * **To achieve any meaningful concurrency and performance on current systems, threads must delegate tasks among each - other in an efficient way without blocking. 
With this style of task-delegating concurrency - (and even more so with networked/distributed computing) call stack-based error handling breaks down and new, - explicit error signaling mechanisms need to be introduced. Failures become part of the domain model.** - * **Concurrent systems with work delegation needs to handle service faults and have principled means to recover from them. - Clients of such services need to be aware that tasks/messages might get lost during restarts. - Even if loss does not happen, a response might be delayed arbitrarily due to previously enqueued tasks - (a long queue), delays caused by garbage collection, etc. In face of these, concurrent systems should handle response - deadlines in the form of timeouts, just like networked/distributed systems.** - -## How the actor model meets the needs of concurrent, distributed systems - -As described in the sections above, common programming practices cannot properly address the needs of modern concurrent -and distributed systems. -Thankfully, we don't need to scrap everything we know. Instead, the actor model addresses these shortcomings in a -principled way, allowing systems to behave in a way that better matches our mental model. - -In particular, we would like to: +Use of actors allows us to: * Enforce encapsulation without resorting to locks. - * Use the model of cooperative entities reacting to signals, changing state and sending signals to each other + * Use the model of cooperative entities reacting to signals, changing state, and sending signals to each other to drive the whole application forward. * Stop worrying about an executing mechanism which is a mismatch to our world view. -The actor model accomplishes all of these goals. The following topics describe how. - ### Usage of message passing avoids locking and blocking Instead of calling methods, actors send messages to each other. Sending a message does not transfer the thread of execution from the sender to the destination. An actor can send a message and continue without blocking. -It can, therefore, do more work, send and receive messages. +Therefore, it can accomplish more in the same amount of time. With objects, when a method returns, it releases control of its executing thread. In this respect, actors behave much like objects, they react to messages and return execution when they finish processing the current message. @@ -181,8 +27,8 @@ In this way, actors actually achieve the execution we imagined for objects: ![actors interact with each other by sending messages](diagrams/actor_graph.png) -An important difference of passing messages instead of calling methods is that messages have no return value. -By sending a message, an actor delegates work to another actor. As we saw in @ref:[The illusion of a call stack](actors-intro.md#the-illusion-of-a-call-stack), +An important difference between passing messages and calling methods is that messages have no return value. +By sending a message, an actor delegates work to another actor. As we saw in @ref:[The illusion of a call stack](actors-motivation.md#the-illusion-of-a-call-stack), if it expected a return value, the sending actor would either need to block or to execute the other actor's work on the same thread. Instead, the receiving actor delivers the results in a reply message. @@ -190,8 +36,9 @@ The second key change we need in our model is to reinstate encapsulation. Actors "react" to methods invoked on them. 
The difference is that instead of multiple threads "protruding" into our actor and wreaking havoc to internal state and invariants, actors execute independently from the senders of a message, and they react to incoming messages sequentially, one at a time. While each actor processes messages sent to it sequentially, -different actors work concurrently with each other so an actor system can process as many messages simultaneously -as many processor cores are available on the machine. Since there is always at most one message being processed per actor +different actors work concurrently with each other so that an actor system can process as many messages simultaneously as the hardware will support. + +Since there is always at most one message being processed per actor, the invariants of an actor can be kept without synchronization. This happens automatically without using locks: ![messages don't invalidate invariants as they are processed sequentially](diagrams/serialized_timeline_invariants.png) @@ -207,15 +54,15 @@ In summary, this is what happens when an actor receives a message: To accomplish this behavior, actors have: - * A Mailbox (the queue where messages end up). - * A Behavior (the state of the actor, internal variables etc.). + * A mailbox (the queue where messages end up). + * A behavior (the state of the actor, internal variables etc.). * Messages (pieces of data representing a signal, similar to method calls and their parameters). - * An Execution Environment (the machinery that takes actors that have messages to react to and invokes + * An execution environment (the machinery that takes actors that have messages to react to and invokes their message handling code). - * An Address (more on this later). + * An address (more on this later). -Messages are put into so-called Mailboxes of Actors. The Behavior of the actor describes how the actor responds to -messages (like sending more messages and/or changing state). An Execution Environment orchestrates a pool of threads +Messages go into actor mailboxes. The behavior of the actor describes how the actor responds to +messages (like sending more messages and/or changing state). An execution environment orchestrates a pool of threads to drive all these actions completely transparently. This is a very simple model and it solves the issues enumerated previously: @@ -231,7 +78,7 @@ This is a very simple model and it solves the issues enumerated previously: ### Actors handle error situations gracefully -Since we have no longer a shared call stack between actors that send messages to each other, we need to handle +Since we no longer have a shared call stack between actors that send messages to each other, we need to handle error situations differently. There are two kinds of errors we need to consider: * The first case is when the delegated task on the target actor failed due to an error in the task (typically some @@ -250,3 +97,5 @@ others. Children never go silently dead (with the notable exception of entering either failing and their parent can react to the fault, or they are stopped (in which case interested parties are automatically notified). There is always a responsible entity for managing an actor: its parent. Restarts are not visible from the outside: collaborating actors can keep continuing sending messages while the target actor restarts. + +Now, let's take a short tour of the functionality Akka provides. 
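Before moving on, a tiny sketch of the mechanics just described: state that is touched only from the actor's own message handling, so no locks are needed even with many concurrent senders. All names here are illustrative.

```scala
import akka.actor.{ Actor, ActorSystem, Props }

// The counter's state is private and only modified while processing
// one message at a time, so no synchronization is required.
class Counter extends Actor {
  private var count = 0
  def receive = {
    case "increment" => count += 1
    case "report"    => sender() ! count
  }
}

val system = ActorSystem("intro")
val counter = system.actorOf(Props[Counter], "counter")
counter ! "increment" // fire-and-forget: the sender is never blocked
```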
diff --git a/akka-docs/src/main/paradox/scala/guide/actors-motivation.md b/akka-docs/src/main/paradox/scala/guide/actors-motivation.md new file mode 100644 index 0000000000..5869a9e1b2 --- /dev/null +++ b/akka-docs/src/main/paradox/scala/guide/actors-motivation.md @@ -0,0 +1,159 @@ +# Why modern systems need a new programming model + +The actor model was proposed decades ago by @extref[Carl Hewitt](wikipedia:Carl_Hewitt#Actor_model) as a way to handle parallel processing in a high performance network — an environment that was not available at the time. Today, hardware and infrastructure capabilities have caught up with and exceeded Hewitt's vision. Consequently, organizations building distributed systems with demanding requirements encounter challenges that cannot fully be solved with a traditional object-oriented programming (OOP) model, but that can benefit from the actor model. + +Today, the actor model is not only recognized as a highly effective solution — it has been proven in production for some of the world's most demanding applications. To highlight issues that the actor model addresses, this topic discusses the following mismatches between traditional programming assumptions and the reality of modern multi-threaded, multi-CPU architectures: + +* [The challenge of encapsulation](#the-illusion-of-encapsulation) +* [The illusion of shared memory on modern computer architectures](#The-illusion-of-shared-memory-on-modern-computer-architectures) +* [The illustion of a call stack](#the-illusion-of-a-call-stack) + + +## The challenge of encapsulation + +A core pillar of OOP is _encapsulation_. Encapsulation dictates that the internal data of an object is not accessible directly from the outside; +it can only be modified by invoking a set of curated methods. The object is responsible for exposing safe operations +that protect the invariant nature of its encapsulated data. + +For example, operations on an ordered binary tree implementation must not allow violation of the tree ordering +invariant. Callers expect the ordering to be intact and when querying the tree for a certain piece of +data, they need to be able to rely on this constraint. + +When we analyze OOP runtime behavior, we sometimes draw a message sequence chart showing the interactions of +method calls. For example: + +![sequence chart](diagrams/seq_chart.png) + +Unfortunately, the above diagram does not accurately represent the _lifelines_ of the instances during execution. +In reality, a _thread_ executes all these calls, and the enforcement of invariants occurs on the same thread from +which the method was called. Updating the diagram with the thread of execution, it looks like this: + +![sequence chart with thread](diagrams/seq_chart_thread.png) + +The significance of this clarification becomes clear when you try to model what happens with _multiple threads_. +Suddenly, our neatly drawn diagram becomes inadequate. We can try to illustrate multiple threads accessing +the same instance: + +![sequence chart with threads interacting](diagrams/seq_chart_multi_thread.png) + +There is a section of execution where two threads enter the same method. Unfortunately, the encapsulation model +of objects does not guarantee anything about what happens in that section. Instructions of the two invocations +can be interleaved in arbitrary ways which eliminate any hope for keeping the invariants intact without some +type of coordination between two threads. Now, imagine this issue compounded by the existence of many threads. 
+ +The common approach to solving this problem is to add a lock around these methods. While this ensures that at most +one thread will enter the method at any given time, this is a very costly strategy: + + * Locks _seriously limit_ concurrency, they are very costly on modern CPU architectures, + requiring heavy-lifting from the operating system to suspend the thread and restore it later. + * The caller thread is now blocked, so it cannot do any other meaningful work. Even in desktop applications this is + unacceptable, we want to keep user-facing parts of applications (its UI) to be responsive even when a + long background job is running. In the backend, blocking is outright wasteful. + One might think that this can be compensated by launching new threads, but threads are also a costly abstraction. + * Locks introduce a new menace: deadlocks. + +These realities result in a no-win situation: + + * Without sufficient locks, the state gets corrupted. + * With many locks in place, performance suffers and very easily leads to deadlocks. + +Additionally, locks only really work well locally. When it comes to coordinating across multiple machines, +the only alternative is distributed locks. Unfortunately, distributed locks are several magnitudes less efficient +than local locks and usually impose a hard limit on scaling out. Distributed lock protocols require several +communication round-trips over the network across multiple machines, so latency goes through the roof. + +In Object Oriented languages we rarely think about threads or linear execution paths in general. +We often envision a system as a network of object instances that react to method calls, modify their internal state, +then communicate with each other via method calls driving the whole application state forward: + +![network of interacting objects](diagrams/object_graph.png) + +However, in a multi-threaded distributed environment, what actually happens is that threads "traverse" this network of object instances by following method calls. +As a result, threads are what really drive execution: + +![network of interactive objects traversed by threads](diagrams/object_graph_snakes.png) + +**In summary**: + + * **Objects can only guarantee encapsulation (protection of invariants) in the face of single-threaded access, + multi-thread execution almost always leads to corrupted internal state. Every invariant can be violated by + having two contending threads in the same code segment.** + * **While locks seem to be the natural remedy to uphold encapsulation with multiple threads, in practice they + are inefficient and easily lead to deadlocks in any application of real-world scale.** + * **Locks work locally, attempts to make them distributed exist, but offer limited potential for scaling out.** + +## The illusion of shared memory on modern computer architectures + +Programming models of the 80'-90's conceptualize that writing to a variable means writing to a memory location directly +(which somewhat muddies the water that local variables might exist only in registers). On modern architectures - +if we simplify things a bit - CPUs are writing to @extref[cache lines](wikipedia:CPU_cache) +instead of writing to memory directly. Most of these caches are local to the CPU core, that is, writes by one core +are not visible by another. In order to make local changes visible to another core, and hence to another thread, +the cache line needs to be shipped to the other core's cache. 
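A hedged sketch of what this means for ordinary JVM code (illustrative names; whether the stall actually happens depends on the JIT and the hardware, so treat the outcome as possible rather than guaranteed):

```scala
// `running` is a plain field: nothing tells the JVM it is shared between
// threads, so the worker may keep reading a stale, core-local value.
object VisibilityDemo extends App {
  var running = true // deliberately not marked @volatile

  val worker = new Thread(() => {
    while (running) {} // may spin forever on a cached value of `running`
    println("worker observed running = false")
  })
  worker.start()

  Thread.sleep(100)
  running = false // this write may never become visible to the worker
}
```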
+ +On the JVM, we have to explicitly denote memory locations to be shared across threads by using _volatile_ markers +or `Atomic` wrappers. Otherwise, we can access them only in a locked section. Why don't we just mark all variables as +volatile? Because shipping cache lines across cores is a very costly operation! Doing so would implicitly stall the cores +involved from doing additional work, and result in bottlenecks on the cache coherence protocol (the protocol CPUs +use to transfer cache lines between main memory and other CPUs). +The result is magnitudes of slowdown. + +Even for developers aware of this situation, figuring out which memory locations should be marked as volatile, +or which atomic structures to use is a dark art. + +**In summary**: + + * **There is no real shared memory anymore, CPU cores pass chunks of data (cache lines) explicitly to each other + just as computers on a network do. Inter-CPU communication and network communication have more in common than many realize. Passing messages is the norm now be it across CPUs or networked computers.** + * **Instead of hiding the message passing aspect through variables marked as shared or using atomic data structures, + a more disciplined and principled approach is to keep state local to a concurrent entity and propagate data or events + between concurrent entities explicitly via messages.** + +## The illusion of a call stack + +Today, we often take call stacks for granted. But, they were invented in an era where concurrent programming +was not as important because multi-CPU systems were not common. Call stacks do not cross threads and hence, +do not model asynchronous call chains. + +The problem arises when a thread intends to delegate a task to the "background". In practice, this really means +delegating to another thread. This cannot be a simple method/function call because calls are strictly local to the +thread. What usually happens, is that the "caller" puts an object into a memory location shared by a worker thread +("callee"), which in turn, picks it up in some event loop. This allows the "caller" thread to move on and do other tasks. + +The first issue is, how can the "caller" be notified of the completion of the task? But a more serious issue arises +when a task fails with an exception. Where does the exception propagate to? It will propagate to the exception handler +of the worker thread completely ignoring who the actual "caller" was: + +![exceptions cannot propagate between different threads](diagrams/exception_prop.png) + +This is a serious problem. How does the worker thread deal with the situation? It likely cannot fix the issue as it is +usually oblivious of the purpose of the failed task. The "caller" thread needs to be notified somehow, +but there is no call stack to unwind with an exception. Failure notification can only be done via a side-channel, +for example putting an error code where the "caller" thread otherwise expects the result once ready. +If this notification is not in place, the "caller" never gets notified of a failure and the task is lost! +**This is surprisingly similar to how networked systems work where messages/requests can get lost/fail without any +notification.** + +This bad situation gets worse when things go really wrong and a worker backed by a thread encounters a bug and ends +up in an unrecoverable situation. For example, an internal exception caused by a bug bubbles up to the root of +the thread and makes the thread shut down. 
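A tiny sketch of this failure path using a plain JDK executor (illustrative names): the exception surfaces, if anywhere, on the worker thread, and nothing connects it back to the code that delegated the task.

```scala
import java.util.concurrent.Executors

object LostFailure extends App {
  val worker = Executors.newSingleThreadExecutor()

  // The "caller" hands the task over and immediately moves on.
  worker.execute(() => throw new RuntimeException("task failed"))

  // There is no call stack linking the failure back to this point, so this
  // code keeps running as if nothing happened; the task is simply lost.
  println("caller continues, unaware that its task just failed")

  worker.shutdown()
}
```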
This immediately raises the question of who should restart the normal operation +of the service hosted by the thread, and how should it be restored to a known-good state? At first glance, +this might seem manageable, but we are suddenly faced with a new, unexpected phenomenon: the actual task, +that the thread was currently working on, is no longer in the shared memory location where tasks are taken from +(usually a queue). In fact, due to the exception reaching the top, unwinding all of the call stack, +the task state is fully lost! **We have lost a message even though this is local communication with no networking +involved (where message losses are to be expected).** + +**In summary:** + + * **To achieve any meaningful concurrency and performance on current systems, threads must delegate tasks among each + other in an efficient way without blocking. With this style of task-delegating concurrency + (and even more so with networked/distributed computing) call stack-based error handling breaks down and new, + explicit error signaling mechanisms need to be introduced. Failures become part of the domain model.** + * **Concurrent systems with work delegation need to handle service faults and have principled means to recover from them. + Clients of such services need to be aware that tasks/messages might get lost during restarts. + Even if loss does not happen, a response might be delayed arbitrarily due to previously enqueued tasks + (a long queue), delays caused by garbage collection, etc. In the face of these, concurrent systems should handle response + deadlines in the form of timeouts, just like networked/distributed systems.** + +Next, let's see how use of the actor model can overcome these challenges. diff --git a/akka-docs/src/main/paradox/scala/guide/index.md b/akka-docs/src/main/paradox/scala/guide/index.md index 0a26f4178b..a56196fca9 100644 --- a/akka-docs/src/main/paradox/scala/guide/index.md +++ b/akka-docs/src/main/paradox/scala/guide/index.md @@ -5,12 +5,14 @@ @@@ index * [introduction](introduction.md) + * [actors-motivation](actors-motivation.md) * [actors-intro](actors-intro.md) * [modules](modules.md) - * [quickstart](quickstart.md) + * [tutorial](tutorial.md) * [part1](tutorial_1.md) * [part2](tutorial_2.md) * [part3](tutorial_3.md) * [part4](tutorial_4.md) + * [part5](tutorial_5.md) @@@ diff --git a/akka-docs/src/main/paradox/scala/guide/introduction.md b/akka-docs/src/main/paradox/scala/guide/introduction.md index b1747c9451..6ca747948a 100644 --- a/akka-docs/src/main/paradox/scala/guide/introduction.md +++ b/akka-docs/src/main/paradox/scala/guide/introduction.md @@ -1,49 +1,42 @@ # Introduction to Akka -Welcome to Akka, a set of open-source libraries for designing scalable, resilient systems that -span processor cores and networks. Akka allows you to focus on meeting business needs instead -of writing low-level code to provide reliable behavior, fault tolerance, and high performance. +Welcome to Akka, a set of open-source libraries for designing scalable, resilient systems that span processor cores and networks. Akka allows you to focus on meeting business needs instead of writing low-level code to provide reliable behavior, fault tolerance, and high performance. -Common practices and programming models do not address important challenges inherent in designing systems -for modern computer architectures.
To be successful, distributed systems must cope in an environment where components -crash without responding, messages get lost without a trace on the wire, and network latency fluctuates. -These problems occur regularly in carefully managed intra-datacenter environments - even more so in virtualized -architectures. +Many common practices and accepted programming models do not address important challenges +inherent in designing systems for modern computer architectures. To be +successful, distributed systems must cope in an environment where components +crash without responding, messages get lost without a trace on the wire, and +network latency fluctuates. These problems occur regularly in carefully managed +intra-datacenter environments - even more so in virtualized architectures. -To deal with these realities, Akka provides: +To help you deal with these realities, Akka provides: * Multi-threaded behavior without the use of low-level concurrency constructs like - atomics or locks. You do not even need to think about memory visibility issues. - * Transparent remote communication between systems and their components. You do - not need to write or maintain difficult networking code. - * A clustered, high-availability architecture that is elastic, scales in or out, on demand. + atomics or locks — relieving you from even thinking about memory visibility issues. + * Transparent remote communication between systems and their components — relieving you from writing and maintaining difficult networking code. + * A clustered, high-availability architecture that is elastic, scales in or out, on demand — enabling you to deliver a truly reactive system. -All of these features are available through a uniform programming model: Akka exploits the actor model -to provide a level of abstraction that makes it easier to write correct concurrent, parallel and distributed systems. -The actor model spans the set of Akka libraries, providing you with a consistent way of understanding and using them. -Thus, Akka offers a depth of integration that you cannot achieve by picking libraries to solve individual problems and -trying to piece them together. +Akka's use of the actor model provides a level of abstraction that makes it +easier to write correct concurrent, parallel and distributed systems. The actor +model spans the full set of Akka libraries, providing you with a consistent way +of understanding and using them. Thus, Akka offers a depth of integration that +you cannot achieve by picking libraries to solve individual problems and trying +to piece them together. -By learning Akka and its actor model, you will gain access to a vast and deep set of tools that solve difficult -distributed/parallel systems problems in a uniform programming model where everything fits together tightly and +By learning Akka and how to use the actor model, you will gain access to a vast +and deep set of tools that solve difficult distributed/parallel systems problems +in a uniform programming model where everything fits together tightly and efficiently. -## What is the Actor Model? +## How to get started -The characteristics of today's computing environments are vastly different from the ones in use when the programming -models of yesterday were conceived. Actors were invented decades ago by @extref[Carl Hewitt](wikipedia:Carl_Hewitt#Actor_model). -But relatively recently, their applicability to the challenges of modern computing systems has been recognized and -proved to be effective. 
+If this is your first experience with Akka, we recommend that you start by +running a simple Hello World project. See the @scala[[Quickstart Guide](http://developer.lightbend.com/guides/akka-quickstart-scala)] @java[[Quickstart Guide](http://developer.lightbend.com/guides/akka-quickstart-java)] for +instructions on downloading and running the Hello World example. The *Quickstart* guide walks you through example code that introduces how to define actor systems, actors, and messages as well as how to use the test module and logging. Within 30 minutes, you should be able to run the Hello World example and learn how it is constructed. -The actor model provides an abstraction that allows you to think about your code in terms of communication, not unlike -people in a large organization. The basic characteristic of actors is that they model the world as stateful entities -communicating with each other by explicit message passing. +This *Getting Started* guide provides the next level of information. It covers why the actor model fits the needs of modern distributed systems and includes a tutorial that will help further your knowledge of Akka. Topics include: -As computational entities, actors have these characteristics: - -* They communicate with asynchronous messaging instead of method calls -* They manage their own state -* When responding to a message, they can: - * Create other (child) actors - * Send messages to other actors - * Stop (child) actors or themselves +* @ref:[Why modern systems need a new programming model](actors-motivation.md) +* @ref:[How the actor model meets the needs of concurrent, distributed systems](actors-intro.md) +* @ref:[Overview of Akka libraries and modules](modules.md) +* A @ref:[more complex example](tutorial.md) that builds on the Hello World example to illustrate common Akka patterns. diff --git a/akka-docs/src/main/paradox/scala/guide/modules.md b/akka-docs/src/main/paradox/scala/guide/modules.md index 90412fc74a..0d4128fe7b 100644 --- a/akka-docs/src/main/paradox/scala/guide/modules.md +++ b/akka-docs/src/main/paradox/scala/guide/modules.md @@ -1,12 +1,31 @@ -# Akka Libraries and Modules +# Overview of Akka libraries and modules -Before we delve further into writing our first actors, we should stop for a moment and look at the set of libraries -that come out-of-the-box. This will help you identify which modules and libraries provide the functionality you -want to use in your system. +Before delving into some best practices for writing actors, it will be helpful to preview the most commonly used Akka libraries. This will help you start thinking about the functionality you want to use in your system. All core Akka functionality is available as Open Source Software (OSS). Lightbend sponsors Akka development but can also help you with [commercial offerings ](https://www.lightbend.com/platform/subscription) such as training, consulting, support, and [Enterprise Suite](https://www.lightbend.com/platform/production) — a comprehensive set of tools for managing Akka systems. 
-### Actors (`akka-actor` Library, the Core) +The following capabilities are included with Akka OSS and are introduced later on this page: -The use of actors across Akka libraries provides a consistent, integrated model that relieves you from individually +* [Actor library](#actor-library) +* [Remoting](#remoting) +* [Cluster](#cluster) +* [Cluster Sharding](#cluster-sharding) +* [Cluster Singleton](#cluster-singleton) +* [Cluster Publish-Subscribe](#cluster-publish-subscribe) +* [Persistence](#persistence) +* [Distributed Data](#distributed-data) +* [HTTP](#http) + +With a Lightbend subscription, you can use [Enterprise Suite](https://www.lightbend.com/platform/production) in production. Enterprise Suite includes the following extensions to Akka core functionality: + +* [Split Brain Resolver](https://developer.lightbend.com/docs/akka-commercial-addons/current/split-brain-resolver.html) — Detects and recovers from network partitions, eliminating data inconsistencies and possible downtime. +* [Configuration Checker](https://developer.lightbend.com/docs/akka-commercial-addons/current/config-checker.html) — Checks for potential configuration issues and logs suggestions. +* [Diagnostics Recorder](https://developer.lightbend.com/docs/akka-commercial-addons/current/diagnostics-recorder.html) — Captures configuration and system information in a format that makes it easy to troubleshoot issues during development and production. +* [Thread Starvation Detector](https://developer.lightbend.com/docs/akka-commercial-addons/current/starvation-detector.html) — Monitors an Akka system dispatcher and logs warnings if it becomes unresponsive. + +This page does not list all available modules, but overviews the main functionality and gives you an idea of the level of sophistication you can reach when you start building systems on top of Akka. + +### Actor library + +The core Akka library is `akka-actor`. But, actors are used across Akka libraries, providing a consistent, integrated model that relieves you from individually solving the challenges that arise in concurrent or distributed system design. From a birds-eye view, actors are a programming paradigm that takes encapsulation, one of the pillars of OOP, to its extreme. Unlike objects, actors encapsulate not only their @@ -17,7 +36,7 @@ yet, in the next chapter we will explain actors in detail. For now, the importan handles concurrency and distribution at the fundamental level instead of ad hoc patched attempts to bring these features to OOP. -Challenges that actors solve include: +Challenges that actors solve include the following: * How to build and design high-performance, concurrent applications. * How to handle errors in a multi-threaded environment. @@ -25,13 +44,13 @@ Challenges that actors solve include: ### Remoting -Remoting enables actors that are remote, living on different computers, to seamlessly exchange messages. +Remoting enables actors that live on different computers, to seamlessly exchange messages. While distributed as a JAR artifact, Remoting resembles a module more than it does a library. You enable it mostly -with configuration, it has only a few APIs. Thanks to the actor model, a remote and local message send looks exactly the +with configuration and it has only a few APIs. Thanks to the actor model, a remote and local message send looks exactly the same. The patterns that you use on local systems translate directly to remote systems. 
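As a hedged illustration (classic remoting, Akka 2.5 era; the remote address is hypothetical and remoting must be enabled in configuration for the second send to go anywhere), the tell operator is the same whether the target is local or remote:

```scala
import akka.actor.{ Actor, ActorSystem, Props }

class Worker extends Actor {
  def receive = { case job => println(s"processing $job") }
}

object SendDemo extends App {
  val system = ActorSystem("frontend")

  // A local actor created in this JVM
  val localWorker = system.actorOf(Props[Worker], "worker")

  // A reference to an actor on another (hypothetical) host, looked up by path
  val remoteWorker = system.actorSelection("akka.tcp://backend@10.0.0.2:2552/user/worker")

  // The send looks identical in both cases
  localWorker ! "local job"
  remoteWorker ! "remote job"

  system.terminate()
}
```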
You will rarely need to use Remoting directly, but it provides the foundation on which the Cluster subsystem is built. -Some of the challenges Remoting solves are: +Challenges Remoting solves include the following: * How to address actor systems living on remote hosts. * How to address individual actors on remote actor systems. @@ -48,7 +67,7 @@ remote systems, Clustering gives you the ability to organize these into a "meta- protocol. **In most cases, you want to use the Cluster module instead of using Remoting directly.** Clustering provides an additional set of services on top of Remoting that most real world applications need. -The challenges the Cluster module solves, among others, are: +Challenges the Cluster module solves include the following: * How to maintain a set of actor systems (a cluster) that can communicate with each other and consider each other as part of the cluster. * How to introduce a new system safely to the set of already existing members. @@ -63,7 +82,7 @@ Sharding helps to solve the problem of distributing a set of actors among member Sharding is a pattern that is mostly used together with Persistence to balance a large set of persistent entities (backed by actors) to members of a cluster and also migrate them to other nodes when members crash or leave. -The challenge space that Sharding targets: +Challenges that Sharding solves include the following: * How to model and scale out a large set of stateful entities on a set of systems. * How to ensure that entities in the cluster are distributed properly so that load is properly balanced across the machines. @@ -155,17 +174,10 @@ Some of the challenges that HTTP tackles: * How to stream large datasets in and out of a system using HTTP. * How to stream live events in and out of a system using HTTP. -*** +### Example of module use -The above is an incomplete list of all the available modules, but it gives a nice overview of the landscape of modules -and the level of sophistication you can reach when you start building systems on top of Akka. All these modules -integrate with together seamlessly. For example, take a large set of stateful business objects -(a document, a shopping cart, etc) that is accessed by on-line users of your website. Model these as sharded -entities using Sharding and Persistence to keep them balanced across a cluster that you can scale out on-demand -(for example during an advertising campaign before holidays) and keep them available even if some systems crash. -Take the real-time stream of domain events of your business objects with Persistence Query and use Streams to pipe -it into a streaming BigData engine. Take the output of that engine as a Stream, manipulate it using Akka Streams +Akka modules integrate together seamlessly. For example, think of a large set of stateful business objects, such as documents or shopping carts, that website users access. If you model these as sharded entities, using Sharding and Persistence, they will be balanced across a cluster that you can scale out on-demand. They will be available during spikes that come from advertising campaigns or before holidays, even if some systems crash. You can also easily take the real-time stream of domain events with Persistence Query and use Streams to pipe them into a streaming Fast Data engine.
Then, take the output of that engine as a Stream, manipulate it using Akka Streams operators and expose it as web socket connections served by a load balanced set of HTTP servers hosted by your cluster to power your real-time business analytics tool. -Hope this have gotten you interested? Keep on reading to learn more. +We hope this preview caught your interest! The next topic introduces the example application we will build in the tutorial portion of this guide. diff --git a/akka-docs/src/main/paradox/scala/guide/quickstart.md b/akka-docs/src/main/paradox/scala/guide/quickstart.md deleted file mode 100644 index ab21b7ea8b..0000000000 --- a/akka-docs/src/main/paradox/scala/guide/quickstart.md +++ /dev/null @@ -1,18 +0,0 @@ -# Quickstart - -After all this introduction, we are ready to build our first actor system. We will do so in five chapters. -This first chapter will help you to set up your project, tools and have a simple "Hello World" demo running. -We will keep this section to a bare minimum and then extend the sample application in the next chapter. - -> Our goal in this chapter is to set up a working environment for you, create an application that starts up and stops -an ActorSystem and create an actor which we will run and test. - -Akka requires that you have [Java 8](http://www.oracle.com/technetwork/java/javase/downloads/index.html) or -later installed on your machine. - -As the very first thing, we need to make sure that we can compile our project and have a working IDE setup to be -able to edit code comfortably. - -The easiest way is to use the @scala[[Akka Quickstart with Scala guide](http://developer.lightbend.com/guides/akka-quickstart-scala/)] @java[[Akka Quickstart with Java guide](http://developer.lightbend.com/guides/akka-quickstart-java/)]. It contains a Hello World example that illustrates Akka basics. Within 30 minutes, you should be able to download and run the example and use that guide to understand how the example is constructed. - -After that you can go back here and you are ready to dive deeper. diff --git a/akka-docs/src/main/paradox/scala/guide/tutorial.md b/akka-docs/src/main/paradox/scala/guide/tutorial.md new file mode 100644 index 0000000000..21c1340d9b --- /dev/null +++ b/akka-docs/src/main/paradox/scala/guide/tutorial.md @@ -0,0 +1,38 @@ +# Introduction to the Example + +When writing prose, the hardest part is often composing the first few sentences. There is a similar "blank canvas" feeling +when starting to build an Akka system. You might wonder: Which should be the first actor? Where should it live? What should it do? +Fortunately — unlike with prose — established best practices can guide us through these initial steps. In the remainder of this guide, we examine the core logic of a simple Akka application to introduce you to actors and show you how to formulate solutions with them. The example demonstrates common patterns that will help you kickstart your Akka projects. + +## Prerequisites +You should have already followed the instructions in the @scala[[Akka Quickstart with Scala guide](http://developer.lightbend.com/guides/akka-quickstart-scala/)] @java[[Akka Quickstart with Java guide](http://developer.lightbend.com/guides/akka-quickstart-java/)] to download and run the Hello World example. You will use this as a seed project and add the functionality described in this tutorial. 
+ +## IoT example use case + +In this tutorial, we'll use Akka to build out part of an Internet of Things (IoT) system that reports data from sensor devices installed in customers' homes. The example focuses on temperature readings. The target use case simply allows customers to log in and view the last reported temperature from different areas of their homes. You can imagine that such sensors could also collect relative humidity or other interesting data and an application would likely support reading and changing device configuration, maybe even alerting home owners when sensor state falls outside of a particular range. + +In a real system, the application would be exposed to customers through a mobile app or browser. This guide concentrates only on the core logic for storing temperatures that would be called over a network protocol, such as HTTP. It also includes writing tests to help you get comfortable and proficient with testing actors. + +The tutorial application consists of two main components: + + * **Device data collection:** — maintains a local representation of the + remote devices. Multiple sensor devices for a home are organized into one device group. + * **User dashboard:** — periodically collects data from the devices for a + logged in user's home and presents the results as a report. + +The following diagram illustrates the example application architecture. Since we are interested in the state of each sensor device, we will model devices as actors. The running application will create as many instances of device actors and device groups as necessary. + +![box diagram of the architecture](diagrams/arch_boxes_diagram.png) + +## What you will learn in this tutorial +This tutorial introduces and illustrates: + +* The actor hierarchy and how it influences actor behavior +* How to choose the right granularity for actors +* How to define protocols as messages +* Typical conversational styles + + +Let's get started by learning more about actors. + + diff --git a/akka-docs/src/main/paradox/scala/guide/tutorial_1.md b/akka-docs/src/main/paradox/scala/guide/tutorial_1.md index 3ffe818b66..eee915e5f4 100644 --- a/akka-docs/src/main/paradox/scala/guide/tutorial_1.md +++ b/akka-docs/src/main/paradox/scala/guide/tutorial_1.md @@ -1,82 +1,31 @@ -# Part 1: Top-level Architecture +# Part 1: Actor Architecture -In this and the following chapters, we will build a sample Akka application to introduce you to the language of -actors and how solutions can be formulated with them. It is a common hurdle for beginners to translate their project -into actors even though they don't understand what they do on the high-level. We will build the core logic of a small -application and this will serve as a guide for common patterns that will help to kickstart Akka projects. +Use of Akka relieves you from creating the infrastructure for an actor system and from writing the low-level code necessary to control basic behavior. To appreciate this, let's look at the relationships between actors you create in your code and those that Akka creates and manages for you internally, the actor lifecycle, and failure handling. -The application we aim to write will be a simplified IoT system where devices, installed at the home of users, can report temperature data from sensors. Users will be able to query the current state of these sensors. To keep -things simple, we will not actually expose the application via HTTP or any other external API, we will, instead, concentrate only on the -core logic. 
However, we will write tests for the pieces of the application to get comfortable and -proficient with testing actors early on. +## The Akka actor hierarchy -## Our Goals for the IoT System - -We will build a simple IoT application with the bare essentials to demonstrate designing an Akka-based system. The application will consist of two main components: - - * **Device data collection:** This component has the responsibility to maintain a local representation of the - otherwise remote devices. The devices will be organized into device groups, grouping together sensors belonging to a home. - * **User dashboards:** This component has the responsibility to periodically collect data from the devices for a - logged in user and present the results as a report. - -For simplicity, we will only collect temperature data for the devices, but in a real application our local representations -for a remote device, which we will model as an actor, would have many more responsibilities. Among others; reading the -configuration of the device, changing the configuration, checking if the devices are unresponsive, etc. We leave -these complexities for now as they can be easily added as an exercise. - -We will also not address the means by which the remote devices communicate with the local representations (actors). Instead, -we just build an actor based API that such a network protocol could use. We will use tests for our API everywhere though. - -The architecture of the application will look like this: - -![box diagram of the architecture](diagrams/arch_boxes_diagram.png) - -## Top Level Architecture - -When writing prose, the hardest part is usually to write the first couple of sentences. There is a similar feeling -when trying to build an Akka system: What should be the first actor? Where should it live? What should it do? -Fortunately, unlike with prose, there are established best practices that can guide us through these initial steps. - -When one creates an actor in Akka it always belongs to a certain parent. This means that actors are always organized -into a tree. In general, creating an actor can only happen from inside another actor. This creator actor becomes the +An actor in Akka always belongs to a parent. Typically, you create an actor by calling @java[`getContext().actorOf()`]@scala[`context.actorOf()`]. Rather than creating a "freestanding" actor, this injects the new actor as a child into an already existing tree: the creator actor becomes the _parent_ of the newly created _child_ actor. You might ask then, who is the parent of the _first_ actor you create? -As we have seen in the previous chapters, to create a top-level actor one must call `system.actorOf()`. This does -not create a "freestanding" actor though, instead, it injects the corresponding actor as a child into an already -existing tree: + +As illustrated below, all actors have a common parent, the user guardian. New actor instances can be created under this actor using `system.actorOf()`. As we covered in the @scala[[Quickstart Guide](https://developer.lightbend.com/guides/akka-quickstart-scala/)]@java[[Quickstart Guide](https://developer.lightbend.com/guides/akka-quickstart-java/)], creation of an actor returns a reference that is a valid URL. So, for example, if we create an actor named `someActor` with `system.actorOf(…, "someActor")`, its reference will include the path `/user/someActor`. 
![box diagram of the architecture](diagrams/actor_top_tree.png) -As you see, creating actors from the "top" injects those actors under the path `/user/`, so for example creating -an actor named `myActor` will end up having the path `/user/myActor`. In fact, there are three already existing -actors in the system: +In fact, before you create an actor in your code, Akka has already created three actors in the system. The names of these built-in actors contain _guardian_ because they _supervise_ every child actor in their path. The guardian actors include: - - `/` the so-called _root guardian_. This is the parent of all actors in the system, and the last one to stop - when the system itself is terminated. - - `/user` the _guardian_. **This is the parent actor for all user created actors**. The name `user` should not confuse - you, it has nothing to do with the logged in user, nor user handling in general. This name really means _userspace_ - as this is the place where actors that do not access Akka internals live, i.e. all the actors created by users of the Akka library. Every actor you will create will have the constant path `/user/` prepended to it. + - `/` the so-called _root guardian_. This is the parent of all actors in the system, and the last one to stop when the system itself is terminated. + - `/user` the _guardian_. **This is the parent actor for all user created actors**. Don't let the name `user` confuse + you; it has nothing to do with end users or user handling. Every actor you create using the Akka library will have the constant path `/user/` prepended to it. - `/system` the _system guardian_. -The names of these built-in actors contain _guardian_ because these are _supervising_ every actor living as a child -of them, i.e. under their path. We will explain supervision in more detail, all you need to know now is that every -unhandled failure from actors bubbles up to their parent that, in turn, can decide how to handle this failure. These -predefined actors are guardians in the sense that they are the final lines of defense, where all unhandled failures -from user, or system, actors end up. +In the Hello World example, we have already seen how `system.actorOf()` creates an actor directly under `/user`. We call this a _top level_ actor, even though, in practice, it is only at the top of the +_user defined_ hierarchy. You typically have only one (or very few) top level actors in your `ActorSystem`. +We create child, or non-top-level, actors by invoking `context.actorOf()` from an existing actor. The `context.actorOf()` method has a signature identical to `system.actorOf()`, its top-level counterpart. -> Does the root guardian (the root path `/`) have a parent? As it turns out, it has. This special entity is called -> the "Bubble-Walker". This special entity is invisible for the user and only has uses internally. +The easiest way to see the actor hierarchy in action is to simply print `ActorRef` instances. In this small experiment, we create an actor, print its reference, create a child of this actor, and print the child's reference. We start with the Hello World project; if you have not downloaded it, download the Quickstart project from the @scala[[Lightbend Tech Hub](http://developer.lightbend.com/start/?group=akka&project=akka-quickstart-scala)]@java[[Lightbend Tech Hub](http://developer.lightbend.com/start/?group=akka&project=akka-quickstart-java)].
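For orientation, here is a condensed sketch of the kind of experiment described above (the class and message names are illustrative; the tutorial's own `ActorHierarchyExperiments` snippet referenced below is the version to copy into your project):

```scala
import akka.actor.{ Actor, ActorSystem, Props }

class PrintMyActorRefActor extends Actor {
  def receive = {
    case "printit" =>
      // Creating a child from inside an actor uses context.actorOf()
      val secondRef = context.actorOf(Props.empty, "second-actor")
      println(s"Second: $secondRef")
  }
}

object ActorHierarchySketch extends App {
  val system = ActorSystem("testSystem")

  // Creating a top-level actor under /user uses system.actorOf()
  val firstRef = system.actorOf(Props[PrintMyActorRefActor], "first-actor")
  println(s"First: $firstRef")
  firstRef ! "printit"

  println(">>> Press ENTER to exit <<<")
  scala.io.StdIn.readLine()
  system.terminate()
}
```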
-### Structure of an ActorRef and Paths of Actors -The easiest way to see this in action is to simply print `ActorRef` instances. In this small experiment, we print -the reference of the first actor we create and then we create a child of this actor, and print its reference. We have -already created actors with `system.actorOf()`, which creates an actor under `/user` directly. We call this kind -of actors _top level_, even though in practice they are not on the top of the hierarchy, only on the top of the -_user defined_ hierarchy. Since in practice we usually concern ourselves about actors under `/user` this is still a -convenient terminology, and we will stick to it. - -Creating a non-top-level actor is possible from any actor, by invoking `context.actorOf()` which has the exact same -signature as its top-level counterpart. This is how it looks like in practice: +In your Hello World project, navigate to the `com.lightbend.akka.sample` package and create a new @scala[Scala file called `ActorHierarchyExperiments.scala`]@java[Java file called `ActorHierarchyExperiments.java`] here. Copy and paste the code from the snippet below to this new source file. Save your file and run `sbt "runMain com.lightbend.akka.sample.ActorHierarchyExperiments"` to observe the output. Scala : @@snip [ActorHierarchyExperiments.scala]($code$/scala/tutorial_1/ActorHierarchyExperiments.scala) { #print-refs } @@ -84,51 +33,38 @@ Scala Java : @@snip [ActorHierarchyExperiments.java]($code$/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #print-refs } -We see that the following two lines are printed +Note the way a message asked the first actor to do its work. We sent the message by using the parent's reference: @scala[`firstRef ! "printit"`]@java[`firstRef.tell("printit", ActorRef.noSender())`]. When the code executes, the output includes the references for the first actor and the child it created as part of the `printit` case. Your output should look similar to the following: ``` -First : Actor[akka://testSystem/user/first-actor#1053618476] +First: Actor[akka://testSystem/user/first-actor#1053618476] Second: Actor[akka://testSystem/user/first-actor/second-actor#-1544706041] ``` -First, we notice that all of the paths start with `akka://testSystem/`. Since all actor references are valid URLs, there -is a protocol field needed, which is `akka://` in the case of actors. Then, just like on the World Wide Web, the system -is identified. In our case, this is `testSystem`, but could be any other name (if remote communication between multiple -systems is enabled this name is the hostname of the system so other systems can find it on the network). Our two actors, -as we have discussed before, live under user, and form a hierarchy: +Notice the structure of the references: - * `akka://testSystem/user/first-actor` is the first actor we created, which lives directly under the user guardian, - `/user` - * `akka://testSystem/user/first-actor/second-actor` is the second actor we created, using `context.actorOf`. As we - see it lives directly under the first actor. +* Both paths start with `akka://testSystem/`. Since all actor references are valid URLs, `akka://` is the value of the protocol field. +* Next, just like on the World Wide Web, the URL identifies the system. In this example, the system is named `testSystem`, but it could be any other name. If remote communication between multiple systems is enabled, this part of the URL includes the hostname so other systems can find it on the network. 
+* Because the second actor's reference includes the path `/first-actor/`, it identifies it as a child of the first. +* The last part of the actor reference, `#1053618476` or `#-1544706041` is a unique identifier that you can ignore in most cases. -The last part of the actor reference, like `#1053618476` is a unique identifier of the actor living under the path. -This is usually not something the user needs to be concerned with, and we leave the discussion of this field for later. +Now that you understand what the actor hierarchy +looks like, you might be wondering: _Why do we need this hierarchy? What is it used for?_ -### Hierarchy and Lifecycle of Actors +An important role of the hierarchy is to safely manage actor lifecycles. Let's consider this next and see how that knowledge can help us write better code. -We have so far seen that actors are organized into a **strict hierarchy**. This hierarchy consists of a predefined -upper layer of three actors (the root guardian, the user guardian, and the system guardian), thereafter the user created -top-level actors (those directly living under `/user`) and the children of those. We now understand what the hierarchy -looks like, but there are some nagging unanswered questions: _Why do we need this hierarchy? What is it used for?_ +### The actor lifecycle +Actors pop into existence when created, then later, at user requests, they are stopped. Whenever an actor is stopped, all of its children are _recursively stopped_ too. +This behavior greatly simplifies resource cleanup and helps avoid resource leaks such as those caused by open sockets and files. In fact, a commonly overlooked difficulty when dealing with low-level multi-threaded code is the lifecycle management of various concurrent resources. -The first use of the hierarchy is to manage the lifecycle of actors. Actors pop into existence when created, then later, -at user requests, they are stopped. Whenever an actor is stopped, all of its children are _recursively stopped_ too. -This is a very useful property and greatly simplifies cleaning up resources and avoiding resource leaks (like open -sockets files, etc.). In fact, one of the overlooked difficulties when dealing with low-level multi-threaded code is -the lifecycle management of various concurrent resources. +To stop an actor, the recommended pattern is to call @scala[`context.stop(self)`]@java[`getContext().stop(getSelf())`] inside the actor to stop itself, usually as a response to some user defined stop message or when the actor is done with its job. Stopping another actor is technically possible by calling @scala[`context.stop(actorRef)`]@java[`getContext().stop(actorRef)`], but **It is considered a bad practice to stop arbitrary actors this way**: try sending them a `PoisonPill` or custom stop message instead. -Stopping an actor can be done by calling `context.stop(actorRef)`. **It is considered a bad practice to stop arbitrary -actors this way**. The recommended pattern is to call `context.stop(self)` inside an actor to stop itself, usually as -a response to some user defined stop message or when the actor is done with its job. - -The actor API exposes many lifecycle hooks that the actor implementation can override. The most commonly used are +The Akka actor API exposes many lifecycle hooks that you can override in an actor implementation. The most commonly used are `preStart()` and `postStop()`. * `preStart()` is invoked after the actor has started but before it processes its first message. 
* `postStop()` is invoked just before the actor stops. No messages are processed after this point. -Again, we can try out all this with a simple experiment: +Let's use the `preStart()` and `postStop()` lifecycle hooks in a simple experiment to observe the behavior when we stop an actor. First, add the following two actor classes to your project: Scala : @@snip [ActorHierarchyExperiments.scala]($code$/scala/tutorial_1/ActorHierarchyExperiments.scala) { #start-stop } @@ -136,7 +72,15 @@ Scala Java : @@snip [ActorHierarchyExperiments.java]($code$/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #start-stop } -After running it, we get the output +And create a 'main' class like above to start the actors and then send them a `"stop"` message: + +Scala +: @@snip [ActorHierarchyExperiments.scala]($code$/scala/tutorial_1/ActorHierarchyExperiments.scala) { #start-stop-main } + +Java +: @@snip [ActorHierarchyExperiments.java]($code$/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #start-stop-main } + +You can again use `sbt` to start this program. The output should look like this: ``` first started @@ -145,19 +89,18 @@ second stopped first stopped ``` -We see that when we stopped actor `first` it recursively stopped actor `second` and thereafter it stopped itself. -This ordering is strict, _all_ `postStop()` hooks of the children are called before the `postStop()` hook of the parent +When we stopped actor `first`, it stopped its child actor, `second`, before stopping itself. This ordering is strict: _all_ `postStop()` hooks of the children are called before the `postStop()` hook of the parent is called. -The family of these lifecycle hooks is rich, and we recommend reading [the actor lifecycle](http://doc.akka.io/docs/akka/current/scala/actors.html#Actor_Lifecycle) section of the reference for all details. +The @ref:[Actor Lifecycle](../actors.md#actor-lifecycle) section of the Akka reference manual provides details on the full set of lifecycle hooks. -### Hierarchy and Failure Handling (Supervision) +### Failure handling -Parents and children are not only connected by their lifecycles. Whenever an actor fails (throws an exception or -an unhandled exception bubbles out from `receive`) it is temporarily suspended. The failure information is propagated -to the parent, which decides how to handle the exception caused by the child actor. The default _supervisor strategy_ is to -stop and restart the child. If you don't change the default strategy all failures result in a restart. We won't change -the default strategy in this simple experiment: +Parents and children are connected throughout their lifecycles. Whenever an actor fails (throws an exception or an unhandled exception bubbles out from `receive`) it is temporarily suspended. As mentioned earlier, the failure information is propagated +to the parent, which then decides how to handle the exception caused by the child actor. In this way, parents act as supervisors for their children. The default _supervisor strategy_ is to +stop and restart the child. If you don't change the default strategy, all failures result in a restart. + +Let's observe the default strategy in a simple experiment.
Add the following classes to your project, just as you did with the previous ones: Scala : @@snip [ActorHierarchyExperiments.scala]($code$/scala/tutorial_1/ActorHierarchyExperiments.scala) { #supervise } @@ -165,7 +108,15 @@ Scala Java : @@snip [ActorHierarchyExperiments.java]($code$/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #supervise } -After running the snippet, we see the following output on the console: +And run with: + +Scala +: @@snip [ActorHierarchyExperiments.scala]($code$/scala/tutorial_1/ActorHierarchyExperiments.scala) { #supervise-main } + +Java +: @@snip [ActorHierarchyExperiments.java]($code$/java/jdocs/tutorial_1/ActorHierarchyExperiments.java) { #supervise-main } + +You should see output similar to the following: ``` supervised actor started @@ -188,62 +139,13 @@ java.lang.Exception: I failed! at akka.dispatch.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107) ``` -We see that after failure the actor is stopped and immediately started. We also see a log entry reporting the -exception that was handled, in this case, our test exception. In this example we use `preStart()` and `postStop()` hooks -which are the default to be called after and before restarts, so we cannot distinguish from inside the actor if it -was started for the first time or restarted. This is usually the right thing to do, the purpose of the restart is to -set the actor in a known-good state, which usually means a clean starting stage. **What actually happens though is -that the `preRestart()` and `postRestart()` methods are called which, if not overridden, by default delegate to -`postStop()` and `preStart()` respectively**. You can experiment with overriding these additional methods and see -how the output changes. +We see that after failure the supervised actor is stopped and immediately restarted. We also see a log entry reporting the exception that was handled, in this case, our test exception. In this example we used `preStart()` and `postStop()` hooks +which are the default to be called after and before restarts, so we cannot distinguish from inside the actor whether it was started for the first time or restarted. This is usually the right thing to do, the purpose of the restart is to set the actor in a known-good state, which usually means a clean starting stage. **What actually happens though is +that the `preRestart()` and `postRestart()` methods are called which, if not overridden, by default delegate to `postStop()` and `preStart()` respectively**. You can experiment with overriding these additional methods and see how the output changes. -For the impatient, we also recommend looking into the [supervision reference page](http://doc.akka.io/docs/akka/current/general/supervision.html) for more in-depth +For the impatient, we also recommend looking into the @ref:[supervision reference page](../general/supervision.md) for more in-depth details. -### The First Actor +# Summary +We've learned about how Akka manages actors in hierarchies where parents supervise their children and handle exceptions. We saw how to create a very simple actor and child. Next, we'll apply this knowledge to our example use case by modeling the communication necessary to get information from device actors. Later, we'll deal with how to manage the actors in groups. -Actors are organized into a strict tree, where the lifecycle of every child is tied to the parent and where parents -are responsible for deciding the fate of failed children. 
At first, it might not be evident how to map our problem -to such a tree, but in practice, this is easier than it looks. All we need to do is to rewrite our architecture diagram -that contained nested boxes into a tree: - -![actor tree diagram of the architecture](diagrams/arch_tree_diagram.png) - -In simple terms, every component manages the lifecycle of the subcomponents. No subcomponent can outlive the parent -component. This is exactly how the actor hierarchy works. Furthermore, it is desirable that a component handles the failure -of its subcomponents. Together, these two desirable properties lead to the conclusion that the "contained-in" relationship of components should be mapped to the -"children-of" relationship of actors. - -The remaining question is how to map the top-level components to actors. It might be tempting to create the actors -representing the main components as top-level actors. We instead, recommend creating an explicit component that -represents the whole application. In other words, we will have a single top-level actor in our actor system and have -the main components as children of this actor. - -The first actor happens to be rather simple now, as we have not implemented any of the components yet. What is new -is that we have dropped using `println()` and instead use @scala[the `ActorLogging` helper trait] @java[`akka.event.Logging`] which allows us to use the -logging facility built into Akka directly. Furthermore, we are using a recommended creational pattern for actors; define a `props()` @scala[method in the [companion object](http://docs.scala-lang.org/tutorials/tour/singleton-objects.html#companions) of] @java[static method on] the actor: - -Scala -: @@snip [IotSupervisor.scala]($code$/scala/tutorial_1/IotSupervisor.scala) { #iot-supervisor } - -Java -: @@snip [IotSupervisor.java]($code$/java/jdocs/tutorial_1/IotSupervisor.java) { #iot-supervisor } - -All we need now is to tie this up with a class with the `main` entry point: - -Scala -: @@snip [IotApp.scala]($code$/scala/tutorial_1/IotApp.scala) { #iot-app } - -Java -: @@snip [IotMain.java]($code$/java/jdocs/tutorial_1/IotMain.java) { #iot-app } - -This application does very little for now, but we have the first actor in place and we are ready to extend it further. - -## What is next? - -In the following chapters we will grow the application step-by-step: - - 1. We will create the representation for a device - 2. We create the device management component - 3. We add query capabilities to device groups - 4. We add the dashboard component diff --git a/akka-docs/src/main/paradox/scala/guide/tutorial_2.md b/akka-docs/src/main/paradox/scala/guide/tutorial_2.md index 2681cbbf7f..ea4a0713d9 100644 --- a/akka-docs/src/main/paradox/scala/guide/tutorial_2.md +++ b/akka-docs/src/main/paradox/scala/guide/tutorial_2.md @@ -1,214 +1,42 @@ -# Part 2: The Device Actor +# Part 2: Creating the First Actor -In part 1 we explained how to view actor systems _in the large_, i.e. how components should be represented, how -actors should be arranged in the hierarchy. In this part, we will look at actors _in the small_ by implementing an -actor with the most common conversational patterns. +With an understanding of actor hierarchy and behavior, the remaining question is how to map the top-level components of our IoT system to actors. It might be tempting to make the actors that +represent devices and dashboards at the top level. Instead, we recommend creating an explicit component that represents the whole application. 
In other words, we will have a single top-level actor in our IoT system. The components that create and manage devices and dashboards will be children of this actor. This allows us to refactor the example use case architecture diagram into a tree of actors: -In particular, leaving the components aside for a while, we will implement an actor that represents a device. The -tasks of this actor will be rather simple: +![actor tree diagram of the architecture](diagrams/arch_tree_diagram.png) - * Collect temperature measurements - * Report the last measured temperature if asked -When working with objects we usually design our API as _interfaces_, which are basically a collection of abstract -methods to be filled out by the actual implementation. In the world of actors, the counterpart of interfaces is -protocols. While it is not possible to formalize general protocols in the programming language, we can formalize -its most basic elements: the messages. +We can define the first actor, the IotSupervisor, with a few simple lines of code. To start your tutorial application: -## The Query Protocol - -Just because a device have been started it does not mean that it has immediately a temperature measurement. Hence, we -need to account for the case where a temperature is not present in our protocol. This, fortunately, means that we -can test the query part of the actor without the write part present, as it can simply report an empty result. - -The protocol for obtaining the current temperature from the device actor is rather simple: - - 1. Wait for a request for the current temperature. - 2. Respond to the request with a reply containing the current temperature or an indication that it is not yet - available. - -We need two messages, one for the request, and one for the reply. A first attempt could look like this: +1. Create a new `IotSupervisor` source file in the `com.lightbend.akka.sample` package. +1. Paste the following code into the new file to define the IotSupervisor. Scala -: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_2/DeviceInProgress.scala) { #read-protocol-1 } +: @@snip [IotSupervisor.scala]($code$/scala/tutorial_2/IotSupervisor.scala) { #iot-supervisor } Java -: @@snip [DeviceInProgress.java]($code$/java/jdocs/tutorial_2/DeviceInProgress.java) { #read-protocol-1 } +: @@snip [IotSupervisor.java]($code$/java/jdocs/tutorial_2/IotSupervisor.java) { #iot-supervisor } -This is a fine approach, but it limits the flexibility of the protocol. To understand why we need to talk -about message ordering and message delivery guarantees in general. +The code is similar to the actor examples we used in the previous experiments, but notice: -## Message Ordering, Delivery Guarantees +* Instead of `println()` we use @scala[the `ActorLogging` helper trait] @java[`akka.event.Logging`], which directly invokes Akka's built in logging facility. +* We use the recommended pattern for creating actors by defining a `props()` @scala[method in the [companion object](http://docs.scala-lang.org/tutorials/tour/singleton-objects.html#companions) of] @java[static method on] the actor. -In order to give some context to the discussion below, consider an application which spans multiple network hosts. -The basic mechanism for communication is the same whether sending to an actor on the local JVM or to a remote actor, -but of course, there will be observable differences in the latency of delivery (possibly also depending on the bandwidth -of the network link and the message size) and the reliability. 
In the case of a remote message send there are -more steps involved which means that more can go wrong. Another aspect is that a local send will just pass a -reference to the message inside the same JVM, without any restrictions on the underlying object which is sent, -whereas a remote transport will place a limit on the message size. - -It is also important to keep in mind, that while sending inside the same JVM is significantly more reliable, if an -actor fails due to a programmer error while processing the message, the effect is basically the same as if a remote, -network request fails due to the remote host crashing while processing the message. Even though in both cases the -service is recovered after a while (the actor is restarted by its supervisor, the host is restarted by an operator -or by a monitoring system) individual requests are lost during the crash. **Writing your actors such that every -message could possibly be lost is the safe, pessimistic bet.** - -These are the rules in Akka for message sends: - - * At-most-once delivery, i.e. no guaranteed delivery. - * Message ordering is maintained per sender, receiver pair. - -### What Does "at-most-once" Mean? - -When it comes to describing the semantics of a delivery mechanism, there are three basic categories: - - * **At-most-once delivery** means that for each message handed to the mechanism, that message is delivered zero or - one time; in more casual terms it means that messages may be lost, but never duplicated. - * **At-least-once delivery** means that for each message handed to the mechanism potentially multiple attempts are made - at delivering it, such that at least one succeeds; again, in more casual terms this means that messages may be duplicated but not lost. - * **Exactly-once delivery** means that for each message handed to the mechanism exactly one delivery is made to - the recipient; the message can neither be lost nor duplicated. - -The first one is the cheapest, highest performance, least implementation overhead because it can be done in a -fire-and-forget fashion without keeping the state at the sending end or in the transport mechanism. -The second one requires retries to counter transport losses, which means keeping the state at the sending end and -having an acknowledgment mechanism at the receiving end. The third is most expensive, and has consequently worst -performance: in addition to the second, it requires the state to be kept at the receiving end in order to filter out -duplicate deliveries. - -### Why No Guaranteed Delivery? - -At the core of the problem lies the question what exactly this guarantee shall mean, i.e. at which point does -the delivery considered to be guaranteed: - - 1. When the message is sent out on the network? - 2. When the message is received by the other host? - 3. When the message is put into the target actor's mailbox? - 4. When the message is starting to be processed by the target actor? - 5. When the message is processed successfully by the target actor? - -Most frameworks/protocols claiming guaranteed delivery actually provide something similar to point 4 and 5. While this -sounds fair, **is this actually useful?** To understand the implications, consider a simple, practical example: -a user attempts to place an order and we only want to claim that it has successfully processed once it is actually on -disk in the database containing orders. 
- -If we rely on the guarantees of such system it will report success as soon as the order has been submitted to the -internal API that has the responsibility to validate it, process it and put it into the database. Unfortunately, -immediately after the API has been invoked the following may happen: - - * The host can immediately crash. - * Deserialization can fail. - * Validation can fail. - * The database might be unavailable. - * A programming error might occur. - -The problem is that the **guarantee of delivery** does not translate to the **domain level guarantee**. We only want to -report success once the order has been actually fully processed and persisted. **The only entity that can report -success is the application itself, since only it has any understanding of the domain guarantees required. No generalized -framework can figure out the specifics of a particular domain and what is considered a success in that domain**. In -this particular example, we only want to signal success after a successful database write, where the database acknowledged -that the order is now safely stored. **For these reasons Akka lifts the responsibilities of guarantees to the application -itself, i.e. you have to implement them yourself. On the other hand, you are in full control of the guarantees that you want -to provide**. - -### Message Ordering - -The rule is that for a given pair of actors, messages sent directly from the first to the second will not be -received out-of-order. The word directly emphasizes that this guarantee only applies when sending with the tell -operator directly to the final destination, but not when employing mediators. - -If: - - * Actor `A1` sends messages `M1`, `M2`, `M3` to `A2`. - * Actor `A3` sends messages `M4`, `M5`, `M6` to `A2`. - -This means that: - - * If `M1` is delivered it must be delivered before `M2` and `M3`. - * If `M2` is delivered it must be delivered before `M3`. - * If `M4` is delivered it must be delivered before `M5` and `M6`. - * If `M5` is delivered it must be delivered before `M6`. - * `A2` can see messages from `A1` interleaved with messages from `A3`. - * Since there is no guaranteed delivery, any of the messages may be dropped, i.e. not arrive at `A2`. - -For the full details on delivery guarantees please refer to the [reference page](http://doc.akka.io/docs/akka/current/general/message-delivery-reliability.html). - -### Revisiting the Query Protocol - -There is nothing wrong with our first query protocol but it limits our flexibility. If we want to implement resends -in the actor that queries our device actor (because of timed out requests) or want to query multiple actors it -can be helpful to put an additional query ID field in the message which helps us correlate requests with responses. - -Hence, we add one more field to our messages, so that an ID can be provided by the requester: +To provide the `main` entry point that creates the actor system, add the following code to the new @scala[`IotApp` object] @java[`IotMain` class]. Scala -: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_2/DeviceInProgress.scala) { #read-protocol-2 } +: @@snip [IotApp.scala]($code$/scala/tutorial_2/IotApp.scala) { #iot-app } Java -: @@snip [DeviceInProgress2.java]($code$/java/jdocs/tutorial_2/inprogress2/DeviceInProgress2.java) { #read-protocol-2 } +: @@snip [IotMain.java]($code$/java/jdocs/tutorial_2/IotMain.java) { #iot-app } -Our device actor has the responsibility to use the same ID for the response of a given query. 
Now we can sketch -our device actor: +The application does little, other than print out that it is started. But, we have the first actor in place and we are ready to add other actors. -Scala -: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_2/DeviceInProgress.scala) { #device-with-read } +## What's next? -Java -: @@snip [DeviceInProgress2.java]($code$/java/jdocs/tutorial_2/inprogress2/DeviceInProgress2.java) { #device-with-read } +In the following chapters we will grow the application gradually, by: -We maintain the current temperature, initially set to @scala[`None`] @java[`Optional.empty()`], and we simply report it back if queried. We also -added fields for the ID of the device and the group it belongs to, which we will use later. + 1. Creating the representation for a device. + 2. Creating the device management component. + 3. Adding query capabilities to device groups. -We can already write a simple test for this functionality @scala[(we use ScalaTest but any other test framework can be -used with the Akka Testkit)]: - -Scala -: @@snip [DeviceSpec.scala]($code$/scala/tutorial_2/DeviceSpec.scala) { #device-read-test } - -Java -: @@snip [DeviceTest.java]($code$/java/jdocs/tutorial_2/DeviceTest.java) { #device-read-test } - -## The Write Protocol - -As a first attempt, we could model recording the current temperature in the device actor as a single message: - - * When a temperature record request is received, update the `currentTemperature` field. - -Such a message could possibly look like this: - -Scala -: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_2/DeviceInProgress.scala) { #write-protocol-1 } - -Java -: @@snip [DeviceInProgress3.java]($code$/java/jdocs/tutorial_2/DeviceInProgress3.java) { #write-protocol-1 } - -The problem with this approach is that the sender of the record temperature message can never be sure if the message -was processed or not. We have seen that Akka does not guarantee delivery of these messages and leaves it to the -application to provide success notifications. In our case, we would like to send an acknowledgment to the sender -once we have updated our last temperature recording, e.g. @scala[`final case class TemperatureRecorded(requestId: Long)`] @java[`TemperatureRecorded`]. -Just like in the case of temperature queries and responses, it is a good idea to include an ID field to provide maximum flexibility. - -Putting read and write protocol together, the device actor will look like this: - -Scala -: @@snip [Device.scala]($code$/scala/tutorial_2/Device.scala) { #full-device } - -Java -: @@snip [Device.java]($code$/java/jdocs/tutorial_2/Device.java) { #full-device } - -We are also responsible for writing a new test case now, exercising both the read/query and write/record functionality -together: - -Scala: -: @@snip [DeviceSpec.scala]($code$/scala/tutorial_2/DeviceSpec.scala) { #device-write-read-test } - -Java: -: @@snip [DeviceTest.java]($code$/java/jdocs/tutorial_2/DeviceTest.java) { #device-write-read-test } - -## What is Next? - -So far, we have started designing our overall architecture, and we wrote our first actor directly corresponding to the -domain. We now have to create the component that is responsible for maintaining groups of devices and the device -actors themselves. 
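For readers who do not have the example sources open, a rough sketch of the device actor this part arrives at (the `#full-device` snippet) is shown below. It is only an illustration of the read and write protocols described above, with a `requestId` on every message and an explicit acknowledgment for writes; it is not the repository snippet itself.

```scala
import akka.actor.{ Actor, ActorLogging, Props }

object Device {
  def props(groupId: String, deviceId: String): Props = Props(new Device(groupId, deviceId))

  // Read protocol: the requester supplies an ID so replies can be correlated with requests.
  final case class ReadTemperature(requestId: Long)
  final case class RespondTemperature(requestId: Long, value: Option[Double])

  // Write protocol: recording a temperature is acknowledged explicitly.
  final case class RecordTemperature(requestId: Long, value: Double)
  final case class TemperatureRecorded(requestId: Long)
}

class Device(groupId: String, deviceId: String) extends Actor with ActorLogging {
  import Device._

  // No reading is available until the first measurement has been recorded.
  private var lastTemperatureReading: Option[Double] = None

  override def receive: Receive = {
    case RecordTemperature(id, value) =>
      log.info("Recorded temperature reading {} with {}", value, id)
      lastTemperatureReading = Some(value)
      sender() ! TemperatureRecorded(id)

    case ReadTemperature(id) =>
      sender() ! RespondTemperature(id, lastTemperatureReading)
  }
}
```

A requester matches a `RespondTemperature` or `TemperatureRecorded` reply to its own request by comparing the `requestId` values.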
diff --git a/akka-docs/src/main/paradox/scala/guide/tutorial_3.md b/akka-docs/src/main/paradox/scala/guide/tutorial_3.md index da5addf96f..ab6a76342a 100644 --- a/akka-docs/src/main/paradox/scala/guide/tutorial_3.md +++ b/akka-docs/src/main/paradox/scala/guide/tutorial_3.md @@ -1,253 +1,182 @@ -# Part 3: Device Groups and Manager +# Part 3: Working with Device Actors +In the previous topics we explained how to view actor systems _in the large_, that is, how components should be represented, how actors should be arranged in the hierarchy. In this part, we will look at actors _in the small_ by implementing the device actor. -In this chapter, we will integrate our device actors into a component that manages devices. When a new device comes -online, there is no actor representing it. We need to be able to ask the device manager component to create a new -device actor for us if necessary, in the required group (or return a reference to an already existing one). +If we were working with objects, we would typically design the API as _interfaces_, a collection of abstract methods to be filled out by the actual implementation. In the world of actors, protocols take the place of interfaces. While it is not possible to formalize general protocols in the programming language, we can compose their most basic element, messages. So, we will start by identifying the messages we will want to send to device actors. -Since we keep our tutorial system to the bare minimum, we have no actual component that interfaces with the external -world via some networking protocol. For our exercise, we will just create the API necessary to integrate with such -a component in the future. In a final system, the steps for connecting a device would look like this: +Typically, messages fall into categories, or patterns. By identifying these patterns, you will find that it becomes easier to choose between them and to implement them. The first example demonstrates the _request-respond_ message pattern. - 1. The device connects through some protocol to our system. - 2. The component managing network connections accept the connection. - 3. The ID of the device and the ID of the group that it belongs is acquired. - 4. The device manager component is asked to create a group and device actor for the given IDs (or return an existing - one). - 5. The device actor (just been created or located) responds with an acknowledgment, at the same time exposing its - ActorRef directly (by being the sender of the acknowledgment). - 6. The networking component now uses the ActorRef of the device directly, avoiding going through the component. +## Identifying messages for devices +The tasks of a device actor will be simple: -We are only concerned with steps 4 and 5 now. We will model the device manager component as an actor tree with three -levels: + * Collect temperature measurements + * When asked, report the last measured temperature -![device manager tree](diagrams/device_manager_tree.png) +However, a device might start without immediately having a temperature measurement. Hence, we need to account for the case where a temperature is not present. This also allows us to test the query part of the actor without the write part present, as the device actor can simply report an empty result. - * The top level is the supervisor actor representing the component. It is also the entry point to look up or create - group and device actors. - * Device group actors are supervisors of the devices belonging to the group. 
Apart from supervising the device actors they - also provide extra services, like querying the temperature readings from all the devices available. - * Device actors manage all the interactions with the actual devices, storing temperature readings for example. +The protocol for obtaining the current temperature from the device actor is simple. The actor: -When designing actor systems one of the main challenges is to decide on the granularity of the actors. For example, it -would be perfectly possible to have only a single actor maintaining all the groups and devices in `HashMap`s for -example. It would be also reasonable to keep the groups as separate actors, but keep device state simply inside -the group actor. + 1. Waits for a request for the current temperature. + 2. Responds to the request with a reply that either: -We chose this three-layered architecture for the following reasons: + * contains the current temperature or, + * indicates that a temperature is not yet available. - * Having groups as individual actors: - * Allows us to isolate failures happening in a group. If a programmer error would - happen in the single actor that keeps all state, it would be all wiped out once that actor is restarted affecting groups that are otherwise non-faulty. - * Simplifies the problem of querying all the devices belonging to a group (since it only contains state related - to the given group). - * Increases the parallelism of the system by allowing to query multiple groups concurrently. Since groups have - dedicated actors, all of them can run concurrently. - * Having devices as individual actors: - * Allows us to isolate failures happening in a device actor from the rest of the devices. - * Increases the parallelism of collecting temperature readings as actual network connections from different devices - can talk to the individual device actors directly, reducing contention points. - -In practice, a system can be organized in multiple ways, all depending on the characteristics of the interactions -between actors. - -The following guidelines help to arrive at the right granularity: - - * Prefer larger granularity to smaller. Introducing more fine-grained actors than needed causes more problems than - it solves. - * Prefer finer granularity if it enables higher concurrency in the system. - * Prefer finer granularity if actors need to handle complex conversations with other actors and hence have many - states. We will see a very good example for this in the next chapter. - * Prefer finer granularity if there is too much state to keep around in one place compared to dividing into smaller - actors. - * Prefer finer granularity if the current actor has multiple unrelated responsibilities that can fail and restored - individually. - - -## The Registration Protocol - -As the first step, we need to design the protocol for registering a device and create an actor that will be responsible -for it. This protocol will be provided by the `DeviceManager` component itself because that is the only actor that -is known up front: device groups and device actors are created on-demand. The steps of registering a device are the following: - - 1. DeviceManager receives the request to track a device for a given group and device. - 2. If the manager already has an actor for the device group, it forwards the request to it. Otherwise, it first creates - a new one and then forwards the request. - 3. The DeviceGroup receives the request to register an actor for the given device. - 4. 
If the group already has an actor for the device, it forwards the request to it. Otherwise, it first creates - a new one and then forwards the request. - 5. The device actor receives the request and acknowledges it to the original sender. Since the device actor is the sender of - the acknowledgment, the receiver, i.e. the device, will be able to learn its `ActorRef` and send direct messages to its device actor in the future. - -Now that the steps are defined, we only need to define the messages that we will use to communicate requests and -their acknowledgement: - -@@snip [DeviceManager.scala]($code$/scala/tutorial_3/DeviceManager.scala) { #device-manager-msgs } - -As you see, in this case, we have not included a request ID field in the messages. Since registration is usually happening -once, at the component that connects the system to some network protocol, we will usually have no use for the ID. -Nevertheless, it is a good exercise to add this ID. - -## Add Registration Support to Device Actor - -We start implementing the protocol from the bottom first. In practice, both a top-down and bottom-up approach can -work, but in our case, we benefit from the bottom-up approach as it allows us to immediately write tests for the -new features without mocking out parts. - -At the bottom of our hierarchy are the `Device` actors. Their job in this registration process is rather simple, just reply to the -registration request with an acknowledgment to the sender. *We will assume that the sender of the registration -message is preserved in the upper layers.* We will show you in the next section how this can be achieved. - -We also add a safeguard against requests that come with a mismatched group or device ID. This is how the resulting -the code looks like: - -@@@ note { .group-scala } - -We used a feature of scala pattern matching where we can match if a certain field equals to an expected -value. This is achieved by variables included in backticks, like `` `variable` ``, and it means that the pattern -only match if it contains the value of `variable` in that position. - -@@@ +We need two messages, one for the request, and one for the reply. Our first attempt might look like the following: Scala -: @@snip [Device.scala]($code$/scala/tutorial_3/Device.scala) { #device-with-register } +: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_3/DeviceInProgress.scala) { #read-protocol-1 } Java -: @@snip [Device.java]($code$/java/jdocs/tutorial_3/Device.java) { #device-with-register } +: @@snip [DeviceInProgress.java]($code$/java/jdocs/tutorial_3/DeviceInProgress.java) { #read-protocol-1 } -We should not leave features untested, so we immediately write two new test cases, one exercising successful -registration, the other testing the case when IDs don't match: +These two messages seem to cover the required functionality. However, the approach we choose must take into account the distributed nature of the application. While the basic mechanism is the same for communicating with an actor on the local JVM as with a remote actor, we need to keep the following in mind: -@@@ note +* There will be observable differences in the latency of delivery between local and remote messages, because factors like network link bandwidth and the message size also come into play. +* Reliability is a concern because a remote message send involves more steps, which means that more can go wrong. 
+* A local send will just pass a reference to the message inside the same JVM, without any restrictions on the underlying object which is sent, whereas a remote transport will place a limit on the message size. -We used the `expectNoMsg()` helper method from @scala[`TestProbe`] @java[`TestKit`]. This assertion waits until the defined time-limit -and fails if it receives any messages during this period. If no messages are received during the waiting period the -assertion passes. It is usually a good idea to keep these timeouts low (but not too low) because they add significant -test execution time otherwise. +In addition, while sending inside the same JVM is significantly more reliable, if an +actor fails due to a programmer error while processing the message, the effect is basically the same as if a remote network request fails due to the remote host crashing while processing the message. Even though in both cases, the service recovers after a while (the actor is restarted by its supervisor, the host is restarted by an operator or by a monitoring system), individual requests are lost during the crash. **Therefore, writing your actors such that every +message could possibly be lost is the safe, pessimistic bet.** -@@@ +But to further understand the need for flexibility in the protocol, it will help to consider Akka message ordering and message delivery guarantees. Akka provides the following behavior for message sends: + + * At-most-once delivery, that is, no guaranteed delivery. + * Message ordering is maintained per sender, receiver pair. + +The following sections discuss this behavior in more detail: + +* [Message delivery](#message-delivery) +* [Message ordering](#message-ordering) + +### Message delivery +The delivery semantics provided by messaging subsystems typically fall into the following categories: + + * **At-most-once delivery** — each message is delivered zero or one time; in more casual terms it means that messages can be lost, but are never duplicated. + * **At-least-once delivery** — potentially multiple attempts are made to deliver each message, until at least one succeeds; again, in more casual terms this means that messages can be duplicated but are never lost. + * **Exactly-once delivery** — each message is delivered exactly once to the recipient; the message can neither be lost nor be duplicated. + +The first behavior, the one used by Akka, is the cheapest and results in the highest performance. It has the least implementation overhead because it can be done in a fire-and-forget fashion without keeping the state at the sending end or in the transport mechanism. The second, at-least-once, requires retries to counter transport losses. This adds the overhead of keeping the state at the sending end and having an acknowledgment mechanism at the receiving end. Exactly-once delivery is most expensive, and results in the worst performance: in addition to the overhead added by at-least-once delivery, it requires the state to be kept at the receiving end in order to filter out +duplicate deliveries. + +In an actor system, we need to determine the exact meaning of a guarantee — at which point does the system consider the delivery as accomplished: + + 1. When the message is sent out on the network? + 2. When the message is received by the target actor's host? + 3. When the message is put into the target actor's mailbox? + 4. When the target actor starts to process the message? + 5. When the target actor has successfully processed the message? 
+ +Most frameworks and protocols that claim guaranteed delivery actually provide something similar to points 4 and 5. While this sounds reasonable, **is it actually useful?** To understand the implications, consider a simple, practical example: a user attempts to place an order and we only want to claim that it has been successfully processed once it is actually on disk in the orders database. + +If we rely on the successful processing of the message, the actor will report success as soon as the order has been submitted to the internal API that has the responsibility to validate it, process it and put it into the database. Unfortunately, +immediately after the API has been invoked any of the following can happen: + + * The host can crash. + * Deserialization can fail. + * Validation can fail. + * The database might be unavailable. + * A programming error might occur. + +This illustrates that the **guarantee of delivery** does not translate to the **domain level guarantee**. We only want to report success once the order has been actually fully processed and persisted. **The only entity that can report success is the application itself, since only it has any understanding of the domain guarantees required. No generalized framework can figure out the specifics of a particular domain and what is considered a success in that domain**. + +In this particular example, we only want to signal success after a successful database write, where the database acknowledged that the order is now safely stored. **For these reasons Akka lifts the responsibilities of guarantees to the application +itself, i.e. you have to implement them yourself. This gives you full control of the guarantees that you want to provide**. Now, let's consider the message ordering that Akka provides to make it easy to reason about application logic. + +### Message Ordering + +In Akka, for a given pair of actors, messages sent directly from the first to the second will not be received out-of-order. The word directly emphasizes that this guarantee only applies when sending with the tell operator directly to the final destination, but not when employing mediators. + +If: + + * Actor `A1` sends messages `M1`, `M2`, `M3` to `A2`. + * Actor `A3` sends messages `M4`, `M5`, `M6` to `A2`. + +This means that, for Akka messages: + + * If `M1` is delivered it must be delivered before `M2` and `M3`. + * If `M2` is delivered it must be delivered before `M3`. + * If `M4` is delivered it must be delivered before `M5` and `M6`. + * If `M5` is delivered it must be delivered before `M6`. + * `A2` can see messages from `A1` interleaved with messages from `A3`. + * Since there is no guaranteed delivery, any of the messages may be dropped, i.e. not arrive at `A2`. + +These guarantees strike a good balance: having messages from one actor arrive in-order is convenient for building systems that can be easily reasoned about, while on the other hand allowing messages from different actors to arrive interleaved provides sufficient freedom for an efficient implementation of the actor system. + +For the full details on delivery guarantees please refer to the @ref:[reference page](../general/message-delivery-reliability.md). + +## Adding flexibility to device messages + +Our first query protocol was correct, but did not take into account distributed application execution. If we want to implement resends in the actor that queries a device actor (because of timed out requests), or if we want to query multiple actors, we need to be able to correlate requests and responses. 
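As a concrete illustration of why this correlation matters, here is a minimal, hypothetical sketch of a querying actor that implements its own resend loop on top of Akka's at-most-once sends. The actor and message names are illustrative only (they anticipate the `requestId` field introduced below); the tutorial itself does not add such a component.

```scala
import akka.actor.{ Actor, ActorLogging, ActorRef, Cancellable, Props }
import scala.concurrent.duration._

// Illustrative message shapes; the tutorial's real definitions live in the snippet files.
final case class ReadTemperature(requestId: Long)
final case class RespondTemperature(requestId: Long, value: Option[Double])
final case class Redeliver(requestId: Long)

// Keeps resending a read request until the matching response arrives.
class ResendingQuerier(device: ActorRef) extends Actor with ActorLogging {
  import context.dispatcher // execution context used by the scheduler below

  private var pending = Map.empty[Long, Cancellable]

  override def receive: Receive = {
    case ReadTemperature(id) =>
      // Application-level retry layered on top of Akka's at-most-once delivery.
      val timer = context.system.scheduler.schedule(1.second, 1.second, self, Redeliver(id))
      pending += id -> timer
      device ! ReadTemperature(id)

    case Redeliver(id) if pending.contains(id) =>
      device ! ReadTemperature(id)

    case RespondTemperature(id, value) =>
      // The requestId tells us which outstanding request this reply answers.
      pending.get(id).foreach(_.cancel())
      pending -= id
      log.info("Device responded to request {} with {}", id, value)
  }
}

object ResendingQuerier {
  def props(device: ActorRef): Props = Props(new ResendingQuerier(device))
}
```

Without an ID on the reply, an actor like this could not tell which of its outstanding requests a given `RespondTemperature` answers, nor whether a late reply belongs to a request it has already given up on.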
Hence, we add one more field to our messages, so that an ID can be provided by the requester (we will add this code to our app in a later step): Scala -: @@snip [DeviceSpec.scala]($code$/scala/tutorial_3/DeviceSpec.scala) { #device-registration-tests } +: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_3/DeviceInProgress.scala) { #read-protocol-2 } Java -: @@snip [DeviceTest.java]($code$/java/jdocs/tutorial_3/DeviceTest.java) { #device-registration-tests } +: @@snip [DeviceInProgress2.java]($code$/java/jdocs/tutorial_3/inprogress2/DeviceInProgress2.java) { #read-protocol-2 } -## Device Group +## Defining the device actor and its read protocol -We are done with the registration support at the device level, now we have to implement it at the group level. A group -has more work to do when it comes to registrations. It must either forward the request to an existing child, or it -should create one. To be able to look up child actors by their device IDs we will use a @scala[`Map[String, ActorRef]`] @java[`Map`]. - -We also want to keep the original sender of the request so that our device actor can reply directly. This is possible -by using `forward` instead of the @scala[`!`] @java[`tell`] operator. The only difference between the two is that `forward` keeps the original -sender while @scala[`!`] @java[`tell`] always sets the sender to be the current actor. Just like with our device actor, we ensure that we don't -respond to wrong group IDs: +As we learned in the Hello World example, each actor defines the type of messages it will accept. Our device actor has the responsibility to use the same ID parameter for the response of a given query, which would make it look like the following. Scala -: @@snip [DeviceGroup.scala]($code$/scala/tutorial_3/DeviceGroup.scala) { #device-group-register } +: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_3/DeviceInProgress.scala) { #device-with-read } Java -: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_3/DeviceGroup.java) { #device-group-register } +: @@snip [DeviceInProgress2.java]($code$/java/jdocs/tutorial_3/inprogress2/DeviceInProgress2.java) { #device-with-read } -Just as we did with the device, we test this new functionality. We also test that the actors returned for the two -different IDs are actually different, and we also attempt to record a temperature reading for each of the devices -to see if the actors are responding. +Note in the code that: + +* The @scala[companion object]@java[static method] defines how to construct a `Device` actor. The `props` parameters include an ID for the device and the group to which it belongs, which we will use later. +* The @scala[companion object]@java[class] includes the definitions of the messages we reasoned about previously. +* In the `Device` class, the value of `lastTemperatureReading` is initially set to @scala[`None`]@java[`Optional.empty()`], and the actor will simply report it back if queried. + +## Testing the actor + +Based on the simple actor above, we could write a simple test. In the `com.lightbend.akka.sample` package in the test tree of your project, add the following code to a @scala[`DeviceSpec.scala`]@java[`DeviceTest.java`] file. +@scala[(We use ScalaTest but any other test framework can be used with the Akka Testkit)]. + +You can run this test @java[by running `mvn test` or] by running `test` at the sbt prompt. 
Scala -: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_3/DeviceGroupSpec.scala) { #device-group-test-registration } +: @@snip [DeviceSpec.scala]($code$/scala/tutorial_3/DeviceSpec.scala) { #device-read-test } Java -: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_3/DeviceGroupTest.java) { #device-group-test-registration } +: @@snip [DeviceTest.java]($code$/java/jdocs/tutorial_3/DeviceTest.java) { #device-read-test } -It might be, that a device actor already exists for the registration request. In this case, we would like to use -the existing actor instead of a new one. We have not tested this yet, so we need to fix this: +Now, the actor needs a way to change the state of the temperature when it receives a message from the sensor. + +## Adding a write protocol + +The purpose of the write protocol is to update the `currentTemperature` field when the actor receives a message that contains the temperature. Again, it is tempting to define the write protocol as a very simple message, something like this: Scala -: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_3/DeviceGroupSpec.scala) { #device-group-test3 } +: @@snip [DeviceInProgress.scala]($code$/scala/tutorial_3/DeviceInProgress.scala) { #write-protocol-1 } Java -: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_3/DeviceGroupTest.java) { #device-group-test3 } +: @@snip [DeviceInProgress3.java]($code$/java/jdocs/tutorial_3/DeviceInProgress3.java) { #write-protocol-1 } -So far, we have implemented everything for registering device actors in the group. Devices come and go, however, so -we will need a way to remove those from the @scala[`Map[String, ActorRef]`] @java[`Map`]. We will assume that when a device is removed, its corresponding device actor -is simply stopped. We need some way for the parent to be notified when one of the device actors are stopped. Unfortunately, -supervision will not help because it is used for error scenarios, not graceful stopping. +However, this approach does not take into account that the sender of the record temperature message can never be sure if the message was processed or not. We have seen that Akka does not guarantee delivery of these messages and leaves it to the application to provide success notifications. In our case, we would like to send an acknowledgment to the sender once we have updated our last temperature recording, e.g. @scala[`final case class TemperatureRecorded(requestId: Long)`]@java[`TemperatureRecorded`]. +Just like in the case of temperature queries and responses, it is a good idea to include an ID field to provide maximum flexibility. -There is a feature in Akka that is exactly what we need here. It is possible for an actor to _watch_ another actor -and be notified if the other actor is stopped. This feature is called _Death Watch_ and it is an important tool for -any Akka application. Unlike supervision, watching is not limited to parent-child relationships, any actor can watch -any other actor given its `ActorRef`. After a watched actor stops, the watcher receives a `Terminated(ref)` message -which also contains the reference to the watched actor. The watcher can either handle this message explicitly or, if -it does not handle it directly it will fail with a `DeathPactException`. This latter is useful if the actor cannot -longer perform its duties after its collaborator actor has been stopped. In our case, the group should still function -after one device have been stopped, so we need to handle this message. 
The steps we need to follow are the following: +## Actor with read and write messages - 1. Whenever we create a new device actor, we must also watch it. - 2. When we are notified that a device actor has been stopped we also need to remove it from the @scala[`Map[String, ActorRef]`] @java[`Map`] which maps - devices to device actors. - -Unfortunately, the `Terminated` message only contains the `ActorRef` of the child actor but we do not know -its ID, which we need to remove it from the map of existing device to device actor mappings. To be able to do this removal, we -need to introduce another placeholder, @scala[`Map[ActorRef, String]`] @java[`Map`], that allow us to find out the device ID corresponding to a given `ActorRef`. Putting -this together the result is: +Putting the read and write protocol together, the device actor looks like the following example: Scala -: @@snip [DeviceGroup.scala]($code$/scala/tutorial_3/DeviceGroup.scala) { #device-group-remove } +: @@snip [Device.scala]($code$/scala/tutorial_3/Device.scala) { #full-device } Java -: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_3/DeviceGroup.java) { #device-group-remove } +: @@snip [Device.java]($code$/java/jdocs/tutorial_3/Device.java) { #full-device } -So far we have no means to get what devices the group device actor keeps track of and, therefore, we cannot test our -new functionality yet. To make it testable, we add a new query capability (message @scala[`RequestDeviceList(requestId: Long)`] @java[`RequestDeviceList`]) that simply lists the currently active -device IDs: +We should also write a new test case now, exercising both the read/query and write/record functionality together: -Scala -: @@snip [DeviceGroup.scala]($code$/scala/tutorial_3/DeviceGroup.scala) { #device-group-full } +Scala: +: @@snip [DeviceSpec.scala]($code$/scala/tutorial_3/DeviceSpec.scala) { #device-write-read-test } -Java -: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_3/DeviceGroup.java) { #device-group-full } +Java: +: @@snip [DeviceTest.java]($code$/java/jdocs/tutorial_3/DeviceTest.java) { #device-write-read-test } -We almost have everything to test the removal of devices. What is missing is: +## What's Next? - * Stopping a device actor from our test case, from the outside: any actor can be stopped by simply sending a special - the built-in message, `PoisonPill`, which instructs the actor to stop. - * Be notified once the device actor is stopped: we can use the _Death Watch_ facility for this purpose, too. Thankfully - the @scala[`TestProbe`] @java[`TestKit`] has two messages that we can easily use, `watch()` to watch a specific actor, and `expectTerminated` - to assert that the watched actor has been terminated. - -We add two more test cases now. In the first, we just test that we get back the list of proper IDs once we have added -a few devices. The second test case makes sure that the device ID is properly removed after the device actor has - been stopped: - -Scala -: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_3/DeviceGroupSpec.scala) { #device-group-list-terminate-test } - -Java -: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_3/DeviceGroupTest.java) { #device-group-list-terminate-test } - -## Device Manager - -The only part that remains now is the entry point for our device manager component. 
This actor is very similar to -the device group actor, with the only difference that it creates device group actors instead of device actors: - -Scala -: @@snip [DeviceManager.scala]($code$/scala/tutorial_3/DeviceManager.scala) { #device-manager-full } - -Java -: @@snip [DeviceManager.java]($code$/java/jdocs/tutorial_3/DeviceManager.java) { #device-manager-full } - -We leave tests of the device manager as an exercise as it is very similar to the tests we have written for the group -actor. - -## What is Next? - -We have now a hierarchical component for registering and tracking devices and recording measurements. We have seen -some conversation patterns like: - - * Request-respond (for temperature recordings). - * Delegate-respond (for registration of devices). - * Create-watch-terminate (for creating the group and device actor as children). - -In the next chapter, we will introduce group query capabilities, which will establish a new conversation pattern of -scatter-gather. In particular, we will implement the functionality that allows users to query the status of all -the devices belonging to a group. +So far, we have started designing our overall architecture, and we wrote the first actor that directly corresponds to the domain. We now have to create the component that is responsible for maintaining groups of devices and the device actors themselves. diff --git a/akka-docs/src/main/paradox/scala/guide/tutorial_4.md b/akka-docs/src/main/paradox/scala/guide/tutorial_4.md index 23c71265ba..675e93e5b9 100644 --- a/akka-docs/src/main/paradox/scala/guide/tutorial_4.md +++ b/akka-docs/src/main/paradox/scala/guide/tutorial_4.md @@ -1,253 +1,218 @@ -# Part 4: Querying a Group of Devices +# Part 4: Working with Device Groups -The conversational patterns we have seen so far were simple in the sense that they required no or little state to be kept in the -actor that is only relevant to the conversation. Our device actors either simply returned a reading, which required no -state change, recorded a temperature, which was required an update of a single field, or in the most complex case, -managing groups and devices, we had to add or remove simple entries from a map. +Let's take a closer look at the main functionality required by our use case. In a complete IoT system for monitoring home temperatures, the steps for connecting a device sensor to our system might look like this: -In this chapter, we will see a more complex example. Our goal is to add a new service to the group device actor, one which -allows querying the temperature from all running devices. Let us start by investigating how we want our query API to -behave. +1. A sensor device in the home connects through some protocol. +1. The component managing network connections accepts the connection. +1. The sensor provides its group and device ID to register with the device manager component of our system. +1. The device manager component handles registration by looking up or creating the actor responsible for keeping sensor state. +1. The actor responds with an acknowledgement, exposing its `ActorRef`. +1. The networking component now uses the `ActorRef` for communication between the sensor and device actor without going through the device manager. -The very first issue we face is that the set of devices is dynamic, and each device is represented by an actor that -can stop at any time. At the beginning of the query, we need to ask all of the device actors for the current temperature -that we know about. 
However, during the lifecycle of the query: +Steps 1 and 2 take place outside the boundaries of our tutorial system. In this chapter, we will start addressing steps 3-6 and create a way for sensors to register with our system and to communicate with actors. But first, we have another architectural decision — how many levels of actors should we use to represent device groups and device sensors? - * A device actor may stop and not respond back with a temperature reading. - * A new device actor might start up, but we missed asking it for the current temperature. +One of the main design challenges for Akka programmers is choosing the best granularity for actors. In practice, depending on the characteristics of the interactions between actors, there are usually several valid ways to organize a system. In our use case, for example, it would be possible to have a single actor maintain all the groups and devices — perhaps using hash maps. It would also be reasonable to have an actor for each group that tracks the state of all devices in the same home. -There are many approaches that can be taken to address these issues, but the important point is to settle on what is -the desired behavior. We will pick the following two guarantees: +The following guidelines help us choose the most appropriate actor hierarchy: - * When a query arrives at the group, the group actor takes a _snapshot_ of the existing device actors and will only - ask those for the temperature. Actors that are started _after_ the arrival of the query are simply ignored. - * When an actor stops during the query without answering (i.e. before all the actors we asked for the temperature - responded) we simply report back that fact to the sender of the query message. + * In general, prefer larger granularity. Introducing more fine-grained actors than needed causes more problems than it solves. + * Add finer granularity when the system requires: + * Higher concurrency. + * Complex conversations between actors that have many + states. We will see a very good example for this in the next chapter. + * Sufficient state that it makes sense to divide into smaller + actors. + * Multiple unrelated responsibilities. Using separate actors allows individuals to fail and be restored with little impact on others. -Apart from device actors coming and going dynamically, some actors might take a long time to answer, for example, because -they are stuck in an accidental infinite loop, or because they failed due to a bug and dropped our request. Ideally, -we would like to give a deadline to our query: +## Device manager hierarchy - * The query is considered completed if either all actors have responded (or confirmed being stopped), or we reach - the deadline. +Considering the principles outlined in the previous section, We will model the device manager component as an actor tree with three levels: -Given these decisions, and the fact that a device might not have a temperature to record, we can define four states -that each device can be in, according to the query: +* The top level supervisor actor represents the system component for devices. It is also the entry point to look up and create device group and device actors. +* At the next level, group actors each supervise the device actors for one group id (e.g. one home). They also provide services, such as querying temperature readings from all of the available devices in their group. +* Device actors manage all the interactions with the actual device sensors, such as storing temperature readings. 
- * It has a temperature available: @scala[`Temperature(value)`] @java[`Temperature`]. - * It has responded, but has no temperature available yet: `TemperatureNotAvailable`. - * It has stopped before answering: `DeviceNotAvailable`. - * It did not respond before the deadline: `DeviceTimedOut`. +![device manager tree](diagrams/device_manager_tree.png) -Summarizing these in message types we can add the following to `DeviceGroup`: +We chose this three-layered architecture for these reasons: + +* Having groups of individual actors: + * Isolates failures that occur in a group. If a single actor managed all device groups, an error in one group that causes a restart would wipe out the state of groups that are otherwise non-faulty. + * Simplifies the problem of querying all the devices belonging to a group. Each group actor only contains state related to its group. + * Increases parallelism in the system. Since each group has a dedicated actor, they run concurrently and we can query multiple groups concurrently. + + +* Having sensors modeled as individual device actors: + * Isolates failures of one device actor from the rest of the devices in the group. + * Increases the parallelism of collecting temperature readings. Network connections from different sensors communicate with their individual device actors directly, reducing contention points. + +With the architecture defined, we can start working on the protocol for registering sensors. + +## The Registration Protocol + +As the first step, we need to design the protocol both for registering a device and for creating the group and device actors that will be responsible for it. This protocol will be provided by the `DeviceManager` component itself because that is the only actor that is known and available up front: device groups and device actors are created on-demand. + +Looking at registration in more detail, we can outline the necessary functionality: + +1. When a `DeviceManager` receives a request with a group and device id: + * If the manager already has an actor for the device group, it forwards the request to it. + * Otherwise, it creates a new device group actor and then forwards the request. +1. The `DeviceGroup` actor receives the request to register an actor for the given device: + * If the group already has an actor for the device, the group actor forwards the request to the device actor. + * Otherwise, the `DeviceGroup` actor first creates a device actor and then forwards the request. +1. The device actor receives the request and sends an acknowledgement to the original sender. Since the device actor acknowledges receipt (instead of the group actor), the sensor will now have the `ActorRef` to send messages directly to its actor. + +The messages that we will use to communicate registration requests and +their acknowledgement have a simple definition: Scala -: @@snip [DeviceGroup.scala]($code$/scala/tutorial_4/DeviceGroup.scala) { #query-protocol } +: @@snip [DeviceManager.scala]($code$/scala/tutorial_4/DeviceManager.scala) { #device-manager-msgs } Java -: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_4/DeviceGroup.java) { #query-protocol } +: @@snip [DeviceManager.java]($code$/java/jdocs/tutorial_4/DeviceManager.java) { #device-manager-msgs } -## Implementing the Query +In this case we have not included a request ID field in the messages. Since registration happens once, when the component connects the system to some network protocol, the ID is not important. However, it is usually a best practice to include a request ID. 
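For orientation, the registration messages described above have roughly the following shape; the authoritative definitions are in the `#device-manager-msgs` snippet, and the `...WithId` variants are hypothetical, included only to illustrate the request ID suggestion.

```scala
object DeviceManager {
  // Request to track a device in a given group, acknowledged once a device actor is registered.
  final case class RequestTrackDevice(groupId: String, deviceId: String)
  case object DeviceRegistered

  // Hypothetical variants showing how a request ID could be carried along, as suggested above.
  final case class RequestTrackDeviceWithId(requestId: Long, groupId: String, deviceId: String)
  final case class DeviceRegisteredWithId(requestId: Long)
}
```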
-One of the approaches for implementing the query could be to add more code to the group device actor. While this is -possible, in practice this can be very cumbersome and error prone. When we start a query, we need to take a snapshot -of the devices present at the start of the query and start a timer so that we can enforce the deadline. Unfortunately, -during the time we execute a query _another query_ might just arrive. For this other query, of course, we need to keep -track of the exact same information but isolated from the previous query. This complicates the code and also poses -some problems. For example, we would need a data structure that maps the `ActorRef`s of the devices to the queries -that use that device, so that they can be notified when such a device terminates, i.e. a `Terminated` message is -received. +Now, we'll start implementing the protocol from the bottom up. In practice, both a top-down and bottom-up approach can work, but in our case, we benefit from the bottom-up approach as it allows us to immediately write tests for the new features without mocking out parts that we will need to build later. -There is a much simpler approach that is superior in every way, and it is the one we will implement. We will create -an actor that represents a _single query_ and which performs the tasks needed to complete the query on behalf of the -group actor. So far we have created actors that belonged to classical domain objects, but now, we will create an -actor that represents a process or task rather than an entity. This move keeps our group device actor simple and gives -us better ways to test the query capability in isolation. +## Adding registration support to device actors -First, we need to design the lifecycle of our query actor. This consists of identifying its initial state, then -the first action to be taken by the actor, then, the cleanup if necessary. There are a few things the query should -need to be able to work: +At the bottom of our hierarchy are the `Device` actors. Their job in the registration process is simple: reply to the registration request with an acknowledgment to the sender. It is also prudent to add a safeguard against requests that come with a mismatched group or device ID. - * The snapshot of active device actors to query, and their IDs. - * The requestID of the request that started the query (so we can include it in the reply). - * The `ActorRef` of the actor who sent the group actor the query. We will send the reply to this actor directly. - * A timeout parameter, how long the query should wait for replies. Keeping this as a parameter will simplify testing. +*We will assume that the ID of the sender of the registration +message is preserved in the upper layers.* We will show you in the next section how this can be achieved. -Since we need to have a timeout for how long we are willing to wait for responses, it is time to introduce a new feature that we have -not used yet: timers. Akka has a built-in scheduler facility for this exact purpose. Using it is simple, the -@scala[`scheduler.scheduleOnce(time, actorRef, message)`] @java[`scheduler.scheduleOnce(time, actorRef, message, executor, sender)`] method will schedule the message `message` into the future by the -specified `time` and send it to the actor `actorRef`. To implement our query timeout we need to create a message -that represents the query timeout. We create a simple message `CollectionTimeout` without any parameters for -this purpose. 
The return value from `scheduleOnce` is a `Cancellable` which can be used to cancel the timer -if the query finishes successfully in time. Getting the scheduler is possible from the `ActorSystem`, which, in turn, -is accessible from the actor's context: @scala[`context.system.scheduler`] @java[`getContext().getSystem().scheduler()`]. This needs an @scala[implicit] `ExecutionContext` which -is basically the thread-pool that will execute the timer task itself. In our case, we use the same dispatcher -as the actor by @scala[importing `import context.dispatcher`] @java[passing in `getContext().dispatcher()`]. - -At the start of the query, we need to ask each of the device actors for the current temperature. To be able to quickly -detect devices that stopped before they got the `ReadTemperature` message we will also watch each of the actors. This -way, we get `Terminated` messages for those that stop during the lifetime of the query, so we don't need to wait -until the timeout to mark these as not available. - -Putting together all these, the outline of our actor looks like this: +The device actor registration code looks like the following. Modify your example to match. Scala -: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_4/DeviceGroupQuery.scala) { #query-outline } +: @@snip [Device.scala]($code$/scala/tutorial_4/Device.scala) { #device-with-register } Java -: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_4/DeviceGroupQuery.java) { #query-outline } +: @@snip [Device.java]($code$/java/jdocs/tutorial_4/Device.java) { #device-with-register } -The query actor, apart from the pending timer, has one stateful aspect about it: the actors that did not answer so far or, -from the other way around, the set of actors that have replied or stopped. One way to track this state is -to create a mutable field in the actor @scala[(a `var`)]. There is another approach. It is also possible to change how -the actor responds to messages. By default, the `receive` block defines the behavior of the actor, but it is possible -to change it, several times, during the life of the actor. This is possible by calling `context.become(newBehavior)` -where `newBehavior` is anything with type `Receive` @scala[(which is just a shorthand for `PartialFunction[Any, Unit]`)]. A -`Receive` is just a function (or an object, if you like) that can be returned from another function. We will leverage this -feature to track the state of our actor. +@@@ note { .group-scala } -As the first step, instead of defining `receive` directly, we delegate to another function to create the `Receive`, which -we will call `waitingForReplies`. This will keep track of two changing values, a `Map` of already received replies -and a `Set` of actors that we still wait on. We have three events that we should act on. We can receive a -`RespondTemperature` message from one of the devices. Second, we can receive a `Terminated` message for a device actor -that has been stopped in the meantime. Finally, we can reach the deadline and receive a `CollectionTimeout`. In the -first two cases, we need to keep track of the replies, which we now simply delegate to a method `receivedResponse` which -we will discuss later. In the case of timeout, we need to simply take all the actors that have not yet replied yet -(the members of the set `stillWaiting`) and put a `DeviceTimedOut` as the status in the final reply. 
Then we -reply to the submitter of the query with the collected results and stop the query actor: +We used a feature of scala pattern matching where we can check to see if a certain field equals an expected value. By bracketing variables with backticks, like `` `variable` ``, the pattern will only match if it contains the value of `variable` in that position. + +@@@ + +We can now write two new test cases, one exercising successful registration, the other testing the case when IDs don't match: Scala -: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_4/DeviceGroupQuery.scala) { #query-state } +: @@snip [DeviceSpec.scala]($code$/scala/tutorial_4/DeviceSpec.scala) { #device-registration-tests } Java -: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_4/DeviceGroupQuery.java) { #query-state } +: @@snip [DeviceTest.java]($code$/java/jdocs/tutorial_4/DeviceTest.java) { #device-registration-tests } -What is not yet clear, how we will "mutate" the `answersSoFar` and `stillWaiting` data structures. One important -thing to note is that the function `waitingForReplies` **does not handle the messages directly. It returns a `Receive` -function that will handle the messages**. This means that if we call `waitingForReplies` again, with different parameters, -then it returns a brand new `Receive` that will use those new parameters. We have seen how we -can install the initial `Receive` by simply returning it from `receive`. In order to install a new one, to record a -new reply, for example, we need some mechanism. This mechanism is the method `context.become(newReceive)` which will -_change_ the actor's message handling function to the provided `newReceive` function. You can imagine that before -starting, your actor automatically calls `context.become(receive)`, i.e. installing the `Receive` function that -is returned from `receive`. This is another important observation: **it is not `receive` that handles the messages, -it just returns a `Receive` function that will actually handle the messages**. +@@@ note -We now have to figure out what to do in `receivedResponse`. First, we need to record the new result in the map -`repliesSoFar` and remove the actor from `stillWaiting`. The next step is to check if there are any remaining actors -we are waiting for. If there is none, we send the result of the query to the original requester and stop -the query actor. Otherwise, we need to update the `repliesSoFar` and `stillWaiting` structures and wait for more -messages. +We used the `expectNoMsg()` helper method from @scala[`TestProbe`]@java[`TestKit`]. This assertion waits until the defined time-limit and fails if it receives any messages during this period. If no messages are received during the waiting period, the assertion passes. It is usually a good idea to keep these timeouts low (but not too low) because they add significant test execution time. -In the code before, we treated `Terminated` as the implicit response `DeviceNotAvailable`, so `receivedResponse` does -not need to do anything special. However, there is one small task we still need to do. It is possible that we receive a proper -response from a device actor, but then it stops during the lifetime of the query. We don't want this second event -to overwrite the already received reply. In other words, we don't want to receive `Terminated` after we recorded the -response. This is simple to achieve by calling `context.unwatch(ref)`. 
This method also ensures that we don't -receive `Terminated` events that are already in the mailbox of the actor. It is also safe to call this multiple times, -only the first call will have any effect, the rest is simply ignored. +@@@ -With all this knowledge, we can create the `receivedResponse` method: + +## Adding registration support to device group actors + +We are done with registration support at the device level; now we have to implement it at the group level. A group actor has more work to do when it comes to registrations, including: + +* Handling the registration request by either forwarding it to an existing device actor or by creating a new actor and forwarding the message. +* Tracking which device actors exist in the group and removing them from the group when they are stopped. + +### Handling the registration request + +A device group actor must either forward the request to an existing child, or it should create one. To look up child actors by their device IDs we will use a @scala[`Map[String, ActorRef]`]@java[`Map`]. + +We also want to keep the ID of the original sender of the request so that our device actor can reply directly. This is possible by using `forward` instead of the @scala[`!`] @java[`tell`] operator. The only difference between the two is that `forward` keeps the original +sender while @scala[`!`] @java[`tell`] sets the sender to be the current actor. Just like with our device actor, we ensure that we don't respond to wrong group IDs. Add the following to your source file: Scala -: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_4/DeviceGroupQuery.scala) { #query-collect-reply } +: @@snip [DeviceGroup.scala]($code$/scala/tutorial_4/DeviceGroup.scala) { #device-group-register } Java -: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_4/DeviceGroupQuery.java) { #query-collect-reply } +: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_4/DeviceGroup.java) { #device-group-register } -It is quite natural to ask at this point, what have we gained by using the `context.become()` trick instead of -just making the `repliesSoFar` and `stillWaiting` structures mutable fields of the actor (i.e. `var`s)? In this -simple example, not that much. The value of this style of state keeping becomes more evident when you suddenly have -_more kinds_ of states. Since each state -might have temporary data that is relevant itself, keeping these as fields would pollute the global state -of the actor, i.e. it is unclear what fields are used in what state. Using parameterized `Receive` "factory" -methods we can keep data private that is only relevant to the state. It is still a good exercise to -rewrite the query using @scala[`var`s] @java[mutable fields] instead of `context.become()`. However, it is recommended to get comfortable -with the solution we have used here as it helps structuring more complex actor code in a cleaner and more maintainable way. - -Our query actor is now done: +Just as we did with the device, we test this new functionality. We also test that the actors returned for the two different IDs are actually different, and we also attempt to record a temperature reading for each of the devices to see if the actors are responding.
Scala -: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_4/DeviceGroupQuery.scala) { #query-full } +: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_4/DeviceGroupSpec.scala) { #device-group-test-registration } Java -: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_4/DeviceGroupQuery.java) { #query-full } +: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupTest.java) { #device-group-test-registration } -## Testing - -Now let's verify the correctness of the query actor implementation. There are various scenarios we need to test individually to make -sure everything works as expected. To be able to do this, we need to simulate the device actors somehow to exercise -various normal or failure scenarios. Thankfully we took the list of collaborators (actually a `Map`) as a parameter -to the query actor, so we can easily pass in @scala[`TestProbe`] @java[`TestKit`] references. In our first test, we try out the case when -there are two devices and both report a temperature: +If a device actor already exists for the registration request, we would like to use +the existing actor instead of a new one. We have not tested this yet, so we need to fix this: Scala -: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_4/DeviceGroupQuerySpec.scala) { #query-test-normal } +: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_4/DeviceGroupSpec.scala) { #device-group-test3 } Java -: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupQueryTest.java) { #query-test-normal } +: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupTest.java) { #device-group-test3 } -That was the happy case, but we know that sometimes devices cannot provide a temperature measurement. This -scenario is just slightly different from the previous: + +### Keeping track of the device actors in the group + +So far, we have implemented logic for registering device actors in the group. Devices come and go, however, so we will need a way to remove device actors from the @scala[`Map[String, ActorRef]`] @java[`Map`]. We will assume that when a device is removed, its corresponding device actor is simply stopped. Supervision, as we discussed earlier, only handles error scenarios — not graceful stopping. So we need to notify the parent when one of the device actors is stopped. + +Akka provides a _Death Watch_ feature that allows an actor to _watch_ another actor and be notified if the other actor is stopped. Unlike supervision, watching is not limited to parent-child relationships: any actor can watch any other actor as long as it knows the `ActorRef`. After a watched actor stops, the watcher receives a `Terminated(actorRef)` message which also contains the reference to the watched actor. The watcher can either handle this message explicitly or will fail with a `DeathPactException`. The latter is useful if the actor can no longer perform its own duties after the watched actor has been stopped. In our case, the group should still function after one device has been stopped, so we need to handle the `Terminated(actorRef)` message. + +Our device group actor needs to include functionality that: + + 1. Starts watching new device actors when they are created. + 2. Removes a device actor from the @scala[`Map[String, ActorRef]`] @java[`Map`] — which maps devices to device actors — when the notification indicates it has stopped.
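Before looking at the actual implementation in the snippets below, here is a compact sketch of what these two pieces can look like in Scala. It is not the tutorial's `DeviceGroup` source: the message type and the child `DeviceStub` actor are simplified stand-ins, and it already uses a second, reverse-lookup map, which the next paragraph motivates.

```scala
import akka.actor.{ Actor, ActorRef, Props, Terminated }

// Simplified stand-ins for the tutorial's protocol and device actor:
final case class RequestTrackDevice(groupId: String, deviceId: String)
class DeviceStub(groupId: String, deviceId: String) extends Actor {
  override def receive: Receive = { case _ => () }
}

class DeviceGroupSketch(groupId: String) extends Actor {
  private var deviceIdToActor = Map.empty[String, ActorRef]
  private var actorToDeviceId = Map.empty[ActorRef, String]

  override def receive: Receive = {
    case trackMsg @ RequestTrackDevice(`groupId`, deviceId) =>
      deviceIdToActor.get(deviceId) match {
        case Some(deviceActor) =>
          deviceActor forward trackMsg // keep the original sender
        case None =>
          val deviceActor = context.actorOf(Props(new DeviceStub(groupId, deviceId)))
          context.watch(deviceActor)   // 1. start watching the new child
          deviceIdToActor += deviceId -> deviceActor
          actorToDeviceId += deviceActor -> deviceId
          deviceActor forward trackMsg
      }

    case Terminated(deviceActor) =>    // 2. clean up when a watched child stops
      val deviceId = actorToDeviceId(deviceActor)
      actorToDeviceId -= deviceActor
      deviceIdToActor -= deviceId
  }
}
```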
+ +Unfortunately, the `Terminated` message only contains the `ActorRef` of the child actor. We need the actor's ID to remove it from the map of existing device to device actor mappings. To be able to do this removal, we need to introduce another placeholder, @scala[`Map[ActorRef, String]`] @java[`Map`], that allows us to find out the device ID corresponding to a given `ActorRef`. + +Adding the functionality to identify the actor results in this: Scala -: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_4/DeviceGroupQuerySpec.scala) { #query-test-no-reading } +: @@snip [DeviceGroup.scala]($code$/scala/tutorial_4/DeviceGroup.scala) { #device-group-remove } Java -: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupQueryTest.java) { #query-test-no-reading } +: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_4/DeviceGroup.java) { #device-group-remove } -We also know, that sometimes device actors stop before answering: +So far we have no means to find out which devices the group device actor keeps track of and, therefore, we cannot test our new functionality yet. To make it testable, we add a new query capability (message @scala[`RequestDeviceList(requestId: Long)`] @java[`RequestDeviceList`]) that simply lists the currently active +device IDs: Scala -: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_4/DeviceGroupQuerySpec.scala) { #query-test-stopped } +: @@snip [DeviceGroup.scala]($code$/scala/tutorial_4/DeviceGroup.scala) { #device-group-full } Java -: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupQueryTest.java) { #query-test-stopped } +: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_4/DeviceGroup.java) { #device-group-full } -If you remember, there is another case related to device actors stopping. It is possible that we get a normal reply -from a device actor, but then receive a `Terminated` for the same actor later. In this case, we would like to keep -the first reply and not mark the device as `DeviceNotAvailable`. We should test this, too: +We are almost ready to test the removal of devices. But we still need the following capabilities: + + * To stop a device actor from our test case. From the outside, any actor can be stopped by simply sending a special + built-in message, `PoisonPill`, which instructs the actor to stop. + * To be notified once the device actor is stopped. We can use the _Death Watch_ facility for this purpose, too. The @scala[`TestProbe`] @java[`TestKit`] has two helpful methods that we can easily use: `watch()` to watch a specific actor, and `expectTerminated` + to assert that the watched actor has been terminated (a short sketch of this pattern follows the test snippets below). + +We add two more test cases now. In the first, we just test that we get back the list of proper IDs once we have added a few devices. The second test case makes sure that the device ID is properly removed after the device actor has been stopped: Scala -: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_4/DeviceGroupQuerySpec.scala) { #query-test-stopped-later } +: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_4/DeviceGroupSpec.scala) { #device-group-list-terminate-test } Java -: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupQueryTest.java) { #query-test-stopped-later } +: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupTest.java) { #device-group-list-terminate-test }
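For orientation, here is a rough Scala sketch of the stop-and-watch part of such a test. It assumes a TestKit-based spec (so `system` is in scope) and reuses the tutorial's `DeviceGroup`/`DeviceManager` protocol names; treat it as an illustration rather than the actual test in the snippet above.

```scala
import akka.actor.PoisonPill
import akka.testkit.TestProbe

val probe = TestProbe()
val groupActor = system.actorOf(DeviceGroup.props("group"))

// Register a device so we have a child actor to stop.
groupActor.tell(DeviceManager.RequestTrackDevice("group", "device1"), probe.ref)
probe.expectMsg(DeviceManager.DeviceRegistered)
val toShutDown = probe.lastSender

probe.watch(toShutDown)            // Death Watch from the test side
toShutDown ! PoisonPill            // ask the device actor to stop itself
probe.expectTerminated(toShutDown) // assert that it actually stopped
```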
-The final case is when not all devices respond in time. To keep our test relatively fast, we will construct the -`DeviceGroupQuery` actor with a smaller timeout: +## Creating device manager actors + +Going up to the next level in our hierarchy, we need to create the entry point for our device manager component in the `DeviceManager` source file. This actor is very similar to the device group actor, but creates device group actors instead of device actors: Scala -: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_4/DeviceGroupQuerySpec.scala) { #query-test-timeout } +: @@snip [DeviceManager.scala]($code$/scala/tutorial_4/DeviceManager.scala) { #device-manager-full } Java -: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupQueryTest.java) { #query-test-timeout } +: @@snip [DeviceManager.java]($code$/java/jdocs/tutorial_4/DeviceManager.java) { #device-manager-full } -Our query works as expected now, it is time to include this new functionality in the `DeviceGroup` actor now. +We leave tests of the device manager as an exercise for you since it is very similar to the tests we have already written for the group +actor. -## Adding the Query Capability to the Group +## What's next? -Including the query feature in the group actor is fairly simple now. We did all the heavy lifting in the query actor -itself, the group actor only needs to create it with the right initial parameters and nothing else. +We now have a hierarchical component for registering and tracking devices and recording measurements. We have seen how to implement different types of conversation patterns, such as: -Scala -: @@snip [DeviceGroup.scala]($code$/scala/tutorial_4/DeviceGroup.scala) { #query-added } + * Request-respond (for temperature recordings) + * Delegate-respond (for registration of devices) + * Create-watch-terminate (for creating the group and device actor as children) -Java -: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_4/DeviceGroup.java) { #query-added } - -It is probably worth to reiterate what we said at the beginning of the chapter. By keeping the temporary state -that is only relevant to the query itself in a separate actor we keep the group actor implementation very simple. It delegates -everything to child actors and therefore does not have to keep state that is not relevant to its core business. Also, multiple queries can -now run parallel to each other, in fact, as many as needed. In our case querying an individual device actor is a fast operation, but -if this were not the case, for example, because the remote sensors need to be contacted over the network, this design -would significantly improve throughput. - -We close this chapter by testing that everything works together. This test is just a variant of the previous ones, -now exercising the group query feature: - -Scala -: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_4/DeviceGroupSpec.scala) { #group-query-integration-test } - -Java -: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_4/DeviceGroupTest.java) { #group-query-integration-test } +In the next chapter, we will introduce group query capabilities, which will establish a new conversation pattern of scatter-gather. In particular, we will implement the functionality that allows users to query the status of all the devices belonging to a group.
diff --git a/akka-docs/src/main/paradox/scala/guide/tutorial_5.md b/akka-docs/src/main/paradox/scala/guide/tutorial_5.md new file mode 100644 index 0000000000..4fae8af2e4 --- /dev/null +++ b/akka-docs/src/main/paradox/scala/guide/tutorial_5.md @@ -0,0 +1,253 @@ +# Part 5: Querying Device Groups + +The conversational patterns that we have seen so far are simple in the sense that they require the actor to keep little or no state. Specifically: + +* Device actors return a reading, which requires no state change +* Record a temperature, which updates a single field +* Device Group actors maintain group membership by simply adding or removing entries from a map + +In this part, we will use a more complex example. Since homeowners will be interested in the temperatures throughout their home, our goal is to be able to query all of the device actors in a group. Let us start by investigating how such a query API should behave. + +## Dealing with possible scenarios +The very first issue we face is that the membership of a group is dynamic. Each sensor device is represented by an actor that can stop at any time. At the beginning of the query, we can ask all of the existing device actors for the current temperature. However, during the lifecycle of the query: + + * A device actor might stop and not be able to respond back with a temperature reading. + * A new device actor might start up and not be included in the query because we weren't aware of it. + +These issues can be addressed in many different ways, but the important point is to settle on the desired behavior. The following works well for our use case: + + * When a query arrives, the group actor takes a _snapshot_ of the existing device actors and will only ask those actors for the temperature. + * Actors that start up _after_ the query arrives are simply ignored. + * If an actor in the snapshot stops during the query without answering, we will simply report the fact that it stopped to the sender of the query message. + +Apart from device actors coming and going dynamically, some actors might take a long time to answer. For example, they could be stuck in an accidental infinite loop, or fail due to a bug and drop our request. We don't want the query to continue indefinitely, so we will consider it complete in either of the following cases: + +* All actors in the snapshot have either responded or have confirmed being stopped. +* We reach a pre-defined deadline. + +Given these decisions, along with the fact that a device in the snapshot might have just started and not yet received a temperature to record, we can define four states +for each device actor, with respect to a temperature query: + + * It has a temperature available: @scala[`Temperature(value)`] @java[`Temperature`]. + * It has responded, but has no temperature available yet: `TemperatureNotAvailable`. + * It has stopped before answering: `DeviceNotAvailable`. + * It did not respond before the deadline: `DeviceTimedOut`. + +Summarizing these in message types we can add the following to `DeviceGroup`: + +Scala +: @@snip [DeviceGroup.scala]($code$/scala/tutorial_5/DeviceGroup.scala) { #query-protocol } + +Java +: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_5/DeviceGroup.java) { #query-protocol } + +## Implementing the query + +One approach for implementing the query involves adding code to the group device actor. However, in practice this can be very cumbersome and error prone. 
Remember that when we start a query, we need to take a snapshot of the devices present and start a timer so that we can enforce the deadline. In the meantime, _another query_ can arrive. For the second query, of course, we need to keep track of the exact same information but in isolation from the previous query. This would require us to maintain separate mappings between queries and device actors. + +Instead, we will implement a simpler and superior approach. We will create an actor that represents a _single query_ and that performs the tasks needed to complete the query on behalf of the group actor. So far we have created actors that belonged to classical domain objects, but now we will create an +actor that represents a process or a task rather than an entity. We benefit by keeping our group device actor simple and being able to better test query capability in isolation. + +### Defining the query actor + +First, we need to design the lifecycle of our query actor. This consists of identifying its initial state, the first action it will take, and the cleanup — if necessary. The query actor will need the following information: + + * The snapshot and IDs of active device actors to query. + * The ID of the request that started the query (so that we can include it in the reply). + * The reference to the actor that sent the query. We will send the reply to this actor directly. + * A deadline that indicates how long the query should wait for replies. Making this a parameter will simplify testing. + +#### Scheduling the query timeout +Since we need a way to indicate how long we are willing to wait for responses, it is time to introduce a new Akka feature that we have +not used yet, the built-in scheduler facility. Using the scheduler is simple: + +* We get the scheduler from the `ActorSystem`, which, in turn, +is accessible from the actor's context: @scala[`context.system.scheduler`]@java[`getContext().getSystem().scheduler()`]. This needs an @scala[implicit] `ExecutionContext` which +is basically the thread-pool that will execute the timer task itself. In our case, we use the same dispatcher +as the actor by @scala[importing `import context.dispatcher`] @java[passing in `getContext().dispatcher()`]. +* The +@scala[`scheduler.scheduleOnce(time, actorRef, message)`] @java[`scheduler.scheduleOnce(time, actorRef, message, executor, sender)`] method will schedule the message `message` into the future by the +specified `time` and send it to the actor `actorRef`. + +We need to create a message that represents the query timeout. We create a simple message `CollectionTimeout` without any parameters for this purpose. The return value from `scheduleOnce` is a `Cancellable` which can be used to cancel the timer if the query finishes successfully in time. At the start of the query, we need to ask each of the device actors for the current temperature. To be able to quickly +detect devices that stopped before they got the `ReadTemperature` message we will also watch each of the actors. This +way, we get `Terminated` messages for those that stop during the lifetime of the query, so we don't need to wait +until the timeout to mark these as not available.
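As a point of reference, here is a hedged Scala sketch of what that scheduling call can look like inside an actor. It only illustrates the `scheduleOnce`/`Cancellable` mechanics described above; names such as `queryTimeoutTimer` are illustrative, not required by the tutorial.

```scala
import akka.actor.{ Actor, Cancellable }
import scala.concurrent.duration._

case object CollectionTimeout

class QueryTimeoutSketch(timeout: FiniteDuration) extends Actor {
  import context.dispatcher // the ExecutionContext that runs the scheduled task

  // Schedule the timeout message to ourselves when the actor starts.
  val queryTimeoutTimer: Cancellable =
    context.system.scheduler.scheduleOnce(timeout, self, CollectionTimeout)

  // Cancel the timer if the actor stops before the deadline fires.
  override def postStop(): Unit = queryTimeoutTimer.cancel()

  override def receive: Receive = {
    case CollectionTimeout =>
      // deadline reached: reply with whatever was collected so far, then stop
      context.stop(self)
  }
}
```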
+ +Putting this together, the outline of our `DeviceGroupQuery` actor looks like this: + +Scala +: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_5/DeviceGroupQuery.scala) { #query-outline } + +Java +: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-outline } + +#### Tracking actor state + +The query actor, apart from the pending timer, has one stateful aspect: it tracks the set of actors that have replied, have stopped, or have not yet replied. One way to track this state is +to create a mutable field in the actor @scala[(a `var`)]. A different approach takes advantage of the ability to change how +an actor responds to messages. A +`Receive` is just a function (or an object, if you like) that can be returned from another function. By default, the `receive` block defines the behavior of the actor, but it is possible to change it multiple times during the life of the actor. We simply call `context.become(newBehavior)` +where `newBehavior` is anything with type `Receive` @scala[(which is just a shorthand for `PartialFunction[Any, Unit]`)]. We will leverage this +feature to track the state of our actor. + +For our use case: + +1. Instead of defining `receive` directly, we delegate to a `waitingForReplies` function to create the `Receive`. +1. The `waitingForReplies` function will keep track of two changing values: + * a `Map` of already received replies + * a `Set` of actors that we still wait on +1. We have three events to act on: + * We can receive a +`RespondTemperature` message from one of the devices. + * We can receive a `Terminated` message for a device actor +that has been stopped in the meantime. + * We can reach the deadline and receive a `CollectionTimeout`. + +In the first two cases, we need to keep track of the replies, which we now simply delegate to a method `receivedResponse`, which we will discuss later. In the case of timeout, we need to simply take all the actors that have not yet replied (the members of the set `stillWaiting`) and put a `DeviceTimedOut` as the status in the final reply. Then we reply to the submitter of the query with the collected results and stop the query actor. + +To accomplish this, add the following to your `DeviceGroupQuery` source file: + + + +Scala +: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_5/DeviceGroupQuery.scala) { #query-state } + +Java +: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-state } + +It is not yet clear how we will "mutate" the `repliesSoFar` and `stillWaiting` data structures. One important thing to note is that the function `waitingForReplies` **does not handle the messages directly. It returns a `Receive` function that will handle the messages**. This means that if we call `waitingForReplies` again, with different parameters, +then it returns a brand new `Receive` that will use those new parameters. + +We have seen how we +can install the initial `Receive` by simply returning it from `receive`. In order to install a new one, to record a +new reply, for example, we need some mechanism. This mechanism is the method `context.become(newReceive)` which will +_change_ the actor's message handling function to the provided `newReceive` function. You can imagine that before +starting, your actor automatically calls `context.become(receive)`, i.e. installing the `Receive` function that +is returned from `receive`.
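To make this behavior-switching pattern concrete outside of the tutorial's query actor, here is a tiny standalone Scala sketch (a toy countdown actor, not part of the tutorial sources):

```scala
import akka.actor.Actor

case object Tick

class CountdownSketch(start: Int) extends Actor {
  // Install the initial behavior by returning it from `receive`.
  override def receive: Receive = counting(start)

  // A parameterized "factory" that builds a fresh Receive for the current state.
  def counting(remaining: Int): Receive = {
    case Tick if remaining > 1 =>
      context.become(counting(remaining - 1)) // swap in a new Receive
    case Tick =>
      context.stop(self) // counted down to zero
  }
}
```

Note that `counting` itself never processes a message; it only builds the `Receive` that will.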
This is another important observation: **it is not `receive` that handles the messages, +it just returns a `Receive` function that will actually handle the messages**. + +We now have to figure out what to do in `receivedResponse`. First, we need to record the new result in the map `repliesSoFar` and remove the actor from `stillWaiting`. The next step is to check if there are any remaining actors we are waiting for. If there is none, we send the result of the query to the original requester and stop the query actor. Otherwise, we need to update the `repliesSoFar` and `stillWaiting` structures and wait for more +messages. + +In the code before, we treated `Terminated` as the implicit response `DeviceNotAvailable`, so `receivedResponse` does +not need to do anything special. However, there is one small task we still need to do. It is possible that we receive a proper +response from a device actor, but then it stops during the lifetime of the query. We don't want this second event +to overwrite the already received reply. In other words, we don't want to receive `Terminated` after we recorded the +response. This is simple to achieve by calling `context.unwatch(ref)`. This method also ensures that we don't +receive `Terminated` events that are already in the mailbox of the actor. It is also safe to call this multiple times, +only the first call will have any effect, the rest is simply ignored. + +With all this knowledge, we can create the `receivedResponse` method: + +Scala +: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_5/DeviceGroupQuery.scala) { #query-collect-reply } + +Java +: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-collect-reply } + +It is quite natural to ask at this point, what have we gained by using the `context.become()` trick instead of +just making the `repliesSoFar` and `stillWaiting` structures mutable fields of the actor (i.e. `var`s)? In this +simple example, not that much. The value of this style of state keeping becomes more evident when you suddenly have +_more kinds_ of states. Since each state +might have temporary data that is relevant itself, keeping these as fields would pollute the global state +of the actor, i.e. it is unclear what fields are used in what state. Using parameterized `Receive` "factory" +methods we can keep data private that is only relevant to the state. It is still a good exercise to +rewrite the query using @scala[`var`s] @java[mutable fields] instead of `context.become()`. However, it is recommended to get comfortable +with the solution we have used here as it helps structuring more complex actor code in a cleaner and more maintainable way. + +Our query actor is now done: + +Scala +: @@snip [DeviceGroupQuery.scala]($code$/scala/tutorial_5/DeviceGroupQuery.scala) { #query-full } + +Java +: @@snip [DeviceGroupQuery.java]($code$/java/jdocs/tutorial_5/DeviceGroupQuery.java) { #query-full } + +### Testing the query actor + +Now let's verify the correctness of the query actor implementation. There are various scenarios we need to test individually to make +sure everything works as expected. To be able to do this, we need to simulate the device actors somehow to exercise +various normal or failure scenarios. Thankfully we took the list of collaborators (actually a `Map`) as a parameter +to the query actor, so we can easily pass in @scala[`TestProbe`] @java[`TestKit`] references. 
In our first test, we try out the case when +there are two devices and both report a temperature: + +Scala +: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-normal } + +Java +: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-normal } + +That was the happy case, but we know that sometimes devices cannot provide a temperature measurement. This +scenario is just slightly different from the previous: + +Scala +: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-no-reading } + +Java +: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-no-reading } + +We also know, that sometimes device actors stop before answering: + +Scala +: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-stopped } + +Java +: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-stopped } + +If you remember, there is another case related to device actors stopping. It is possible that we get a normal reply +from a device actor, but then receive a `Terminated` for the same actor later. In this case, we would like to keep +the first reply and not mark the device as `DeviceNotAvailable`. We should test this, too: + +Scala +: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-stopped-later } + +Java +: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-stopped-later } + +The final case is when not all devices respond in time. To keep our test relatively fast, we will construct the +`DeviceGroupQuery` actor with a smaller timeout: + +Scala +: @@snip [DeviceGroupQuerySpec.scala]($code$/scala/tutorial_5/DeviceGroupQuerySpec.scala) { #query-test-timeout } + +Java +: @@snip [DeviceGroupQueryTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupQueryTest.java) { #query-test-timeout } + +Our query works as expected now, it is time to include this new functionality in the `DeviceGroup` actor now. + +## Adding query capability to the group + +Including the query feature in the group actor is fairly simple now. We did all the heavy lifting in the query actor +itself, the group actor only needs to create it with the right initial parameters and nothing else. + +Scala +: @@snip [DeviceGroup.scala]($code$/scala/tutorial_5/DeviceGroup.scala) { #query-added } + +Java +: @@snip [DeviceGroup.java]($code$/java/jdocs/tutorial_5/DeviceGroup.java) { #query-added } + +It is probably worth restating what we said at the beginning of the chapter. By keeping the temporary state that is only relevant to the query itself in a separate actor we keep the group actor implementation very simple. It delegates +everything to child actors and therefore does not have to keep state that is not relevant to its core business. Also, multiple queries can now run parallel to each other, in fact, as many as needed. In our case querying an individual device actor is a fast operation, but if this were not the case, for example, because the remote sensors need to be contacted over the network, this design would significantly improve throughput. + +We close this chapter by testing that everything works together. 
This test is just a variant of the previous ones, now exercising the group query feature: + +Scala +: @@snip [DeviceGroupSpec.scala]($code$/scala/tutorial_5/DeviceGroupSpec.scala) { #group-query-integration-test } + +Java +: @@snip [DeviceGroupTest.java]($code$/java/jdocs/tutorial_5/DeviceGroupTest.java) { #group-query-integration-test } + +## Summary +In the context of the IoT system, this guide introduced the following concepts, among others. You can follow the links to review them if necessary: + +* @ref:[The hierarchy of actors and their lifecycle](tutorial_1.md) +* @ref:[The importance of designing messages for flexibility](tutorial_3.md) +* @ref:[How to watch and stop actors, if necessary](tutorial_4.md#keeping-track-of-the-device-actors-in-the-group) + +## What's Next? + +To continue your journey with Akka, we recommend: + +* Start building your own applications with Akka, make sure you [get involved in our amazing community](http://akka.io/get-involved) for help if you get stuck. +* If you’d like some additional background, read the rest of the reference documentation and check out some of the @ref:[books and video’s](../additional/books.md) on Akka. diff --git a/akka-docs/src/main/paradox/scala/howto.md b/akka-docs/src/main/paradox/scala/howto.md index 456b626c14..c6f75fedb0 100644 --- a/akka-docs/src/main/paradox/scala/howto.md +++ b/akka-docs/src/main/paradox/scala/howto.md @@ -122,47 +122,7 @@ The pattern is described [Discovering Message Flows in Actor System with the Spi ## Scheduling Periodic Messages -This pattern describes how to schedule periodic messages to yourself in two different -ways. - -The first way is to set up periodic message scheduling in the constructor of the actor, -and cancel that scheduled sending in `postStop` or else we might have multiple registered -message sends to the same actor. - -@@@ note - -With this approach the scheduled periodic message send will be restarted with the actor on restarts. -This also means that the time period that elapses between two tick messages during a restart may drift -off based on when you restart the scheduled message sends relative to the time that the last message was -sent, and how long the initial delay is. Worst case scenario is `interval` plus `initialDelay`. - -@@@ - -Scala -: @@snip [SchedulerPatternSpec.scala]($code$/scala/docs/pattern/SchedulerPatternSpec.scala) { #schedule-constructor } - -Java -: @@snip [SchedulerPatternTest.java]($code$/java/jdocs/pattern/SchedulerPatternTest.java) { #schedule-constructor } - -The second variant sets up an initial one shot message send in the `preStart` method -of the actor, and the then the actor when it receives this message sets up a new one shot -message send. You also have to override `postRestart` so we don't call `preStart` -and schedule the initial message send again. - -@@@ note - -With this approach we won't fill up the mailbox with tick messages if the actor is -under pressure, but only schedule a new tick message when we have seen the previous one. 
- -@@@ - -Scala -: @@snip [SchedulerPatternSpec.scala]($code$/scala/docs/pattern/SchedulerPatternSpec.scala) { #schedule-receive } - -Java -: @@snip [SchedulerPatternTest.java]($code$/java/jdocs/pattern/SchedulerPatternTest.java) { #schedule-receive } - -@@@ div { .group-java } +See @ref:[Actor Timers](actors.md#actors-timers) ## Single-Use Actor Trees with High-Level Error Reporting diff --git a/akka-docs/src/main/paradox/scala/index-actors.md b/akka-docs/src/main/paradox/scala/index-actors.md index e8a6b86369..6b7db6aa6b 100644 --- a/akka-docs/src/main/paradox/scala/index-actors.md +++ b/akka-docs/src/main/paradox/scala/index-actors.md @@ -16,7 +16,6 @@ * [persistence-query](persistence-query.md) * [persistence-query-leveldb](persistence-query-leveldb.md) * [testing](testing.md) -* [actordsl](actordsl.md) * [typed-actors](typed-actors.md) -@@@ \ No newline at end of file +@@@ diff --git a/akka-docs/src/main/paradox/scala/logging.md b/akka-docs/src/main/paradox/scala/logging.md index 52d6f3f25e..3d0fae2ba1 100644 --- a/akka-docs/src/main/paradox/scala/logging.md +++ b/akka-docs/src/main/paradox/scala/logging.md @@ -12,7 +12,15 @@ synchronously. Create a `LoggingAdapter` and use the `error`, `warning`, `info`, or `debug` methods, as illustrated in this example: -@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #my-actor } +Scala +: @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #my-actor } + +Java +: @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports } + @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #my-actor } + +@@@ div { .group-scala } + For convenience, you can mix in the `log` member into actors, instead of defining it as above. @@ -22,14 +30,21 @@ class MyActor extends Actor with akka.actor.ActorLogging { } ``` -The second parameter to the `Logging` is the source of this logging channel. +@@@ + +The first parameter to @scala[`Logging`] @java[`Logging.getLogger`] could also be any +`LoggingBus`, specifically @scala[`system.eventStream`] @java[`system.eventStream()`]; in the demonstrated +case, the actor system's address is included in the `akkaSource` +representation of the log source (see @ref:[Logging Thread, Akka Source and Actor System in MDC](#logging-thread-akka-source-and-actor-system-in-mdc)) +while in the second case this is not automatically done. +The second parameter to @scala[`Logging`] @java[`Logging.getLogger`] is the source of this logging channel. The source object is translated to a String according to the following rules: * if it is an Actor or ActorRef, its path is used * in case of a String it is used as is * in case of a class an approximation of its simpleName - * and in all other cases a compile error occurs unless an implicit -`LogSource[T]` is in scope for the type in question. + * and in all other cases @scala[a compile error occurs unless an implicit +`LogSource[T]` is in scope for the type in question] @java[the simpleName of its class] The log message may contain argument placeholders `{}`, which will be substituted if the log level is enabled. Giving more arguments than @@ -37,7 +52,11 @@ placeholders results in a warning being appended to the log statement (i.e. on the same line with the same severity).
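For instance, a minimal illustration (not one of the referenced snippets), assuming `log` comes from mixing in `akka.actor.ActorLogging`:

```scala
// Inside an actor that mixes in akka.actor.ActorLogging:
val processingTimeMillis = 42
log.info("Request {} processed in {} ms", "req-1", processingTimeMillis)
// Passing more arguments than there are `{}` placeholders appends a warning
// to the same log line instead of failing.
```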
You may pass an array as the only substitution argument to have its elements be treated individually: -@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #array } +Scala +: @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #array } + +Java +: @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #array } The Java `Class` of the log source is also included in the generated `LogEvent`. In case of a simple string this is replaced with a “marker” @@ -51,7 +70,7 @@ use. By default messages sent to dead letters are logged at info level. Existence of dead letters does not necessarily indicate a problem, but they are logged by default for the sake of caution. After a few messages this logging is turned off, to avoid flooding the logs. -You can disable this logging completely or adjust how many dead letters that are +You can disable this logging completely or adjust how many dead letters are logged. During system shutdown it is likely that you see dead letters, since pending messages in the actor mailboxes are sent to dead letters. You can also disable logging of dead letters during shutdown. @@ -88,6 +107,8 @@ akka { } ``` +@@@ div { .group-scala } + If you want very detailed logging of user-level messages then wrap your actors' behaviors with `akka.event.LoggingReceive` and enable the `receive` option: @@ -103,6 +124,8 @@ akka { } ``` +@@@ + If you want very detailed logging of all automatically received messages that are processed by Actors: @@ -212,7 +235,9 @@ akka { } ``` -Also see the logging options for TestKit: @ref:[actor.logging-scala](testing.md#actor-logging). +Also see the @ref:[logging options for TestKit](testing.md#actor-logging). + +@@@ div { .group-scala } ### Translating Log Source to String and Class @@ -229,7 +254,9 @@ loggers, which are based upon the originating object’s class name as log category. The override of `getClazz` is only included for demonstration purposes as it contains exactly the default behavior. -@@@ note +@@@ + +@@@ note { .group-scala } You may also create the string representation up front and pass that in as the log source, but be aware that then the `Class[_]` which will be @@ -242,6 +269,7 @@ might want to do this also in case you implement your own logging adapter. @@@ + ### Turn Off Logging To turn off logging you can configure the log levels to be `OFF` like this. @@ -286,12 +314,17 @@ akka { ``` The default one logs to STDOUT and is registered by default. It is not intended -to be used for production. There is also an [SLF4J](#slf4j) +to be used for production. There is also an @ref:[SLF4J](#slf4j) logger available in the 'akka-slf4j' module. Example of creating a listener: -@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #my-event-listener } +Scala +: @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #my-event-listener } + +Java +: @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports #imports-listener } + @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #my-event-listener } ## Logging to stdout during startup and shutdown @@ -306,10 +339,21 @@ stdout logger is `WARNING` and it can be silenced completely by setting Akka provides a logger for [SL4FJ](http://www.slf4j.org/). This module is available in the 'akka-slf4j.jar'. It has a single dependency: the slf4j-api jar. In your runtime, you also need a SLF4J backend. 
We recommend [Logback](http://logback.qos.ch/): -```scala +sbt +: ```scala libraryDependencies += "ch.qos.logback" % "logback-classic" % "1.2.3" ``` +maven +: ```xml + + ch.qos.logback + logback-classic + 1.2.3 + + ``` + + You need to enable the Slf4jLogger in the `loggers` element in the @ref:[configuration](general/configuration.md). Here you can also define the log level of the event bus. More fine grained log levels can be defined in the configuration of the SLF4J backend @@ -336,10 +380,10 @@ akka { One gotcha is that the timestamp is attributed in the event handler, not when actually doing the logging. The SLF4J logger selected for each log event is chosen based on the -`Class[_]` of the log source specified when creating the +@scala[`Class[_]`] @java[`Class`] of the log source specified when creating the `LoggingAdapter`, unless that was given directly as a string in which -case that string is used (i.e. `LoggerFactory.getLogger(c: Class[_])` is used in -the first case and `LoggerFactory.getLogger(s: String)` in the second). +case that string is used (i.e. @scala[`LoggerFactory.getLogger(c: Class[_])`] @java[`LoggerFactory.getLogger(Class c)`] is used in +the first case and @scala[`LoggerFactory.getLogger(s: String)`] @java[`LoggerFactory.getLogger(String s)`] in the second). @@@ note @@ -350,10 +394,16 @@ shown below: @@@ -```scala +Scala +: ```scala val log = Logging(system.eventStream, "my.nice.string") ``` +Java +: ```java + final LoggingAdapter log = Logging.getLogger(system.eventStream(), "my.string"); + ``` + ### Using the SLF4J API directly @@ -433,25 +483,39 @@ If you want to more accurately output the timestamp, use the MDC attribute `akka One useful feature available in Slf4j is [MDC](http://logback.qos.ch/manual/mdc.html), Akka has a way to let the application specify custom values, you just need to get a specialized `LoggingAdapter`, the `DiagnosticLoggingAdapter`. In order to -get it you can use the factory, providing an Actor as logSource: +get it you can use the factory, providing an @scala[Actor] @java[AbstractActor] as logSource: -```scala +Scala +: ```scala // Within your Actor val log: DiagnosticLoggingAdapter = Logging(this); ``` +Java +: ```java + // Within your AbstractActor + final DiagnosticLoggingAdapter log = Logging.getLogger(this); + ``` + Once you have the logger, you just need to add the custom values before you log something. This way, the values will be put in the SLF4J MDC right before appending the log and removed after. @@@ note The cleanup (removal) should be done in the actor at the end, -otherwise, next message will log with same mdc values, +otherwise, next message will log with same MDC values, if it is not set to a new map. Use `log.clearMDC()`. @@@ -@@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #mdc } +Scala +: @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #mdc } + +Java +: @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #imports-mdc } + @@snip [LoggingDocTest.java]($code$/java/jdocs/event/LoggingDocTest.java) { #mdc-actor } + +@@@ div { .group-scala } For convenience, you can mix in the `log` member into actors, instead of defining it as above. 
This trait also lets you override `def mdc(msg: Any): MDC` for specifying MDC values @@ -459,6 +523,8 @@ depending on current message and lets you forget about the cleanup as well, sinc @@snip [LoggingDocSpec.scala]($code$/scala/docs/event/LoggingDocSpec.scala) { #mdc-actor } +@@@ + Now, the values will be available in the MDC, so you can use them in the layout pattern: ``` @@ -494,6 +560,16 @@ A more advanced (including most Akka added information) example pattern would be %date{ISO8601} level=[%level] marker=[%marker] logger=[%logger] akkaSource=[%X{akkaSource}] sourceActorSystem=[%X{sourceActorSystem}] sourceThread=[%X{sourceThread}] mdc=[ticket-#%X{ticketNumber}: %X{ticketDesc}] - msg=[%msg]%n----%n ``` +#### Using SLF4J's Markers + +It is also possible to use the `org.slf4j.Marker` with the `LoggingAdapter` when using slf4j. + +Since the akka-actor library avoids depending on any specific logging library, the support for this is included in `akka-slf4j`, +which provides the `Slf4jLogMarker` type which can be passed in as the first argument instead of the logging framework agnostic `LogMarker` +type from `akka-actor`. The most notable difference between the two is that slf4j's Markers can have child markers, so one can +relay more information using them rather than just a single string. + + ## java.util.logging @@ -525,10 +601,10 @@ akka { One gotcha is that the timestamp is attributed in the event handler, not when actually doing the logging. The `java.util.logging.Logger` selected for each log event is chosen based on the -`Class[_]` of the log source specified when creating the +@scala[`Class[_]`]@java[`Class`] of the log source specified when creating the `LoggingAdapter`, unless that was given directly as a string in which -case that string is used (i.e. `LoggerFactory.getLogger(c: Class[_])` is used in -the first case and `LoggerFactory.getLogger(s: String)` in the second). +case that string is used (i.e. @scala[`LoggerFactory.getLogger(c: Class[_])`] @java[`LoggerFactory.getLogger(Class c)`] is used in -the first case and @scala[`LoggerFactory.getLogger(s: String)`] @java[`LoggerFactory.getLogger(String s)`] in the second). @@@ note @@ -539,6 +615,12 @@ shown below: @@@ -```scala +Scala +: ```scala val log = Logging(system.eventStream, "my.nice.string") ``` + +Java +: ```java + final LoggingAdapter log = Logging.getLogger(system.eventStream(), "my.string"); + ``` diff --git a/akka-docs/src/main/paradox/scala/mailboxes.md b/akka-docs/src/main/paradox/scala/mailboxes.md index 5ac6750abe..b8a47c70a5 100644 --- a/akka-docs/src/main/paradox/scala/mailboxes.md +++ b/akka-docs/src/main/paradox/scala/mailboxes.md @@ -9,12 +9,16 @@ all routees will share a single mailbox instance. ### Requiring a Message Queue Type for an Actor It is possible to require a certain type of message queue for a certain type of actor -by having that actor extend the parameterized trait `RequiresMessageQueue`. Here is +by having that actor @scala[extend]@java[implement] the parameterized @scala[trait]@java[interface] `RequiresMessageQueue`.
Here is an example: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #required-mailbox-class } +Scala +: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #required-mailbox-class } -The type parameter to the `RequiresMessageQueue` trait needs to be mapped to a mailbox in +Java +: @@snip [MyBoundedActor.java]($code$/java/jdocs/actor/MyBoundedActor.java) { #my-bounded-untyped-actor } + +The type parameter to the `RequiresMessageQueue` @scala[trait]@java[interface] needs to be mapped to a mailbox in configuration like this: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #bounded-mailbox-config #required-mailbox-config } @@ -26,7 +30,7 @@ a dispatcher with a specified mailbox type, then that will override this mapping @@@ note The type of the queue in the mailbox created for an actor will be checked against the required type in the -trait and if the queue doesn't implement the required type then actor creation will fail. +@scala[trait]@java[interface] and if the queue doesn't implement the required type then actor creation will fail. @@@ @@ -181,7 +185,11 @@ The following mailboxes should only be used with zero `mailbox-push-timeout-time How to create a PriorityMailbox: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox } +Scala +: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox } + +Java +: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #prio-mailbox } And then add it to the configuration: @@ -189,19 +197,35 @@ And then add it to the configuration: And then an example on how you would use it: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher } +Scala +: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-dispatcher } + +Java +: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #prio-dispatcher } It is also possible to configure a mailbox type directly like this: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config #mailbox-deployment-config } +Scala +: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config #mailbox-deployment-config } + +Java +: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #prio-mailbox-config-java #mailbox-deployment-config } And then use it either from deployment like this: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-config } +Scala +: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-config } + +Java +: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-config } Or code like this: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-code } +Scala +: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #defining-mailbox-in-code } + +Java +: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #defining-mailbox-in-code } ### ControlAwareMailbox @@ -214,17 +238,36 @@ It can be configured like this: Control messages 
need to extend the `ControlMessage` trait: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-messages } +Scala +: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-mailbox-messages } + +Java +: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-mailbox-messages } And then an example on how you would use it: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-dispatcher } +Scala +: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #control-aware-dispatcher } + +Java +: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #control-aware-dispatcher } ## Creating your own Mailbox type An example is worth a thousand quacks: -@@snip [MyUnboundedMailbox.scala]($code$/scala/docs/dispatcher/MyUnboundedMailbox.scala) { #mailbox-implementation-example } +Scala +: @@snip [MyUnboundedMailbox.scala]($code$/scala/docs/dispatcher/MyUnboundedMailbox.scala) { #mailbox-marker-interface } + +Java +: @@snip [MyUnboundedMessageQueueSemantics.java]($code$/java/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java) { #mailbox-marker-interface } + + +Scala +: @@snip [MyUnboundedMailbox.scala]($code$/scala/docs/dispatcher/MyUnboundedMailbox.scala) { #mailbox-implementation-example } + +Java +: @@snip [MyUnboundedMailbox.java]($code$/java/jdocs/dispatcher/MyUnboundedMailbox.java) { #mailbox-implementation-example } And then you just specify the FQCN of your MailboxType as the value of the "mailbox-type" in the dispatcher configuration, or the mailbox configuration. @@ -247,7 +290,11 @@ You can also use the mailbox as a requirement on the dispatcher like this: Or by defining the requirement on your actor class like this: -@@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #require-mailbox-on-actor } +Scala +: @@snip [DispatcherDocSpec.scala]($code$/scala/docs/dispatcher/DispatcherDocSpec.scala) { #require-mailbox-on-actor } + +Java +: @@snip [DispatcherDocTest.java]($code$/java/jdocs/dispatcher/DispatcherDocTest.java) { #require-mailbox-on-actor } ## Special Semantics of `system.actorOf` @@ -260,12 +307,25 @@ puts those inside the reference. Until that has happened, messages sent to the `ActorRef` will be queued locally, and only upon swapping the real filling in will they be transferred into the real mailbox. Thus, -```scala -val props: Props = ... -// this actor uses MyCustomMailbox, which is assumed to be a singleton -system.actorOf(props.withDispatcher("myCustomMailbox")) ! "bang" -assert(MyCustomMailbox.instance.getLastEnqueuedMessage == "bang") -``` +Scala +: @@@vars + ```scala + val props: Props = ... + // this actor uses MyCustomMailbox, which is assumed to be a singleton + system.actorOf(props.withDispatcher("myCustomMailbox")) ! "bang" + assert(MyCustomMailbox.instance.getLastEnqueuedMessage == "bang") + ``` + @@@ + +Java +: @@@vars + ```java + final Props props = ... + // this actor uses MyCustomMailbox, which is assumed to be a singleton + system.actorOf(props.withDispatcher("myCustomMailbox")).tell("bang", sender); + assert(MyCustomMailbox.getInstance().getLastEnqueued().equals("bang")); + ``` + @@@ will probably fail; you will have to allow for some time to pass and retry the check à la `TestKit.awaitCond`.
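A hedged sketch of that retry, assuming a TestKit-based spec (so `system` and `awaitCond` are in scope) and the hypothetical `MyCustomMailbox` singleton and `props` value from the example above:

```scala
import scala.concurrent.duration._

val ref = system.actorOf(props.withDispatcher("myCustomMailbox"))
ref ! "bang"
// Poll until the message shows up in the custom mailbox, instead of asserting immediately.
awaitCond(
  MyCustomMailbox.instance.getLastEnqueuedMessage == "bang",
  max = 3.seconds,
  interval = 100.millis)
```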
\ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/multi-jvm-testing.md b/akka-docs/src/main/paradox/scala/multi-jvm-testing.md index c9cc837f44..41cefe16b0 100644 --- a/akka-docs/src/main/paradox/scala/multi-jvm-testing.md +++ b/akka-docs/src/main/paradox/scala/multi-jvm-testing.md @@ -117,8 +117,8 @@ You can change the name of the multi-JVM test source directory by adding the fol configuration to your project: ```none -unmanagedSourceDirectories in MultiJvm <<= - Seq(baseDirectory(_ / "src/some_directory_here")).join +unmanagedSourceDirectories in MultiJvm := + Seq(baseDirectory(_ / "src/some_directory_here")).join.value ``` You can change what the `MultiJvm` identifier is. For example, to change it to diff --git a/akka-docs/src/main/paradox/scala/persistence.md b/akka-docs/src/main/paradox/scala/persistence.md index 6701e2bd87..d7bbd3827d 100644 --- a/akka-docs/src/main/paradox/scala/persistence.md +++ b/akka-docs/src/main/paradox/scala/persistence.md @@ -17,14 +17,21 @@ concepts and architecture of [eventsourced](https://github.com/eligosource/event Akka persistence is a separate jar file. Make sure that you have the following dependency in your project: -Scala +sbt : @@@vars ``` "com.typesafe.akka" %% "akka-persistence" % "$akka.version$" ``` @@@ -Java +gradle +: @@@vars + ``` + compile group: 'com.typesafe.akka', name: 'akka-persistence_$scala.binary_version$', version: '$akka.version$' + ``` + @@@ + +maven : @@@vars ``` @@ -40,7 +47,7 @@ in-memory heap based journal, local file-system based snapshot-store and LevelDB LevelDB based plugins will require the following additional dependency declaration: -Scala +sbt : @@@vars ``` "org.iq80.leveldb" % "leveldb" % "0.7" @@ -48,7 +55,15 @@ Scala ``` @@@ -Java +gradle +: @@@vars + ``` + compile group: 'org.iq80.leveldb', name: 'leveldb', version: '0.7' + compile group: 'org.fusesource.leveldbjni', name: 'leveldbjni-all', version: '1.8' + ``` + @@@ + +maven : @@@vars ``` @@ -76,7 +91,7 @@ case of sender and receiver JVM crashes. are journaled and which are received by the persistent actor without being journaled. Journal maintains `highestSequenceNr` that is increased on each message. The storage backend of a journal is pluggable. The persistence extension comes with a "leveldb" journal plugin, which writes to the local filesystem. Replicated journals are available as [Community plugins](http://akka.io/community/). - * *Snapshot store*: A snapshot store persists snapshots of a persistent actor's or a view's internal state. Snapshots are + * *Snapshot store*: A snapshot store persists snapshots of a persistent actor's internal state. Snapshots are used for optimizing recovery times. The storage backend of a snapshot store is pluggable. The persistence extension comes with a "local" snapshot storage plugin, which writes to the local filesystem. * *Event sourcing*. Based on the building blocks described above, Akka persistence provides abstractions for the @@ -177,7 +192,7 @@ By default, a persistent actor is automatically recovered on start and on restar New messages sent to a persistent actor during recovery do not interfere with replayed messages. They are stashed and received by a persistent actor after recovery phase completes. -The number of concurrent recoveries of recoveries that can be in progress at the same time is limited +The number of concurrent recoveries that can be in progress at the same time is limited to not overload the system and the backend data store. 
When exceeding the limit the actors will wait until other recoveries have been completed. This is configured by: @@ -187,7 +202,7 @@ akka.persistence.max-concurrent-recoveries = 50 @@@ note -Accessing the @scala[`sender()`]@java[sender with `getSender()] for replayed messages will always result in a `deadLetters` reference, +Accessing the @scala[`sender()`]@java[sender with `getSender()`] for replayed messages will always result in a `deadLetters` reference, as the original sender is presumed to be long gone. If you indeed have to notify an actor during recovery in the future, store its `ActorPath` explicitly in your persisted events. @@ -209,9 +224,8 @@ Scala Java : @@snip [LambdaPersistenceDocTest.java]($code$/java/jdocs/persistence/LambdaPersistenceDocTest.java) { #recovery-no-snap } -Another example, which can be fun for experiments but probably not in a real application, is setting an -upper bound to the replay which allows the actor to be replayed to a certain point "in the past" -instead to its most up to date state. Note that after that it is a bad idea to persist new +Another possible recovery customization, which can be useful for debugging, is setting an +upper bound on the replay, causing the actor to be replayed only up to a certain point "in the past" (instead of being replayed to its most up to date state). Note that after that it is a bad idea to persist new events because a later recovery will probably be confused by the new events that follow the events that were previously skipped. @@ -338,7 +352,7 @@ Java @@@ note -In order to implement the pattern known as "*command sourcing*" simply call @scala[persistAsync(cmd)(...)`]@java[`persistAsync`] right away on all incoming +In order to implement the pattern known as "*command sourcing*" simply call @scala[`persistAsync(cmd)(...)`]@java[`persistAsync`] right away on all incoming messages and handle them in the callback. @@@ @@ -639,8 +653,7 @@ akka.persistence.journal.leveldb.replay-filter { ## Snapshots -Snapshots can dramatically reduce recovery times of persistent actors and views. The following discusses snapshots -in context of persistent actors but this is also applicable to persistent views. +As you model your domain using actors, you may notice that some actors may be prone to accumulating extremely long event logs and experiencing long recovery times. Sometimes, the right approach may be to split out into a set of shorter lived actors. However, when this is not an option, you can use snapshots to reduce recovery times drastically. Persistent actors can save snapshots of internal state by calling the `saveSnapshot` method. If saving of a snapshot succeeds, the persistent actor receives a `SaveSnapshotSuccess` message, otherwise a `SaveSnapshotFailure` message @@ -683,13 +696,13 @@ saved snapshot matches the specified `SnapshotSelectionCriteria` will replay all @@@ note In order to use snapshots, a default snapshot-store (`akka.persistence.snapshot-store.plugin`) must be configured, -or the @scala`PersistentActor`]@java[persistent actor] can pick a snapshot store explicitly by overriding @scala[`def snapshotPluginId: String`]@java[`String snapshotPluginId()`]. +or the @scala[`PersistentActor`]@java[persistent actor] can pick a snapshot store explicitly by overriding @scala[`def snapshotPluginId: String`]@java[`String snapshotPluginId()`]. Since it is acceptable for some applications to not use any snapshotting, it is legal to not configure a snapshot store. 
However, Akka will log a warning message when this situation is detected and then continue to operate until an actor tries to store a snapshot, at which point the operation will fail (by replying with an `SaveSnapshotFailure` for example). -Note that @ref:[Cluster Sharding](cluster-sharding.md) is using snapshots, so if you use Cluster Sharding you need to define a snapshot store plugin. +Note that the "persistence mode" of @ref:[Cluster Sharding](cluster-sharding.md) makes use of snapshots. If you use that mode, you'll need to define a snapshot store plugin. @@@ @@ -999,10 +1012,10 @@ Storage backends for journals and snapshot stores are pluggable in the Akka pers A directory of persistence journal and snapshot store plugins is available at the Akka Community Projects page, see [Community plugins](http://akka.io/community/) -Plugins can be selected either by "default" for all persistent actors and views, -or "individually", when a persistent actor or view defines its own set of plugins. +Plugins can be selected either by "default" for all persistent actors, +or "individually", when a persistent actor defines its own set of plugins. -When a persistent actor or view does NOT override the `journalPluginId` and `snapshotPluginId` methods, +When a persistent actor does NOT override the `journalPluginId` and `snapshotPluginId` methods, the persistence extension will use the "default" journal and snapshot-store plugins configured in `reference.conf`: ``` @@ -1026,10 +1039,34 @@ Java ### Eager initialization of persistence plugin By default, persistence plugins are started on-demand, as they are used. In some case, however, it might be beneficial -to start a certain plugin eagerly. In order to do that, you should first add the `akka.persistence.Persistence` +to start a certain plugin eagerly. In order to do that, you should first add `akka.persistence.Persistence` under the `akka.extensions` key. Then, specify the IDs of plugins you wish to start automatically under `akka.persistence.journal.auto-start-journals` and `akka.persistence.snapshot-store.auto-start-snapshot-stores`. +For example, if you want eager initialization for the leveldb journal plugin and the local snapshot store plugin, your configuration should look like this: + +``` +akka { + + extensions = [akka.persistence.Persistence] + + persistence { + + journal { + plugin = "akka.persistence.journal.leveldb" + auto-start-journals = ["akka.persistence.journal.leveldb"] + } + + snapshot-store { + plugin = "akka.persistence.snapshot-store.local" + auto-start-snapshot-stores = ["akka.persistence.snapshot-store.local"] + } + + } + +} +``` + ### Journal plugin API @@ -1065,7 +1102,7 @@ A journal plugin can be activated with the following minimal configuration: The journal plugin instance is an actor so the methods corresponding to requests from persistent actors are executed sequentially. It may delegate to asynchronous libraries, spawn futures, or delegate to other -actors to achive parallelism. +actors to achieve parallelism. The journal plugin class must have a constructor with one of these signatures: @@ -1117,19 +1154,33 @@ Don't run snapshot store tasks/futures on the system default dispatcher, since t In order to help developers build correct and high quality storage plugins, we provide a Technology Compatibility Kit ([TCK](http://en.wikipedia.org/wiki/Technology_Compatibility_Kit) for short). -The TCK is usable from Java as well as Scala projects. 
For @scala[Scala]@java[Java] you need to include the akka-persistence-tck dependency: +The TCK is usable from Java as well as Scala projects. To test your implementation (independently of language) you need to include the akka-persistence-tck dependency: -``` -"com.typesafe.akka" %% "akka-persistence-tck" % "$akka.version$" % "test" -``` -``` - - com.typesafe.akka - akka-persistence-tck_${scala.version} - $akka.version$ - test - -``` +sbt +: @@@vars + ``` + "com.typesafe.akka" %% "akka-persistence-tck" % "$akka.version$" % "test" + ``` + @@@ + +gradle +: @@@vars + ``` + testCompile group: 'com.typesafe.akka', name: 'akka-persistence-tck_$scala.binary_version$', version: '$akka.version$' + ``` + @@@ + +maven +: @@@vars + ``` + + com.typesafe.akka + akka-persistence-tck_$scala.binary_version$ + $akka.version$ + test + + ``` + @@@ To include the Journal TCK tests in your test suite simply extend the provided @scala[`JournalSpec`]@java[`JavaJournalSpec`]: @@ -1140,7 +1191,7 @@ Java : @@snip [LambdaPersistencePluginDocTest.java]($code$/java/jdocs/persistence/LambdaPersistencePluginDocTest.java) { #journal-tck-java } Please note that some of the tests are optional, and by overriding the `supports...` methods you give the -TCK the needed information about which tests to run. You can implement these methods using @scala[boolean falues or] the +TCK the needed information about which tests to run. You can implement these methods using @scala[boolean values or] the provided `CapabilityFlag.on` / `CapabilityFlag.off` values. We also provide a simple benchmarking class @scala[`JournalPerfSpec`]@java[`JavaJournalPerfSpec`] which includes all the tests that @scala[`JournalSpec`]@java[`JavaJournalSpec`] @@ -1181,15 +1232,25 @@ instance. Enable this plugin by defining config property: LevelDB based plugins will also require the following additional dependency declaration: -Scala +sbt : @@@vars ``` "org.iq80.leveldb" % "leveldb" % "0.7" "org.fusesource.leveldbjni" % "leveldbjni-all" % "1.8" ``` @@@ - @@@vars - ``` + +gradle +: @@@vars + ``` + compile group: 'org.iq80.leveldb', name: 'leveldb', version: '0.7' + compile group: 'org.fusesource.leveldbjni', name: 'leveldbjni-all', version: '1.8' + ``` + @@@ + +maven +: @@@vars + ``` org.iq80.leveldb leveldb @@ -1203,7 +1264,6 @@ Scala ``` @@@ - The default location of LevelDB files is a directory named `journal` in the current working directory. This location can be changed by configuration where the specified path can be relative or absolute: @@ -1283,7 +1343,7 @@ you don't have to configure it. A persistence plugin proxy allows sharing of journals and snapshot stores across multiple actor systems (on the same or on different nodes). This, for example, allows persistent actors to failover to a backup node and continue using the shared journal instance from the backup node. The proxy works by forwarding all the journal/snapshot store messages to a -single, shared, persistence plugin instance, and therefor supports any use case supported by the proxied plugin. +single, shared, persistence plugin instance, and therefore supports any use case supported by the proxied plugin. 
@@@ warning @@ -1362,12 +1422,12 @@ to the @ref:[reference configuration](general/configuration.md#config-akka-persi ## Multiple persistence plugin configurations -By default, a persistent actor or view will use the "default" journal and snapshot store plugins +By default, a persistent actor will use the "default" journal and snapshot store plugins configured in the following sections of the `reference.conf` configuration resource: @@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #default-config } -Note that in this case the actor or view overrides only the `persistenceId` method: +Note that in this case the actor overrides only the `persistenceId` method: Scala : @@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #default-plugins } @@ -1375,8 +1435,8 @@ Scala Java : @@snip [PersistenceMultiDocTest.java]($code$/java/jdocs/persistence/PersistenceMultiDocTest.java) { #default-plugins } -When the persistent actor or view overrides the `journalPluginId` and `snapshotPluginId` methods, -the actor or view will be serviced by these specific persistence plugins instead of the defaults: +When the persistent actor overrides the `journalPluginId` and `snapshotPluginId` methods, +the actor will be serviced by these specific persistence plugins instead of the defaults: Scala : @@snip [PersistenceMultiDocSpec.scala]($code$/scala/docs/persistence/PersistenceMultiDocSpec.scala) { #override-plugins } diff --git a/akka-docs/src/main/paradox/scala/project/migration-guide-2.4.x-2.5.x.md b/akka-docs/src/main/paradox/scala/project/migration-guide-2.4.x-2.5.x.md index 82edd196d5..4e6b548172 100644 --- a/akka-docs/src/main/paradox/scala/project/migration-guide-2.4.x-2.5.x.md +++ b/akka-docs/src/main/paradox/scala/project/migration-guide-2.4.x-2.5.x.md @@ -395,7 +395,7 @@ either not rely on messages being the same instance or turn the setting off. ### Wire Protocol Compatibility It is possible to use Akka Remoting between nodes running Akka 2.4.16 and 2.5-M1, but some settings have changed so you might need -to adjust some configuration as described in [mig25_rolling](#mig25-rolling). +to adjust some configuration as described in [Rolling Update](#mig25-rolling). Note however that if using Java serialization it will not be possible to mix nodes using Scala 2.11 and 2.12. @@ -416,10 +416,18 @@ Some settings have changed default value in 2.5-M1 and therefore you need to rev before doing a rolling update to 2.5-M1. Such settings are mentioned elsewhere in this migration guide and here is a summary of things to consider. - * [mig25_addser](#mig25-addser) - * [mig25_weaklyup](#mig25-weaklyup) - * [mig25_sharding_store](#mig25-sharding-store) - * [mig25_mutual](#mig25-mutual) + * [akka.actor.additional-serialization-bindings](#mig25-addser) + * [akka.cluster.allow-weakly-up-members](#mig25-weaklyup) + * [akka.cluster.sharding.state-store-mode](#mig25-sharding-store) + * [akka.remote.netty.ssl.require-mutual-authentication](#mig25-mutual) + +#### Limit lookup of routees to nodes tagged with multiple roles + +Starting with 2.5.4, cluster routing supports delivering messages to routees tagged with all specified roles +using `use-roles` (instead of `use-role` in previous versions). When doing rolling upgrades and using this new feature, +it is important to first upgrade the existing nodes to the latest version of Akka +and then start using multiple roles in a separate rolling upgrade. 
Otherwise, if a new node sends a message +with the restriction `use-roles = ["a", "b"]`, old nodes will only require the "a" role. ### Coordinated Shutdown @@ -464,7 +472,7 @@ you might need to enable/disable it in configuration when performing rolling upg ### Cluster Sharding state-store-mode -Distributed Data mode is now the default `state-store-mode` for Cluster Sharding. The persistence mode +Distributed Data mode, which was still experimental in 2.4.x, is now the default `state-store-mode` for Cluster Sharding. The persistence mode is also supported. Read more in the @ref:[documentation](../cluster-sharding.md#cluster-sharding-mode). It's important to use the same mode on all nodes in the cluster, i.e. if you perform a rolling upgrade @@ -479,6 +487,9 @@ Note that the stored @ref:[Remembering Entities](../cluster-sharding.md#cluster- be migrated to the `data` mode. Such entities must be started again in some other way when using `ddata` mode. +Rolling upgrades from clusters that already used the (then-experimental) `ddata` +mode are not supported. + ### Cluster Sharding remember entities To use *remember entities* with cluster sharding there are now an additional requirement added: the @@ -706,6 +717,8 @@ final ActorRef throttler = .run(materializer); ``` +Note that when using `Sink.actorRef` the sender of the original message sent to the `throttler` ActorRef will be lost and messages will arrive at the `target` with the sender set to `ActorRef.noSender`. Using this construct it is currently impossible to keep the original sender. Alternatively, you can manually specify a static sender by replacing `Sink.actorRef` with @java[`Sink.foreach(msg -> target.tell(msg, whateverSender))`]@scala[`Sink.foreach(msg => target.tell(msg, whateverSender))`]. You could also calculate a sender by inspecting the `msg`. Be cautious not to use `target.tell(msg, sender())` inside of `Sink.foreach` because the result of `sender()` is undefined or will fail when executed from within a stream. + ## Akka Typed With the new term @ref:[may change](../common/may-change.md) we will no longer have a different artifact for modules that are not diff --git a/akka-docs/src/main/paradox/scala/remoting-artery.md b/akka-docs/src/main/paradox/scala/remoting-artery.md index ea51d078b2..9edae2dfe4 100644 --- a/akka-docs/src/main/paradox/scala/remoting-artery.md +++ b/akka-docs/src/main/paradox/scala/remoting-artery.md @@ -22,7 +22,7 @@ acts as a "server" to which arbitrary systems on the same network can connect to ## What is new in Artery Artery is a reimplementation of the old remoting module aimed at improving performance and stability. It is mostly -backwards compatible with the old implementation and it is a drop-in replacement in many cases. Main features +source compatible with the old implementation and it is a drop-in replacement in many cases. Main features of Artery compared to the previous implementation: * Based on [Aeron](https://github.com/real-logic/Aeron) (UDP) instead of TCP @@ -33,7 +33,7 @@ in case of heavy traffic by using a dedicated subchannel.
* Support for a separate subchannel for large messages to avoid interference with smaller messages * Compression of actor paths on the wire to reduce overhead for smaller messages * Support for faster serialization/deserialization using ByteBuffers directly - * Built-in Flight-Recorder to help debugging implementation issues without polluting users logs with implementaiton + * Built-in Flight-Recorder to help debugging implementation issues without polluting users logs with implementation specific events * Providing protocol stability across major Akka versions to support rolling updates of large-scale systems @@ -45,11 +45,24 @@ are also different. The Akka remoting is a separate jar file. Make sure that you have the following dependency in your project: -@@@vars -``` -"com.typesafe.akka" %% "akka-remote" % "$akka.version$" -``` -@@@ +Scala +: @@@vars + ``` + "com.typesafe.akka" %% "akka-remote" % "$akka.version$" + ``` + @@@ + +Java +: @@@vars + ``` + + com.typesafe.akka + akka-remote_$scala.binary_version$ + $akka.version$ + + ``` + @@@ + To enable remote capabilities in your Akka project you should, at a minimum, add the following changes to your `application.conf` file: @@ -122,7 +135,7 @@ In order to communicate with an actor, it is necessary to have its `ActorRef`. I the creator of the actor (the caller of `actorOf()`) is who gets the `ActorRef` for an actor that it can then send to other actors. In other words: - * An Actor can get a remote Actor's reference simply by receiving a message from it (as it's available as *sender()* then), + * An Actor can get a remote Actor's reference simply by receiving a message from it (as it's available as @scala[`sender()`]@java[`getSender()`] then), or inside of a remote message (e.g. *PleaseReply(message: String, remoteActorRef: ActorRef)*) Alternatively, an actor can look up another located at a known path using @@ -137,10 +150,18 @@ In the next sections the two alternatives are described in detail. `actorSelection(path)` will obtain an `ActorSelection` to an Actor on a remote node, e.g.: -``` -val selection = - context.actorSelection("akka://actorSystemName@10.0.0.1:25520/user/actorName") -``` +Scala +: ``` + val selection = + context.actorSelection("akka://actorSystemName@10.0.0.1:25520/user/actorName") + ``` + +Java +: ``` + ActorSelection selection = + context.actorSelection("akka://actorSystemName@10.0.0.1:25520/user/actorName"); + ``` + As you can see from the example above the following pattern is used to find an actor on a remote node: @@ -156,9 +177,20 @@ Unlike with earlier remoting, the protocol field is always *akka* as pluggable t Once you obtained a selection to the actor you can interact with it in the same way you would with a local actor, e.g.: -``` -selection ! "Pretty awesome feature" -``` +Scala +: @@@vars + ``` + selection ! "Pretty awesome feature" + ``` + @@@ + +Java +: @@@vars + ``` + selection.tell("Pretty awesome feature", getSelf()); + ``` + @@@ + To acquire an `ActorRef` for an `ActorSelection` you need to send a message to the selection and use the `sender` reference of the reply from @@ -207,7 +239,11 @@ which in this sample corresponds to `sampleActorSystem@127.0.0.1:2553`. 
Once you have configured the properties above you would do the following in code: -@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #sample-actor } +Scala +: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #sample-actor } + +Java +: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #sample-actor } The actor class `SampleActor` has to be available to the runtimes using it, i.e. the classloader of the actor systems has to have a JAR containing the class. @@ -243,15 +279,27 @@ precedence. With these imports: -@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #import } +Scala +: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #import } + +Java +: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #import } and a remote address like this: -@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #make-address-artery } +Scala +: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #make-address-artery } + +Java +: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #make-address-artery } you can advise the system to create a child on that remote node like so: -@@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #deploy } +Scala +: @@snip [RemoteDeploymentDocSpec.scala]($code$/scala/docs/remoting/RemoteDeploymentDocSpec.scala) { #deploy } + +Java +: @@snip [RemoteDeploymentDocTest.java]($code$/java/jdocs/remoting/RemoteDeploymentDocTest.java) { #deploy } ### Remote deployment whitelist @@ -270,7 +318,6 @@ should not allow others to remote deploy onto it. The full settings section may Actor classes not included in the whitelist will not be allowed to be remote deployed onto this system. - ## Remote Security An `ActorSystem` should not be exposed via Akka Remote (Artery) over plain Aeron/UDP to an untrusted network (e.g. internet). @@ -280,7 +327,7 @@ so if network security is not considered as enough protection the classic remoti Best practice is that Akka remoting nodes should only be accessible from the adjacent network. -It is also security best practice to @ref:[disable the Java serializer](remoting-artery.md#disable-java-serializer-java-artery) because of +It is also security best practice to @ref:[disable the Java serializer](#disabling-the-java-serializer) because of its multiple [known attack surfaces](https://community.hpe.com/t5/Security-Research/The-perils-of-Java-deserialization/ba-p/6838995). ### Untrusted Mode @@ -307,7 +354,7 @@ as a marker trait to user-defined messages. Untrusted mode does not give full protection against attacks by itself. It makes it slightly harder to perform malicious or unintended actions but -it should be complemented with [disabled Java serializer](#disable-java-serializer-scala-artery). +it should be complemented with @ref:[disabled Java serializer](#disabling-the-java-serializer) Additional protection can be achieved when running in an untrusted network by network security (e.g. firewalls). 
@@ -318,9 +365,10 @@ permission to receive actor selection messages can be granted to specific actors defined in configuration: ``` -akka.remote.artery..trusted-selection-paths = ["/user/receptionist", "/user/namingService"] +akka.remote.artery.trusted-selection-paths = ["/user/receptionist", "/user/namingService"] ``` + The actual message must still not be of type `PossiblyHarmful`. In summary, the following operations are ignored by a system configured in @@ -485,7 +533,11 @@ remained the same, we recommend reading the @ref:[Serialization](serialization.m Implementing an `akka.serialization.ByteBufferSerializer` works the same way as any other serializer, -@@snip [Serializer.scala]($akka$/akka-actor/src/main/scala/akka/serialization/Serializer.scala) { #ByteBufferSerializer } +Scala +: @@snip [Serializer.scala]($akka$/akka-actor/src/main/scala/akka/serialization/Serializer.scala) { #ByteBufferSerializer } + +Java +: @@snip [ByteBufferSerializerDocTest.java]($code$/java/jdocs/actor/ByteBufferSerializerDocTest.java) { #ByteBufferSerializer-interface } Implementing a serializer for Artery is therefore as simple as implementing this interface, and binding the serializer as usual (which is explained in @ref:[Serialization](serialization.md)). @@ -496,9 +548,12 @@ The array based methods will be used when `ByteBuffer` is not used, e.g. in Akka Note that the array based methods can be implemented by delegation like this: -@@snip [ByteBufferSerializerDocSpec.scala]($code$/scala/docs/actor/ByteBufferSerializerDocSpec.scala) { #bytebufserializer-with-manifest } +Scala +: @@snip [ByteBufferSerializerDocSpec.scala]($code$/scala/docs/actor/ByteBufferSerializerDocSpec.scala) { #bytebufserializer-with-manifest } + +Java +: @@snip [ByteBufferSerializerDocTest.java]($code$/java/jdocs/actor/ByteBufferSerializerDocTest.java) { #bytebufserializer-with-manifest } - ### Disabling the Java Serializer It is possible to completely disable Java Serialization for the entire Actor system. @@ -566,12 +621,11 @@ This configuration setting will send messages to the defined remote actor paths. It requires that you create the destination actors on the remote nodes with matching paths. That is not done by the router. - ## Remoting Sample -You can download a ready to run @extref[remoting sample](ecs:akka-samples-remote-scala) +You can download a ready to run @scala[@extref[remoting sample](ecs:akka-samples-remote-scala)]@java[@extref[remoting sample](ecs:akka-samples-remote-java)] together with a tutorial for a more hands-on experience. The source code of this sample can be found in the -@extref[Akka Samples Repository](samples:akka-sample-remote-scala). +@scala[@extref[Akka Samples Repository](samples:akka-sample-remote-scala)]@java[@extref[Akka Samples Repository](samples:akka-sample-remote-java)]. ## Performance tuning @@ -776,7 +830,7 @@ containers the hostname and port pair that Akka binds to will be different than host name and port pair that is used to connect to the system from the outside. This requires special configuration that sets both the logical and the bind pairs for remoting. -```ruby +``` akka { remote { artery { diff --git a/akka-docs/src/main/paradox/scala/remoting.md b/akka-docs/src/main/paradox/scala/remoting.md index 51ed1d9a02..56c430356b 100644 --- a/akka-docs/src/main/paradox/scala/remoting.md +++ b/akka-docs/src/main/paradox/scala/remoting.md @@ -1,6 +1,6 @@ # Remoting -For an introduction of remoting capabilities of Akka please see @ref[Location Transparency](general/remoting.md). 
+For an introduction of remoting capabilities of Akka please see @ref:[Location Transparency](general/remoting.md). @@@ note @@ -596,7 +596,7 @@ the other (the "server"). Note that if TLS is enabled with mutual authentication there is still a risk that an attacker can gain access to a valid certificate by compromising any node with certificates issued by the same internal PKI tree. -See also a description of the settings in the @ref[Remote Configuration](remoting.md#remote-configuration) section. +See also a description of the settings in the @ref:[Remote Configuration](remoting.md#remote-configuration) section. @@@ note diff --git a/akka-docs/src/main/paradox/scala/scheduler.md b/akka-docs/src/main/paradox/scala/scheduler.md index ba0edabab5..24d6946565 100644 --- a/akka-docs/src/main/paradox/scala/scheduler.md +++ b/akka-docs/src/main/paradox/scala/scheduler.md @@ -10,6 +10,10 @@ You can schedule sending of messages to actors and execution of tasks (functions or Runnable). You will get a `Cancellable` back that you can call `cancel` on to cancel the execution of the scheduled operation. +When scheduling periodic or single messages in an actor to itself it is recommended to +use the @ref:[Actor Timers](actors.md#actors-timers) instead of using the `Scheduler` +directly. + The scheduler in Akka is designed for high-throughput of thousands up to millions of triggers. The prime use-case being triggering Actor receive timeouts, Future timeouts, circuit breakers and other time dependent events which happen all-the-time and in many diff --git a/akka-docs/src/main/paradox/scala/security/2017-08-09-camel.md b/akka-docs/src/main/paradox/scala/security/2017-08-09-camel.md new file mode 100644 index 0000000000..d01ede83fc --- /dev/null +++ b/akka-docs/src/main/paradox/scala/security/2017-08-09-camel.md @@ -0,0 +1,31 @@ +# Camel Dependency, Fixed in Akka 2.5.4 + +### Date + +9 August 2017 + +### Description of Vulnerability + +Apache Camel's Validation Component is vulnerable against SSRF via remote DTDs and XXE, as described in [CVE-2017-5643](https://nvd.nist.gov/vuln/detail/CVE-2017-5643) + +To protect against such attacks the system should be updated to Akka *2.4.20*, *2.5.4* or later. Dependencies to Camel libraries should be updated to version 2.17.7. + +### Severity + +The [CVSS](https://en.wikipedia.org/wiki/CVSS) score of this vulnerability is 7.4 (High), according to [CVE-2017-5643](https://nvd.nist.gov/vuln/detail/CVE-2017-5643). + +### Affected Versions + + * Akka *2.4.19* and prior + * Akka *2.5.3* and prior + +### Fixed Versions + +We have prepared patches for the affected versions, and have released the following versions which resolve the issue: + + * Akka *2.4.20* (Scala 2.11, 2.12) + * Akka *2.5.4* (Scala 2.11, 2.12) + +### Acknowledgements + +We would like to thank Thomas Szymanski for bringing this issue to our attention. diff --git a/akka-docs/src/main/paradox/scala/security/index.md b/akka-docs/src/main/paradox/scala/security/index.md index ab38599ff2..6e536ede39 100644 --- a/akka-docs/src/main/paradox/scala/security/index.md +++ b/akka-docs/src/main/paradox/scala/security/index.md @@ -29,5 +29,6 @@ to ensure that a fix can be provided without delay. 
@@@ index * [2017-02-10-java-serialization](2017-02-10-java-serialization.md) +* [2017-08-09-camel](2017-08-09-camel.md) @@@ diff --git a/akka-docs/src/main/paradox/scala/stream/stages-overview.md b/akka-docs/src/main/paradox/scala/stream/stages-overview.md index c2aeea459f..b320f9a838 100644 --- a/akka-docs/src/main/paradox/scala/stream/stages-overview.md +++ b/akka-docs/src/main/paradox/scala/stream/stages-overview.md @@ -541,6 +541,24 @@ Sources and sinks for integrating with `java.io.InputStream` and `java.io.Output `StreamConverters`. As they are blocking APIs the implementations of these stages are run on a separate dispatcher configured through the `akka.stream.blocking-io-dispatcher`. +@@@ warning + +Be aware that `asInputStream` and `asOutputStream` materialize a blocking `InputStream` and `OutputStream` respectively. +They will block the thread until data becomes available from upstream. +Because of this blocking nature these objects cannot be used in `mapMaterializedValue`, as that causes a deadlock +of the stream materialization process. +For example, the following snippet will fail with a timeout exception: + +```scala +... +.toMat(StreamConverters.asInputStream().mapMaterializedValue { inputStream ⇒ + inputStream.read() // this could block forever + ... +}).run() +``` + +@@@ + --------------------------------------------------------------- ### fromOutputStream @@ -688,6 +706,18 @@ depending on being backpressured by downstream or not. --------------------------------------------------------------- +### alsoTo + +Attaches the given `Sink` to this `Flow`, meaning that elements that pass through will also be sent to the `Sink`. + +**emits** when an element is available and demand exists both from the Sink and the downstream + +**backpressures** when downstream or Sink backpressures + +**completes** when upstream completes + +--------------------------------------------------------------- + ### map Transform each element in the stream by calling a mapping function with it and passing the returned value downstream. diff --git a/akka-docs/src/main/paradox/scala/stream/stream-cookbook.md b/akka-docs/src/main/paradox/scala/stream/stream-cookbook.md index 015bc65196..e5959a2796 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-cookbook.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-cookbook.md @@ -25,12 +25,20 @@ general, more targeted recipes are available as separate sections (@ref:[Buffers The simplest solution is to simply use a `map` operation and use `println` to print the elements received to the console. While this recipe is rather simplistic, it is often suitable for a quick debug session. -@@snip [RecipeLoggingElements.scala]($code$/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #println-debug } +Scala +: @@snip [RecipeLoggingElements.scala]($code$/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #println-debug } + +Java +: @@snip [RecipeLoggingElements.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #println-debug } Another approach to logging is to use `log()` operation which allows configuring logging for elements flowing through the stream as well as completion and erroring.
-@@snip [RecipeLoggingElements.scala]($code$/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #log-custom } +Scala +: @@snip [RecipeLoggingElements.scala]($code$/scala/docs/stream/cookbook/RecipeLoggingElements.scala) { #log-custom } + +Java +: @@snip [RecipeLoggingElements.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java) { #log-custom } ### Flattening a stream of sequences @@ -38,27 +46,39 @@ the stream as well as completion and erroring. all the nested elements inside the sequences separately. The `mapConcat` operation can be used to implement a one-to-many transformation of elements using a mapper function -in the form of `In => immutable.Seq[Out]`. In this case we want to map a `Seq` of elements to the elements in the -collection itself, so we can just call `mapConcat(identity)`. +in the form of @scala[`In => immutable.Seq[Out]`] @java[`In -> List`]. In this case we want to map a @scala[`Seq`] @java[`List`] of elements to the elements in the +collection itself, so we can just call @scala[`mapConcat(identity)`] @java[`mapConcat(l -> l)`]. -@@snip [RecipeFlattenSeq.scala]($code$/scala/docs/stream/cookbook/RecipeFlattenSeq.scala) { #flattening-seqs } +Scala +: @@snip [RecipeFlattenSeq.scala]($code$/scala/docs/stream/cookbook/RecipeFlattenSeq.scala) { #flattening-seqs } + +Java +: @@snip [RecipeFlattenList.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeFlattenList.java) { #flattening-lists } ### Draining a stream to a strict collection **Situation:** A possibly unbounded sequence of elements is given as a stream, which needs to be collected into a Scala collection while ensuring boundedness A common situation when working with streams is one where we need to collect incoming elements into a Scala collection. -This operation is supported via `Sink.seq` which materializes into a `Future[Seq[T]]`. +This operation is supported via `Sink.seq` which materializes into a @scala[`Future[Seq[T]]`] @java[`CompletionStage>`]. The function `limit` or `take` should always be used in conjunction in order to guarantee stream boundedness, thus preventing the program from running out of memory. For example, this is best avoided: -@@snip [RecipeSeq.scala]($code$/scala/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-unsafe } +Scala +: @@snip [RecipeSeq.scala]($code$/scala/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-unsafe } -Rather, use `limit` or `take` to ensure that the resulting `Seq` will contain only up to `max` elements: +Java +: @@snip [RecipeSeq.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-unsafe } -@@snip [RecipeSeq.scala]($code$/scala/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-safe } +Rather, use `limit` or `take` to ensure that the resulting @scala[`Seq`] @java[`List`] will contain only up to @scala[`max`] @java[`MAX_ALLOWED_SIZE`] elements: + +Scala +: @@snip [RecipeSeq.scala]($code$/scala/docs/stream/cookbook/RecipeSeq.scala) { #draining-to-seq-safe } + +Java +: @@snip [RecipeSeq.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSeq.java) { #draining-to-list-safe } ### Calculating the digest of a ByteString stream @@ -75,7 +95,12 @@ At this point we want to emit the digest value, but we cannot do it with `push` be no downstream demand. Instead we call `emit` which will temporarily replace the handlers, emit the provided value when demand comes in and then reset the stage state. It will then complete the stage. 
-@@snip [RecipeDigest.scala]($code$/scala/docs/stream/cookbook/RecipeDigest.scala) { #calculating-digest } +Scala +: @@snip [RecipeDigest.scala]($code$/scala/docs/stream/cookbook/RecipeDigest.scala) { #calculating-digest } + +Java +: @@snip [RecipeDigest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest } +: @@snip [RecipeDigest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDigest.java) { #calculating-digest2 } ### Parsing lines from a stream of ByteStrings @@ -84,18 +109,26 @@ demand comes in and then reset the stage state. It will then complete the stage. characters (or, alternatively, containing binary frames delimited by a special delimiter byte sequence) which needs to be parsed. -The `Framing` helper object contains a convenience method to parse messages from a stream of `ByteString` s: +The `Framing` helper @scala[object] @java[class] contains a convenience method to parse messages from a stream of `ByteString` s: -@@snip [RecipeParseLines.scala]($code$/scala/docs/stream/cookbook/RecipeParseLines.scala) { #parse-lines } +Scala +: @@snip [RecipeParseLines.scala]($code$/scala/docs/stream/cookbook/RecipeParseLines.scala) { #parse-lines } + +Java +: @@snip [RecipeParseLines.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeParseLines.java) { #parse-lines } ### Dealing with compressed data streams **Situation:** A gzipped stream of bytes is given as a stream of `ByteString` s, for example from a `FileIO` source. -The `Compression` helper object contains convenience methods for decompressing data streams compressed with +The `Compression` helper @scala[object] @java[class] contains convenience methods for decompressing data streams compressed with Gzip or Deflate. -@@snip [RecipeDecompress.scala]($code$/scala/docs/stream/cookbook/RecipeDecompress.scala) { #decompress-gzip } +Scala +: @@snip [RecipeDecompress.scala]($code$/scala/docs/stream/cookbook/RecipeDecompress.scala) { #decompress-gzip } + +Java +: @@snip [RecipeDecompress.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDecompress.java) { #decompress-gzip } ### Implementing reduce-by-key @@ -103,28 +136,32 @@ Gzip or Deflate. elements. The "hello world" of reduce-by-key style operations is *wordcount* which we demonstrate below. Given a stream of words -we first create a new stream that groups the words according to the `identity` function, i.e. now +we first create a new stream that groups the words according to the @scala[`identity`] @java[`i -> i`] function, i.e. now we have a stream of streams, where every substream will serve identical words. To count the words, we need to process the stream of streams (the actual groups -containing identical words). `groupBy` returns a `SubFlow`, which +containing identical words). `groupBy` returns a @scala[`SubFlow`] @java[`SubSource`], which means that we transform the resulting substreams directly. In this case we use the `reduce` combinator to aggregate the word itself and the number of its -occurrences within a tuple `(String, Integer)`. Each substream will then +occurrences within a @scala[tuple `(String, Integer)`] @java[`Pair`]. Each substream will then emit one final value—precisely such a pair—when the overall input completes. As a last step we merge back these values from the substreams into one single output stream. 
-One noteworthy detail pertains to the `MaximumDistinctWords` parameter: this +One noteworthy detail pertains to the @scala[`MaximumDistinctWords`] @java[`MAXIMUM_DISTINCT_WORDS`] parameter: this defines the breadth of the groupBy and merge operations. Akka Streams is focused on bounded resource consumption and the number of concurrently open inputs to the merge operator describes the amount of resources needed by the -merge itself. Therefore only a finite number of substreams can be active at +merge itself. Therefore only a finite number of substreams can be active at any given time. If the `groupBy` operator encounters more keys than this number then the stream cannot continue without violating its resource bound, in this case `groupBy` will terminate with a failure. -@@snip [RecipeReduceByKey.scala]($code$/scala/docs/stream/cookbook/RecipeReduceByKey.scala) { #word-count } +Scala +: @@snip [RecipeReduceByKey.scala]($code$/scala/docs/stream/cookbook/RecipeReduceByKey.scala) { #word-count } + +Java +: @@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #word-count } By extracting the parts specific to *wordcount* into @@ -134,7 +171,13 @@ By extracting the parts specific to *wordcount* into we get a generalized version below: -@@snip [RecipeReduceByKey.scala]($code$/scala/docs/stream/cookbook/RecipeReduceByKey.scala) { #reduce-by-key-general } +Scala +: @@snip [RecipeReduceByKey.scala]($code$/scala/docs/stream/cookbook/RecipeReduceByKey.scala) { #reduce-by-key-general } + +Java +: @@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general } +: @@snip [RecipeReduceByKeyTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeReduceByKeyTest.java) { #reduce-by-key-general2 } + @@@ note @@ -152,12 +195,16 @@ Sometimes we want to map elements into multiple groups simultaneously. To achieve the desired result, we attack the problem in two steps: * first, using a function `topicMapper` that gives a list of topics (groups) a message belongs to, we transform our -stream of `Message` to a stream of `(Message, Topic)` where for each topic the message belongs to a separate pair +stream of `Message` to a stream of @scala[`(Message, Topic)`] @java[`Pair`] where for each topic the message belongs to a separate pair will be emitted. This is achieved by using `mapConcat` * Then we take this new stream of message topic pairs (containing a separate pair for each topic a given message belongs to) and feed it into groupBy, using the topic as the group key. -@@snip [RecipeMultiGroupBy.scala]($code$/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala) { #multi-groupby } +Scala +: @@snip [RecipeMultiGroupBy.scala]($code$/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala) { #multi-groupby } + +Java +: @@snip [RecipeMultiGroupByTest.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeMultiGroupByTest.java) { #multi-groupby } ## Working with Graphs @@ -172,14 +219,22 @@ trigger signal arrives. This recipe solves the problem by simply zipping the stream of `Message` elements with the stream of `Trigger` signals. Since `Zip` produces pairs, we simply map the output stream selecting the first element of the pair. 
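+The word-count case described above boils down to roughly the following inline sketch (the referenced snippets remain the authoritative versions; `MaximumDistinctWords` and the input source are assumptions made only for this example):
+
+```scala
+import akka.NotUsed
+import akka.stream.scaladsl.Source
+
+val MaximumDistinctWords = 1000
+val words: Source[String, NotUsed] = Source(List("hello", "world", "hello"))
+
+val counts: Source[(String, Int), NotUsed] =
+  words
+    .groupBy(MaximumDistinctWords, identity) // one substream per distinct word
+    .map(_ -> 1)
+    .reduce((l, r) ⇒ (l._1, l._2 + r._2))    // count occurrences within each substream
+    .mergeSubstreams
+```
+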
-@@snip [RecipeManualTrigger.scala]($code$/scala/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream } +Scala +: @@snip [RecipeManualTrigger.scala]($code$/scala/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream } + +Java +: @@snip [RecipeManualTrigger.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream } Alternatively, instead of using a `Zip`, and then using `map` to get the first element of the pairs, we can avoid creating the pairs in the first place by using `ZipWith` which takes a two argument function to produce the output element. If this function would return a pair of the two argument it would be exactly the behavior of `Zip` so `ZipWith` is a generalization of zipping. -@@snip [RecipeManualTrigger.scala]($code$/scala/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream-zipwith } +Scala +: @@snip [RecipeManualTrigger.scala]($code$/scala/docs/stream/cookbook/RecipeManualTrigger.scala) { #manually-triggered-stream-zipwith } + +Java +: @@snip [RecipeManualTrigger.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeManualTrigger.java) { #manually-triggered-stream-zipwith } ### Balancing jobs to a fixed pool of workers @@ -197,7 +252,12 @@ we wire the outputs of these workers to a `Merge` element that will collect the To make the worker stages run in parallel we mark them as asynchronous with *async*. -@@snip [RecipeWorkerPool.scala]($code$/scala/docs/stream/cookbook/RecipeWorkerPool.scala) { #worker-pool } +Scala +: @@snip [RecipeWorkerPool.scala]($code$/scala/docs/stream/cookbook/RecipeWorkerPool.scala) { #worker-pool } + +Java +: @@snip [RecipeWorkerPool.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool } +: @@snip [RecipeWorkerPool.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeWorkerPool.java) { #worker-pool2 } ## Working with rate @@ -216,7 +276,11 @@ the speed of the upstream unaffected by the downstream. When the upstream is faster, the reducing process of the `conflate` starts. Our reducer function simply takes the freshest element. This in a simple dropping operation. -@@snip [RecipeSimpleDrop.scala]($code$/scala/docs/stream/cookbook/RecipeSimpleDrop.scala) { #simple-drop } +Scala +: @@snip [RecipeSimpleDrop.scala]($code$/scala/docs/stream/cookbook/RecipeSimpleDrop.scala) { #simple-drop } + +Java +: @@snip [RecipeSimpleDrop.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeSimpleDrop.java) { #simple-drop } There is a more general version of `conflate` named `conflateWithSeed` that allows to express more complex aggregations, more similar to a `fold`. @@ -233,7 +297,12 @@ defining a dropping strategy instead of the default `Backpressure`. This allows between the different consumers (the buffer smooths out small rate variances), but also allows faster consumers to progress by dropping from the buffer of the slow consumers if necessary. 
-@@snip [RecipeDroppyBroadcast.scala]($code$/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala) { #droppy-bcast } +Scala +: @@snip [RecipeDroppyBroadcast.scala]($code$/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala) { #droppy-bcast } + +Java +: @@snip [RecipeDroppyBroadcast.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast } +: @@snip [RecipeDroppyBroadcast.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeDroppyBroadcast.java) { #droppy-bcast2 } ### Collecting missed ticks @@ -252,7 +321,11 @@ count of the missed ticks so far. As a result, we have a flow of `Int` where the number represents the missed ticks. A number 0 means that we were able to consume the tick fast enough (i.e. zero means: 1 non-missed tick + 0 missed ticks) -@@snip [RecipeMissedTicks.scala]($code$/scala/docs/stream/cookbook/RecipeMissedTicks.scala) { #missed-ticks } +Scala +: @@snip [RecipeMissedTicks.scala]($code$/scala/docs/stream/cookbook/RecipeMissedTicks.scala) { #missed-ticks } + +Java +: @@snip [RecipeMissedTicks.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeMissedTicks.java) { #missed-ticks } ### Create a stream processor that repeats the last element seen @@ -266,20 +339,28 @@ to feed the downstream if no upstream element is ready yet. In the `onPush()` ha `currentValue` variable and immediately relieve the upstream by calling `pull()`. The downstream `onPull` handler is very similar, we immediately relieve the downstream by emitting `currentValue`. -@@snip [RecipeHold.scala]($code$/scala/docs/stream/cookbook/RecipeHold.scala) { #hold-version-1 } +Scala +: @@snip [RecipeHold.scala]($code$/scala/docs/stream/cookbook/RecipeHold.scala) { #hold-version-1 } + +Java +: @@snip [RecipeHold.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-1 } While it is relatively simple, the drawback of the first version is that it needs an arbitrary initial element which is not always possible to provide. Hence, we create a second version where the downstream might need to wait in one single case: if the very first element is not yet available. We introduce a boolean variable `waitingFirstValue` to denote whether the first element has been provided or not -(alternatively an `Option` can be used for `currentValue` or if the element type is a subclass of AnyRef +(alternatively an @scala[`Option`] @java[`Optional`] can be used for `currentValue` or if the element type is a subclass of @scala[`AnyRef`] @java[`Object`] a null can be used with the same purpose). In the downstream `onPull()` handler the difference from the previous version is that we check if we have received the first value and only emit if we have. This leads to that when the first element comes in we must check if there possibly already was demand from downstream so that we in that case can push the element directly. -@@snip [RecipeHold.scala]($code$/scala/docs/stream/cookbook/RecipeHold.scala) { #hold-version-2 } +Scala +: @@snip [RecipeHold.scala]($code$/scala/docs/stream/cookbook/RecipeHold.scala) { #hold-version-2 } + +Java +: @@snip [RecipeHold.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeHold.java) { #hold-version-2 } ### Globally limiting the rate of a set of streams @@ -299,13 +380,21 @@ of the sender is added to a queue. Once the timer for replenishing the pending p message, we increment the pending permits counter and send a reply to each of the waiting senders. If there are more waiting senders than permits available we will stay in the `closed` state. 
-@@snip [RecipeGlobalRateLimit.scala]($code$/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-actor } +Scala +: @@snip [RecipeGlobalRateLimit.scala]($code$/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-actor } + +Java +: @@snip [RecipeGlobalRateLimit.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-actor } To create a Flow that uses this global limiter actor we use the `mapAsync` function with the combination of the `ask` pattern. We also define a timeout, so if a reply is not received during the configured maximum wait period the returned future from `ask` will fail, which will fail the corresponding stream as well. -@@snip [RecipeGlobalRateLimit.scala]($code$/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-flow } +Scala +: @@snip [RecipeGlobalRateLimit.scala]($code$/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala) { #global-limiter-flow } + +Java +: @@snip [RecipeGlobalRateLimit.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeGlobalRateLimit.java) { #global-limiter-flow } @@@ note @@ -332,7 +421,12 @@ and an empty or nonempty remaining buffer. Both `onPush()` and `onPull()` calls `emitChunk()` the only difference is that the push handler also stores the incoming chunk by appending to the end of the buffer. -@@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #bytestring-chunker } +Scala +: @@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #bytestring-chunker } + +Java +: @@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker } +: @@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytestring-chunker2 } ### Limit the number of bytes passing through a stream of ByteStrings @@ -343,8 +437,13 @@ This recipe uses a `GraphStage` to implement the desired feature. In the only ha `onPush()` we just update a counter and see if it gets larger than `maximumBytes`. If a violation happens we signal failure, otherwise we forward the chunk we have received. -@@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #bytes-limiter } - +Scala +: @@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #bytes-limiter } + +Java +: @@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter } + @@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #bytes-limiter2 } + ### Compact ByteStrings in a stream of ByteStrings **Situation:** After a long stream of transformations, due to their immutable, structural sharing nature `ByteString` s may @@ -354,8 +453,12 @@ chain we want to have clean copies that are no longer referencing the original ` The recipe is a simple use of map, calling the `compact()` method of the `ByteString` elements. This does copying of the underlying arrays, so this should be the last element of a long chain if used. 
-@@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #compacting-bytestrings } - +Scala +: @@snip [RecipeByteStrings.scala]($code$/scala/docs/stream/cookbook/RecipeByteStrings.scala) { #compacting-bytestrings } + +Java +: @@snip [RecipeByteStrings.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeByteStrings.java) { #compacting-bytestrings } + ### Injecting keep-alive messages into a stream of ByteStrings **Situation:** Given a communication channel expressed as a stream of `ByteString` s we want to inject keep-alive messages @@ -363,4 +466,8 @@ but only if this does not interfere with normal traffic. There is a built-in operation that allows to do this directly: -@@snip [RecipeKeepAlive.scala]($code$/scala/docs/stream/cookbook/RecipeKeepAlive.scala) { #inject-keepalive } \ No newline at end of file +Scala +: @@snip [RecipeKeepAlive.scala]($code$/scala/docs/stream/cookbook/RecipeKeepAlive.scala) { #inject-keepalive } + +Java +: @@snip [RecipeKeepAlive.java]($code$/java/jdocs/stream/javadsl/cookbook/RecipeKeepAlive.java) { #inject-keepalive } \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/stream/stream-customize.md b/akka-docs/src/main/paradox/scala/stream/stream-customize.md index b61fb86bbb..c1708666b7 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-customize.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-customize.md @@ -278,7 +278,7 @@ with care. ### Logging inside GraphStages Logging debug or other important information in your stages is often a very good idea, especially when developing -more advances stages which may need to be debugged at some point. +more advanced stages which may need to be debugged at some point. @@@ div { .group-scala } diff --git a/akka-docs/src/main/paradox/scala/stream/stream-dynamic.md b/akka-docs/src/main/paradox/scala/stream/stream-dynamic.md index 026399ff94..f408514ccd 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-dynamic.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-dynamic.md @@ -74,7 +74,7 @@ before any materialization takes place. @@@ -## Dynamic fan-in and fan-out with MergeHub and BroadcastHub +## Dynamic fan-in and fan-out with MergeHub, BroadcastHub and PartitionHub There are many cases when consumers or producers of a certain service (represented as a Sink, Source, or possibly Flow) are dynamic and not known in advance. The Graph DSL does not allow to represent this, all connections of the graph @@ -169,4 +169,71 @@ Scala : @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #pub-sub-4 } Java -: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #pub-sub-4 } \ No newline at end of file +: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #pub-sub-4 } + +### Using the PartitionHub + +**This is a @ref:[may change](../common/may-change.md) feature*** + +A `PartitionHub` can be used to route elements from a common producer to a dynamic set of consumers. +The selection of consumer is done with a function. Each element can be routed to only one consumer. + +The rate of the producer will be automatically adapted to the slowest consumer. In this case, the hub is a `Sink` +to which the single producer must be attached first. Consumers can only be attached once the `Sink` has +been materialized (i.e. the producer has been started). 
One example of using the `PartitionHub`: + +Scala +: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #partition-hub } + +Java +: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #partition-hub } + +The `partitioner` function takes two parameters; the first is the number of active consumers and the second +is the stream element. The function should return the index of the selected consumer for the given element, +i.e. an `int` greater than or equal to 0 and less than the number of consumers. + +The resulting `Source` can be materialized any number of times, each materialization effectively attaching +a new consumer. If there are no consumers attached to this hub then it will not drop any elements but instead +backpressure the upstream producer until consumers arrive. This behavior can be tweaked by using combinators such as +`.buffer`, for example with a drop strategy, or by attaching a consumer that drops all messages. If there +are no other consumers, this will ensure that the producer is kept drained (dropping all elements) and once a new +consumer arrives and messages are routed to it, the hub will adaptively slow down, ensuring that no more messages +are dropped. + +It is possible to define how many initial consumers are required before the hub starts emitting any messages +to the attached consumers. While there are not enough consumers attached, messages are buffered and, when the +buffer is full, the upstream producer is backpressured. No messages are dropped. + +The above example illustrates a stateless partition function. For more advanced stateful routing the @java[`ofStateful`] +@scala[`statefulSink`] can be used. Here is an example of a stateful round-robin function: + +Scala +: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #partition-hub-stateful } + +Java +: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #partition-hub-stateful } + +Note that it is a factory of a function, in order to be able to hold stateful variables that are +unique for each materialization. @java[In this example the `partitioner` function is implemented as a class to +be able to hold the mutable variable. A new instance of `RoundRobin` is created for each materialization of the hub.] + +@@@ div { .group-java } +@@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #partition-hub-stateful-function } +@@@ + +The function takes two parameters; the first is information about active consumers, including an array of +consumer identifiers, and the second is the stream element. The function should return the selected consumer +identifier for the given element. The function will never be called when there are no active consumers, i.e. +there is always at least one element in the array of identifiers. + +Another interesting type of routing is to prefer routing to the fastest consumers. The `ConsumerInfo` +has an accessor `queueSize` that is the approximate number of buffered elements for a consumer. +A larger value than for other consumers can be an indication that the consumer is slow. +Note that this is a moving target since the elements are consumed concurrently.
Here is an example of +a hub that routes to the consumer with least buffered elements: + +Scala +: @@snip [HubsDocSpec.scala]($code$/scala/docs/stream/HubsDocSpec.scala) { #partition-hub-fastest } + +Java +: @@snip [HubDocTest.java]($code$/java/jdocs/stream/HubDocTest.java) { #partition-hub-fastest } diff --git a/akka-docs/src/main/paradox/scala/stream/stream-error.md b/akka-docs/src/main/paradox/scala/stream/stream-error.md index 73655c4e55..7785fec202 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-error.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-error.md @@ -1,17 +1,135 @@ -# Error Handling +# Error Handling in Streams -Strategies for how to handle exceptions from processing stream elements can be defined when -materializing the stream. The error handling strategies are inspired by actor supervision -strategies, but the semantics have been adapted to the domain of stream processing. +When a stage in a stream fails this will normally lead to the entire stream being torn down. +Each of the stages downstream gets informed about the failure and each upstream stage sees a cancellation. -@@@ warning +In many cases you may want to avoid complete stream failure, this can be done in a few different ways: -*ZipWith*, *GraphStage* junction, *ActorPublisher* source and *ActorSubscriber* sink -components do not honour the supervision strategy attribute yet. + * `recover` to emit a final element then complete the stream normally on upstream failure + * `recoverWithRetries` to create a new upstream and start consuming from that on failure + * Restarting sections of the stream after a backoff + * Using a supervision strategy for stages that support it + +In addition to these built in tools for error handling, a common pattern is to wrap the stream +inside an actor, and have the actor restart the entire stream on failure. + +## Recover + +`recover` allows you to emit a final element and then complete the stream on an upstream failure. +Deciding which exceptions should be recovered is done through a `PartialFunction`. If an exception +does not have a @scala[matching case] @java[match defined] the stream is failed. + +Recovering can be useful if you want to gracefully complete a stream on failure while letting +downstream know that there was a failure. + +Scala +: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #recover } + +Java +: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #recover } + +This will output: + +Scala +: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #recover-output } + +Java +: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #recover-output } + + +## Recover with retries + +`recoverWithRetries` allows you to put a new upstream in place of the failed one, recovering +stream failures up to a specified maximum number of times. + +Deciding which exceptions should be recovered is done through a `PartialFunction`. If an exception +does not have a @scala[matching case] @java[match defined] the stream is failed. 
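+
+For illustration, a small, untested sketch of switching to a fallback source on failure (the element
+values are made up; the tested snippets follow):
+
+```scala
+import akka.actor.ActorSystem
+import akka.stream.ActorMaterializer
+import akka.stream.scaladsl.Source
+
+implicit val system = ActorSystem("recover-sketch")
+implicit val materializer = ActorMaterializer()
+
+// A fallback source to switch to when the original source fails.
+val planB = Source(List("five", "six", "seven", "eight"))
+
+Source(1 to 10)
+  .map(n => if (n < 5) n.toString else throw new RuntimeException("Boom!"))
+  .recoverWithRetries(attempts = 1, {
+    case _: RuntimeException => planB
+  })
+  .runForeach(println)
+```
+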
+
+Scala
+: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #recoverWithRetries }
+
+Java
+: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #recoverWithRetries }
+
+This will output:
+
+Scala
+: @@snip [FlowErrorDocSpec.scala]($code$/scala/docs/stream/FlowErrorDocSpec.scala) { #recoverWithRetries-output }
+
+Java
+: @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #recoverWithRetries-output }
+
+## Delayed restarts with a backoff stage
+
+Just as Akka provides the @ref:[backoff supervision pattern for actors](../general/supervision.md#backoff-supervisor), Akka Streams
+also provides a `RestartSource`, `RestartSink` and `RestartFlow` for implementing the so-called *exponential backoff
+supervision strategy*, starting a stage again when it fails, each time with a growing time delay between restarts.
+
+This pattern is useful when the stage fails or completes because some external resource is not available
+and we need to give it some time to start up again. One of the prime examples of when this is useful is
+when a WebSocket connection fails due to the HTTP server it's running on going down, perhaps because it is overloaded.
+By using an exponential backoff, we avoid going into a tight reconnect loop, which both gives the HTTP server some time
+to recover and avoids using needless resources on the client side.
+
+The following snippet shows how to create a backoff supervisor using @scala[`akka.stream.scaladsl.RestartSource`]
+@java[`akka.stream.javadsl.RestartSource`] which will supervise the given `Source`. The `Source` in this case is a
+stream of Server Sent Events, produced by akka-http. If the stream fails or completes at any point, the request will
+be made again, in increasing intervals of 3, 6, 12, 24 and finally 30 seconds (at which point it will remain capped due
+to the `maxBackoff` parameter):
+
+Scala
+: @@snip [RestartDocSpec.scala]($code$/scala/docs/stream/RestartDocSpec.scala) { #restart-with-backoff-source }
+
+Java
+: @@snip [RestartDocTest.java]($code$/java/jdocs/stream/RestartDocTest.java) { #restart-with-backoff-source }
+
+Using a `randomFactor` to add a little bit of additional variance to the backoff intervals
+is highly recommended, in order to avoid multiple streams re-starting at the exact same point in time,
+for example because they were stopped due to a shared resource such as the same server going down
+and re-starting after the same configured interval. By adding additional randomness to the
+re-start intervals the streams will start at slightly different points in time, thus avoiding
+large spikes of traffic hitting the recovering server or other resource that they all need to contact.
+
+The above `RestartSource` will never terminate unless the `Sink` it's fed into cancels. It will often be handy to use
+it in combination with a @ref:[`KillSwitch`](stream-dynamic.md#kill-switch), so that you can terminate it when needed:
+
+Scala
+: @@snip [RestartDocSpec.scala]($code$/scala/docs/stream/RestartDocSpec.scala) { #with-kill-switch }
+
+Java
+: @@snip [RestartDocTest.java]($code$/java/jdocs/stream/RestartDocTest.java) { #with-kill-switch }
+
+Sinks and flows can also be supervised, using @scala[`akka.stream.scaladsl.RestartSink` and `akka.stream.scaladsl.RestartFlow`]
+@java[`akka.stream.javadsl.RestartSink` and `akka.stream.javadsl.RestartFlow`].
The `RestartSink` is restarted when
+it cancels, while the `RestartFlow` is restarted when either the in port cancels, the out port completes, or the out
+port sends an error.
+
+## Supervision Strategies
+
+@@@ note
+
+The stages that support supervision strategies are explicitly documented to do so. If there is
+nothing in the documentation of a stage saying that it adheres to the supervision strategy,
+it means that the stage fails rather than applying supervision.

 @@@

-## Supervision Strategies
+The error handling strategies are inspired by actor supervision strategies, but the semantics
+have been adapted to the domain of stream processing. The most important difference is that
+supervision is not automatically applied to stream stages but is instead something that each stage
+has to implement explicitly.
+
+For many stages it may not even make sense to implement support for supervision strategies;
+this is especially true for stages connecting to external technologies where, for example, a
+failed connection will likely still fail if a new connection is tried immediately (see
+@ref:[Delayed restarts with a backoff stage](#delayed-restarts-with-a-backoff-stage) for such scenarios).
+
+For stages that do implement supervision, the strategies for how to handle exceptions from
+processing stream elements can be selected when materializing the stream, through the use of an attribute.

 There are three ways to handle exceptions from application code:
@@ -65,9 +183,10 @@ Scala

 Java
 : @@snip [FlowErrorDocTest.java]($code$/java/jdocs/stream/FlowErrorDocTest.java) { #restart-section }

-## Errors from mapAsync
+### Errors from mapAsync

-Stream supervision can also be applied to the futures of `mapAsync`.
+Stream supervision can also be applied to the futures of `mapAsync` and `mapAsyncUnordered`, even if such
+failures happen in the future rather than inside the stage itself.

 Let's say that we use an external service to lookup email addresses and we would like to
 discard those that cannot be found.
@@ -101,4 +220,5 @@ Java
 : @@snip [IntegrationDocTest.java]($code$/java/jdocs/stream/IntegrationDocTest.java) { #email-addresses-mapAsync-supervision }

 If we would not use `Resume` the default stopping strategy would complete the stream
-with failure on the first @scala[`Future`] @java[`CompletionStage`] that was completed @scala[with `Failure`]@java[exceptionally].
\ No newline at end of file
+with failure on the first @scala[`Future`] @java[`CompletionStage`] that was completed @scala[with `Failure`]@java[exceptionally].
+
diff --git a/akka-docs/src/main/paradox/scala/stream/stream-flows-and-basics.md b/akka-docs/src/main/paradox/scala/stream/stream-flows-and-basics.md
index d1a534b9fb..2f64bcf82a 100644
--- a/akka-docs/src/main/paradox/scala/stream/stream-flows-and-basics.md
+++ b/akka-docs/src/main/paradox/scala/stream/stream-flows-and-basics.md
@@ -254,7 +254,8 @@ but is not restricted to that—it could also mean opening files or socket conne
 Materialization is triggered at so called "terminal operations". Most notably this includes the various forms of the
 `run()` and `runWith()` methods defined on `Source` and `Flow` elements as well as a small number of special
 syntactic sugars for running with
-well-known sinks, such as @scala[`runForeach(el => ...)`] @java[`runForeach(el -> ...)`] (being an alias to @scala[`runWith(Sink.foreach(el => ...))`] @java[`runWith(Sink.foreach(el -> ...))`].
+well-known sinks, such as @scala[`runForeach(el => ...)`]@java[`runForeach(el -> ...)`] +(being an alias to @scala[`runWith(Sink.foreach(el => ...))`]@java[`runWith(Sink.foreach(el -> ...))`]). Materialization is currently performed synchronously on the materializing thread. The actual stream processing is handled by actors started up during the streams materialization, diff --git a/akka-docs/src/main/paradox/scala/stream/stream-graphs.md b/akka-docs/src/main/paradox/scala/stream/stream-graphs.md index 6f5d2b0592..f20e3362fc 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-graphs.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-graphs.md @@ -2,7 +2,7 @@ In Akka Streams computation graphs are not expressed using a fluent DSL like linear computations are, instead they are written in a more graph-resembling DSL which aims to make translating graph drawings (e.g. from notes taken -from design discussions, or illustrations in protocol specifications) to and from code simpler. In this section we’ll +from design discussions, or illustrations in protocol specifications) to and from code simpler. In this section we'll dive into the multiple ways of constructing and re-using graphs, as well as explain common pitfalls and how to avoid them. Graphs are needed whenever you want to perform any kind of fan-in ("multiple inputs") or fan-out ("multiple outputs") operations. @@ -18,23 +18,23 @@ Graphs are built from simple Flows which serve as the linear connections within which serve as fan-in and fan-out points for Flows. Thanks to the junctions having meaningful types based on their behaviour and making them explicit elements these elements should be rather straightforward to use. -Akka Streams currently provide these junctions (for a detailed list see @ref:[stages overview](stages-overview.md)): +Akka Streams currently provide these junctions (for a detailed list see @ref[stages overview](stages-overview.md)): * **Fan-out** - * `Broadcast[T]` – *(1 input, N outputs)* given an input element emits to each output - * `Balance[T]` – *(1 input, N outputs)* given an input element emits to one of its output ports - * `UnzipWith[In,A,B,...]` – *(1 input, N outputs)* takes a function of 1 input that given a value for each input emits N output elements (where N <= 20) - * `UnZip[A,B]` – *(1 input, 2 outputs)* splits a stream of `(A,B)` tuples into two streams, one of type `A` and one of type `B` + * @scala[`Broadcast[T]`]@java[`Broadcast`] – *(1 input, N outputs)* given an input element emits to each output + * @scala[`Balance[T]`]@java[`Balance`] – *(1 input, N outputs)* given an input element emits to one of its output ports + * @scala[`UnzipWith[In,A,B,...]`]@java[`UnzipWith`] – *(1 input, N outputs)* takes a function of 1 input that given a value for each input emits N output elements (where N <= 20) + * @scala[`UnZip[A,B]`]@java[`UnZip`] – *(1 input, 2 outputs)* splits a stream of @scala[`(A,B)`]@java[`Pair`] tuples into two streams, one of type `A` and one of type `B` * **Fan-in** - * `Merge[In]` – *(N inputs , 1 output)* picks randomly from inputs pushing them one by one to its output - * `MergePreferred[In]` – like `Merge` but if elements are available on `preferred` port, it picks from it, otherwise randomly from `others` - * `MergePrioritized[In]` – like `Merge` but if elements are available on all input ports, it picks from them randomly based on their `priority` - * `ZipWith[A,B,...,Out]` – *(N inputs, 1 output)* which takes a function of N inputs that given a value for each input 
emits 1 output element - * `Zip[A,B]` – *(2 inputs, 1 output)* is a `ZipWith` specialised to zipping input streams of `A` and `B` into an `(A,B)` tuple stream - * `Concat[A]` – *(2 inputs, 1 output)* concatenates two streams (first consume one, then the second one) + * @scala[`Merge[In]`]@java[`Merge`] – *(N inputs , 1 output)* picks randomly from inputs pushing them one by one to its output + * @scala[`MergePreferred[In]`]@java[`MergePreferred`] – like `Merge` but if elements are available on `preferred` port, it picks from it, otherwise randomly from `others` + * @scala[`MergePrioritized[In]`]@java[`MergePrioritized`] – like `Merge` but if elements are available on all input ports, it picks from them randomly based on their `priority` + * @scala[`ZipWith[A,B,...,Out]`]@java[`ZipWith`] – *(N inputs, 1 output)* which takes a function of N inputs that given a value for each input emits 1 output element + * @scala[`Zip[A,B]`]@java[`Zip`] – *(2 inputs, 1 output)* is a `ZipWith` specialised to zipping input streams of `A` and `B` into a @scala[`(A,B)`]@java[`Pair(A,B)`] tuple stream + * @scala[`Concat[A]`]@java[`Concat`] – *(2 inputs, 1 output)* concatenates two streams (first consume one, then the second one) One of the goals of the GraphDSL DSL is to look similar to how one would draw a graph on a whiteboard, so that it is simple to translate a design from whiteboard to code and be able to relate those two. Let's illustrate this by translating @@ -44,10 +44,14 @@ the below hand drawn graph into Akka Streams: Such graph is simple to translate to the Graph DSL since each linear element corresponds to a `Flow`, and each circle corresponds to either a `Junction` or a `Source` or `Sink` if it is beginning -or ending a `Flow`. Junctions must always be created with defined type parameters, as otherwise the `Nothing` type -will be inferred. +or ending a `Flow`. @scala[Junctions must always be created with defined type parameters, as otherwise the `Nothing` type +will be inferred.] -@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #simple-graph-dsl } +Scala +: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #simple-graph-dsl } + +Java +: @@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #simple-graph-dsl } @@@ note @@ -56,13 +60,13 @@ refers to the same location in the resulting graph). @@@ -Notice the `import GraphDSL.Implicits._` which brings into scope the `~>` operator (read as "edge", "via" or "to") -and its inverted counterpart `<~` (for noting down flows in the opposite direction where appropriate). +@scala[Notice the `import GraphDSL.Implicits._` which brings into scope the `~>` operator (read as "edge", "via" or "to") +and its inverted counterpart `<~` (for noting down flows in the opposite direction where appropriate).] -By looking at the snippets above, it should be apparent that the `GraphDSL.Builder` object is *mutable*. -It is used (implicitly) by the `~>` operator, also making it a mutable operation as well. +By looking at the snippets above, it should be apparent that the @scala[`GraphDSL.Builder`]@java[`builder`] object is *mutable*. +@scala[It is used (implicitly) by the `~>` operator, also making it a mutable operation as well.] The reason for this design choice is to enable simpler creation of complex graphs, which may even contain cycles. -Once the GraphDSL has been constructed though, the `GraphDSL` instance *is immutable, thread-safe, and freely shareable*. 
+Once the GraphDSL has been constructed though, the @scala[`GraphDSL`]@java[`RunnableGraph`] instance *is immutable, thread-safe, and freely shareable*. The same is true of all graph pieces—sources, sinks, and flows—once they are constructed. This means that you can safely re-use one given Flow or junction in multiple places in a processing graph. @@ -72,13 +76,17 @@ is passed to it and return the inlets and outlets of the resulting copy so that Another alternative is to pass existing graphs—of any shape—into the factory method that produces a new graph. The difference between these approaches is that importing using `builder.add(...)` ignores the materialized value of the imported graph while importing via the factory method allows its inclusion; -for more details see @ref:[Stream Materialization](stream-flows-and-basics.md#stream-materialization). +for more details see @ref[Stream Materialization](stream-flows-and-basics.md#stream-materialization). In the example below we prepare a graph that consists of two parallel streams, in which we re-use the same instance of `Flow`, yet it will properly be materialized as two connections between the corresponding Sources and Sinks: -@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-reusing-a-flow } +Scala +: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-reusing-a-flow } + +Java +: @@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-reusing-a-flow } ## Constructing and combining Partial Graphs @@ -86,10 +94,11 @@ materialized as two connections between the corresponding Sources and Sinks: Sometimes it is not possible (or needed) to construct the entire computation graph in one place, but instead construct all of its different phases in different places and in the end connect them all into a complete graph and run it. -This can be achieved by returning a different `Shape` than `ClosedShape`, for example `FlowShape(in, out)`, from the +This can be achieved by @scala[returning a different `Shape` than `ClosedShape`, for example `FlowShape(in, out)`, from the function given to `GraphDSL.create`. See [Predefined shapes](#predefined-shapes)) for a list of such predefined shapes. - -Making a `Graph` a `RunnableGraph` requires all ports to be connected, and if they are not +Making a `Graph` a `RunnableGraph`]@java[using the returned `Graph` from `GraphDSL.create()` rather than +passing it to `RunnableGraph.fromGraph()` to wrap it in a `RunnableGraph`.The reason of representing it as a different type is that a +`RunnableGraph`] requires all ports to be connected, and if they are not it will throw an exception at construction time, which helps to avoid simple wiring errors while working with graphs. A partial graph however allows you to return the set of yet to be connected ports from the code block that @@ -99,11 +108,17 @@ Let's imagine we want to provide users with a specialized element that given 3 i the greatest int value of each zipped triple. We'll want to expose 3 input ports (unconnected sources) and one output port (unconnected sink). 
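+
+As a rough illustration (a sketch only; the tested snippets follow), such a partial graph can be built
+from two `ZipWith` stages computing pairwise maxima and exposed through a `UniformFanInShape`:
+
+```scala
+import akka.stream.UniformFanInShape
+import akka.stream.scaladsl.{ GraphDSL, ZipWith }
+
+// A partial graph with three unconnected inputs and one unconnected output,
+// emitting the maximum of each zipped triple.
+val pickMaxOfThree = GraphDSL.create() { implicit b =>
+  import GraphDSL.Implicits._
+
+  val zip1 = b.add(ZipWith[Int, Int, Int](math.max _))
+  val zip2 = b.add(ZipWith[Int, Int, Int](math.max _))
+  zip1.out ~> zip2.in0
+
+  UniformFanInShape(zip2.out, zip1.in0, zip1.in1, zip2.in1)
+}
+```
+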
-@@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #simple-partial-graph-dsl } +Scala +: @@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #simple-partial-graph-dsl } -As you can see, first we construct the partial graph that contains all the zipping and comparing of stream -elements. This partial graph will have three inputs and one output, wherefore we use the `UniformFanInShape`. -Then we import it (all of its nodes and connections) explicitly into the closed graph built in the second step in which all +Java +: @@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #simple-partial-graph-dsl } + + +As you can see, first we construct the partial graph that @scala[contains all the zipping and comparing of stream +elements. This partial graph will have three inputs and one output, wherefore we use the `UniformFanInShape`]@java[describes how to compute the maximum of two input streams. +then we reuse that twice while constructing the partial graph that extends this to three input streams]. +Then we import it (all of its nodes and connections) explicitly into the @scala[closed graph built in the second step]@java[last graph] in which all the undefined elements are rewired to real sources and sinks. The graph can then be run and yields the expected result. @@@ warning @@ -118,7 +133,7 @@ A partial graph also verifies that all ports are either connected or part of the ## Constructing Sources, Sinks and Flows from Partial Graphs -Instead of treating a partial graph as simply a collection of flows and junctions which may not yet all be +Instead of treating a @scala[partial graph]@java[`Graph`] as simply a collection of flows and junctions which may not yet all be connected it is sometimes useful to expose such a complex graph as a simpler structure, such as a `Source`, `Sink` or `Flow`. @@ -132,38 +147,60 @@ Being able to hide complex graphs inside of simple elements such as Sink / Sourc complex element and from there on treat it as simple compound stage for linear computations. In order to create a Source from a graph the method `Source.fromGraph` is used, to use it we must have a -`Graph[SourceShape, T]`. This is constructed using `GraphDSL.create` and returning a `SourceShape` -from the function passed in . The single outlet must be provided to the `SourceShape.of` method and will become +@scala[`Graph[SourceShape, T]`]@java[`Graph` with a `SourceShape`]. This is constructed using +@scala[`GraphDSL.create` and returning a `SourceShape` from the function passed in]@java[`GraphDSL.create` and providing building a `SourceShape` graph]. +The single outlet must be provided to the `SourceShape.of` method and will become “the sink that must be attached before this Source can run”. Refer to the example below, in which we create a Source that zips together two numbers, to see this graph construction in action: -@@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-from-partial-graph-dsl } +Scala +: @@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-from-partial-graph-dsl } -Similarly the same can be done for a `Sink[T]`, using `SinkShape.of` in which case the provided value -must be an `Inlet[T]`. 
For defining a `Flow[T]` we need to expose both an inlet and an outlet: +Java +: @@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-from-partial-graph-dsl } + + +Similarly the same can be done for a @scala[`Sink[T]`]@java[`Sink`], using `SinkShape.of` in which case the provided value +must be an @scala[`Inlet[T]`]@java[`Inlet`]. For defining a @scala[`Flow[T]`]@java[`Flow`] we need to expose both an @scala[inlet and an outlet]@java[undefined source and sink]: + +Scala +: @@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #flow-from-partial-graph-dsl } + +Java +: @@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #flow-from-partial-graph-dsl } -@@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #flow-from-partial-graph-dsl } ## Combining Sources and Sinks with simplified API -There is a simplified API you can use to combine sources and sinks with junctions like: `Broadcast[T]`, `Balance[T]`, -`Merge[In]` and `Concat[A]` without the need for using the Graph DSL. The combine method takes care of constructing +There is a simplified API you can use to combine sources and sinks with junctions like: +@scala[`Broadcast[T]`, `Balance[T]`,`Merge[In]` and `Concat[A]`]@java[`Broadcast`, `Balance`, `Merge` and `Concat`] +without the need for using the Graph DSL. The combine method takes care of constructing the necessary graph underneath. In following example we combine two sources into one (fan-in): -@@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-combine } +Scala +: @@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #source-combine } -The same can be done for a `Sink[T]` but in this case it will be fan-out: +Java +: @@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #source-combine } + + +The same can be done for a @scala[`Sink[T]`]@java[`Sink`] but in this case it will be fan-out: + +Scala +: @@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #sink-combine } + +Java +: @@snip [StreamPartialGraphDSLDocTest.java]($code$/java/jdocs/stream/StreamPartialGraphDSLDocTest.java) { #sink-combine } -@@snip [StreamPartialGraphDSLDocSpec.scala]($code$/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala) { #sink-combine } ## Building reusable Graph components It is possible to build reusable, encapsulated components of arbitrary input and output ports using the graph DSL. As an example, we will build a graph junction that represents a pool of workers, where a worker is expressed -as a `Flow[I,O,_]`, i.e. a simple transformation of jobs of type `I` to results of type `O` (as you have seen +as a @scala[`Flow[I,O,_]`]@java[`Flow`], i.e. a simple transformation of jobs of type `I` to results of type `O` (as you have seen already, this flow can actually contain a complex graph inside). Our reusable worker pool junction will not preserve the order of the incoming jobs (they are assumed to have a proper ID field) and it will use a `Balance` junction to schedule jobs to available workers. On top of this, our junction will feature a "fastlane", a dedicated port @@ -172,7 +209,10 @@ where jobs of higher priority can be sent. 
Altogether, our junction will have two input ports of type `I` (for the normal and priority jobs) and an output port of type `O`. To represent this interface, we need to define a custom `Shape`. The following lines show how to do that. -@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape } +Scala +: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape } + + ## Predefined shapes @@ -190,20 +230,31 @@ with multiple input (or output) ports of different types. Since our shape has two input ports and one output port, we can just use the `FanInShape` DSL to define our custom shape: -@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape2 } +Scala +: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-shape2 } + + Now that we have a `Shape` we can wire up a Graph that represents our worker pool. First, we will merge incoming normal and priority jobs using `MergePreferred`, then we will send the jobs to a `Balance` junction which will fan-out to a configurable number of workers (flows), finally we merge all these results together and send them out through our only output port. This is expressed by the following code: -@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-create } +Scala +: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-create } + + + All we need to do now is to use our custom junction in a graph. The following code simulates some simple workers and jobs using plain strings and prints out the results. Actually we used *two* instances of our worker pool junction using `add()` twice. -@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-use } +Scala +: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-components-use } + + + ## Bidirectional Flows @@ -218,19 +269,30 @@ this purpose exists the special type `BidiFlow` which is a graph that has exactly two open inlets and two open outlets. The corresponding shape is called `BidiShape` and is defined like this: -@@snip [Shape.scala]($akka$/akka-stream/src/main/scala/akka/stream/Shape.scala) { #bidi-shape } +@@snip [Shape.scala]($akka$/akka-stream/src/main/scala/akka/stream/Shape.scala) { #bidi-shape } + A bidirectional flow is defined just like a unidirectional `Flow` as demonstrated for the codec mentioned above: -@@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #codec } +Scala +: @@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #codec } + +Java +: @@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #codec } + The first version resembles the partial graph constructor, while for the simple case of a functional 1:1 transformation there is a concise convenience method as shown on the last line. 
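+
+In rough outline, that convenience method takes the two conversion functions directly; in this sketch
+`Message`, `toBytes` and `fromBytes` are placeholders for whatever the protocol defines:
+
+```scala
+import akka.NotUsed
+import akka.stream.scaladsl.BidiFlow
+import akka.util.ByteString
+
+// Placeholder domain type; the real protocol defines its own message model.
+case class Message(payload: String)
+
+// Signatures only; possible implementations are shown right below.
+def toBytes(msg: Message): ByteString = ???
+def fromBytes(bytes: ByteString): Message = ???
+
+// Outbound messages are serialized, inbound bytes are deserialized.
+val codec: BidiFlow[Message, ByteString, ByteString, Message, NotUsed] =
+  BidiFlow.fromFunctions(toBytes _, fromBytes _)
+```
+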
The implementation of the two functions is not difficult either: -@@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #codec-impl } +Scala +: @@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #codec-impl } + +Java +: @@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #codec-impl } + In this way you could easily integrate any other serialization library that turns an object into a sequence of bytes. @@ -238,16 +300,26 @@ turns an object into a sequence of bytes. The other stage that we talked about is a little more involved since reversing a framing protocol means that any received chunk of bytes may correspond to zero or more messages. This is best implemented using a `GraphStage` -(see also @ref:[Custom processing with GraphStage](stream-customize.md#graphstage)). +(see also @ref[Custom processing with GraphStage](stream-customize.md#graphstage)). + +Scala +: @@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #framing } + +Java +: @@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #framing } -@@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #framing } With these implementations we can build a protocol stack and test it: -@@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #compose } +Scala +: @@snip [BidiFlowDocSpec.scala]($code$/scala/docs/stream/BidiFlowDocSpec.scala) { #compose } + +Java +: @@snip [BidiFlowDocTest.java]($code$/java/jdocs/stream/BidiFlowDocTest.java) { #compose } + This example demonstrates how `BidiFlow` subgraphs can be hooked -together and also turned around with the `.reversed` method. The test +together and also turned around with the @scala[`.reversed`]@java[`.reversed()`] method. The test simulates both parties of a network communication protocol without actually having to open a network connection—the flows can just be connected directly. @@ -260,12 +332,22 @@ can be used in the graph as an ordinary source or outlet, and which will eventua If the materialized value is needed at more than one place, it is possible to call `materializedValue` any number of times to acquire the necessary number of outlets. -@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue } +Scala +: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue } + +Java +: @@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-matvalue } + Be careful not to introduce a cycle where the materialized value actually contributes to the materialized value. -The following example demonstrates a case where the materialized `Future` of a fold is fed back to the fold itself. +The following example demonstrates a case where the materialized @scala[`Future`]@java[`CompletionStage`] of a fold is fed back to the fold itself. + +Scala +: @@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue-cycle } + +Java +: @@snip [GraphDSLDocTest.java]($code$/java/jdocs/stream/GraphDSLDocTest.java) { #graph-dsl-matvalue-cycle } -@@snip [GraphDSLDocSpec.scala]($code$/scala/docs/stream/GraphDSLDocSpec.scala) { #graph-dsl-matvalue-cycle } ## Graph cycles, liveness and deadlocks @@ -289,7 +371,12 @@ see there are cases where this is very helpful. 
@@@ -@@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #deadlocked } +Scala +: @@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #deadlocked } + +Java +: @@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #deadlocked } + Running this we observe that after a few numbers have been printed, no more elements are logged to the console - all processing stops after some time. After some investigation we observe that: @@ -307,7 +394,12 @@ If we modify our feedback loop by replacing the `Merge` junction with a `MergePr before trying the other lower priority input ports. Since we feed back through the preferred port it is always guaranteed that the elements in the cycles can flow. -@@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #unfair } +Scala +: @@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #unfair } + +Java +: @@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #unfair } + If we run the example we see that the same sequence of numbers are printed over and over again, but the processing does not stop. Hence, we avoided the deadlock, but `source` is still @@ -325,7 +417,12 @@ be balanced (as many elements are removed as many are injected) then there would To make our cycle both live (not deadlocking) and fair we can introduce a dropping element on the feedback arc. In this case we chose the `buffer()` operation giving it a dropping strategy `OverflowStrategy.dropHead`. -@@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #dropping } +Scala +: @@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #dropping } + +Java +: @@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #dropping } + If we run this example we see that @@ -344,7 +441,12 @@ the beginning instead. To achieve this we modify our first graph by replacing th Since `ZipWith` takes one element from `source` *and* from the feedback arc to inject one element into the cycle, we maintain the balance of elements. -@@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #zipping-dead } +Scala +: @@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #zipping-dead } + +Java +: @@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #zipping-dead } + Still, when we try to run the example it turns out that no element is printed at all! After some investigation we realize that: @@ -356,7 +458,12 @@ These two conditions are a typical "chicken-and-egg" problem. The solution is to element into the cycle that is independent from `source`. We do this by using a `Concat` junction on the backwards arc that injects a single element using `Source.single`. -@@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #zipping-live } +Scala +: @@snip [GraphCyclesSpec.scala]($code$/scala/docs/stream/GraphCyclesSpec.scala) { #zipping-live } + +Java +: @@snip [GraphCyclesDocTest.java]($code$/java/jdocs/stream/GraphCyclesDocTest.java) { #zipping-live } + When we run the above example we see that processing starts and never stops. The important takeaway from this example is that balanced cycles often need an initial "kick-off" element to be injected into the cycle. 
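+
+For reference, an untested sketch of such a "kick-off" cycle along the lines of the last example above:
+
+```scala
+import akka.actor.ActorSystem
+import akka.stream.{ ActorMaterializer, ClosedShape }
+import akka.stream.scaladsl._
+
+implicit val system = ActorSystem("cycles-sketch")
+implicit val materializer = ActorMaterializer()
+
+val source = Source(1 to 10)
+
+RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
+  import GraphDSL.Implicits._
+
+  val zip    = b.add(ZipWith((left: Int, right: Int) => left))
+  val bcast  = b.add(Broadcast[Int](2))
+  val concat = b.add(Concat[Int]())
+
+  // The single initial element is the "kick-off" that makes the cycle live;
+  // afterwards the feedback arc keeps supplying the second ZipWith input.
+  source ~> zip.in0
+  zip.out ~> Flow[Int].map { s => println(s); s } ~> bcast.in
+  bcast.out(0) ~> Sink.ignore
+  Source.single(0) ~> concat.in(0)
+  bcast.out(1) ~> concat.in(1)
+  concat.out ~> zip.in1
+
+  ClosedShape
+}).run()
+```
+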
diff --git a/akka-docs/src/main/paradox/scala/stream/stream-integrations.md b/akka-docs/src/main/paradox/scala/stream/stream-integrations.md index 6fda3f66fc..4d5e69813a 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-integrations.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-integrations.md @@ -49,7 +49,7 @@ If you don't care about the reply values and only use them as back-pressure sign can use `Sink.ignore` after the `mapAsync` stage and then actor is effectively a sink of the stream. -The same pattern can be used with @ref[Actor routers](../routing.md). Then you +The same pattern can be used with @ref:[Actor routers](../routing.md). Then you can use `mapAsyncUnordered` for better efficiency if you don't care about the order of the emitted downstream elements (the replies). @@ -493,7 +493,7 @@ Please note that a factory is necessary to achieve reusability of the resulting As described above any Akka Streams `Source` can be exposed as a Reactive Streams `Publisher` and any `Sink` can be exposed as a Reactive Streams `Subscriber`. Therefore we recommend that you -implement Reactive Streams integrations with built-in stages or @ref[custom stages](stream-customize.md). +implement Reactive Streams integrations with built-in stages or @ref:[custom stages](stream-customize.md). For historical reasons the `ActorPublisher` and `ActorSubscriber` traits are provided to support implementing Reactive Streams `Publisher` and `Subscriber` with @@ -524,7 +524,7 @@ type-safe and safe to implement `akka.stream.stage.GraphStage`. It can also expose a "stage actor ref" is needed to be addressed as-if an Actor. Custom stages implemented using `GraphStage` are also automatically fusable. -To learn more about implementing custom stages using it refer to @ref[Custom processing with GraphStage](stream-customize.md#graphstage). +To learn more about implementing custom stages using it refer to @ref:[Custom processing with GraphStage](stream-customize.md#graphstage). @@@ @@ -590,7 +590,7 @@ type-safe and safe to implement `akka.stream.stage.GraphStage`. It can also expose a "stage actor ref" is needed to be addressed as-if an Actor. Custom stages implemented using `GraphStage` are also automatically fusable. -To learn more about implementing custom stages using it refer to @ref[Custom processing with GraphStage](stream-customize.md#graphstage). +To learn more about implementing custom stages using it refer to @ref:[Custom processing with GraphStage](stream-customize.md#graphstage). @@@ diff --git a/akka-docs/src/main/paradox/scala/stream/stream-introduction.md b/akka-docs/src/main/paradox/scala/stream/stream-introduction.md index 8a115d5751..e42c00a33b 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-introduction.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-introduction.md @@ -63,13 +63,13 @@ composition, therefore it may take some careful study of this subject until you feel familiar with the tools and techniques. The documentation is here to help and for best results we recommend the following approach: - * Read the @ref[Quick Start Guide](stream-quickstart.md#stream-quickstart) to get a feel for how streams + * Read the @ref:[Quick Start Guide](stream-quickstart.md#stream-quickstart) to get a feel for how streams look like and what they can do. 
- * The top-down learners may want to peruse the @ref[Design Principles behind Akka Streams](../general/stream/stream-design.md) at this + * The top-down learners may want to peruse the @ref:[Design Principles behind Akka Streams](../general/stream/stream-design.md) at this point. * The bottom-up learners may feel more at home rummaging through the -@ref[Streams Cookbook](stream-cookbook.md). +@ref:[Streams Cookbook](stream-cookbook.md). * For a complete overview of the built-in processing stages you can look at the -table in @ref[stages overview](stages-overview.md) +table in @ref:[stages overview](stages-overview.md) * The other sections can be read sequentially or as needed during the previous steps, each digging deeper into specific topics. \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/stream/stream-io.md b/akka-docs/src/main/paradox/scala/stream/stream-io.md index 44dd8cd068..2cddad7b0c 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-io.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-io.md @@ -1,7 +1,7 @@ # Working with streaming IO Akka Streams provides a way of handling File IO and TCP connections with Streams. -While the general approach is very similar to the @ref[Actor based TCP handling](../io-tcp.md) using Akka IO, +While the general approach is very similar to the @ref:[Actor based TCP handling](../io-tcp.md) using Akka IO, by using Akka Streams you are freed of having to manually react to back-pressure signals, as the library does it transparently for you. @@ -23,7 +23,7 @@ Java Next, we simply handle *each* incoming connection using a `Flow` which will be used as the processing stage to handle and emit `ByteString` s from and to the TCP Socket. Since one `ByteString` does not have to necessarily correspond to exactly one line of text (the client might be sending the line in chunks) we use the @scala[`Framing.delimiter`]@java[`delimiter`] -helper Flow @scala[]@java[from `akka.stream.javadsl.Framing`] to chunk the inputs up into actual lines of text. The last boolean +helper Flow @java[from `akka.stream.javadsl.Framing`] to chunk the inputs up into actual lines of text. The last boolean argument indicates that we require an explicit line ending even for the last message before the connection is closed. In this example we simply add exclamation marks to each incoming text message and push it through the flow: @@ -45,17 +45,10 @@ It is also possible to shut down the server's socket by cancelling the `Incoming We can then test the TCP server by sending data to the TCP Socket using `netcat`: -@scala[ ``` $ echo -n "Hello World" | netcat 127.0.0.1 8888 Hello World!!! ``` -]@java[ -``` -$ echo -n "Hello World" | netcat 127.0.0.1 8889 -Hello World!!! -``` -] ### Connecting: REPL Client @@ -86,7 +79,7 @@ When writing such end-to-end back-pressured systems you may sometimes end up in in which *either side is waiting for the other one to start the conversation*. One does not need to look far to find examples of such back-pressure loops. In the two examples shown previously, we always assumed that the side we are connecting to would start the conversation, which effectively means both sides are back-pressured and can not get -the conversation started. There are multiple ways of dealing with this which are explained in depth in @ref[Graph cycles, liveness and deadlocks](stream-graphs.md#graph-cycles), +the conversation started. 
There are multiple ways of dealing with this which are explained in depth in @ref:[Graph cycles, liveness and deadlocks](stream-graphs.md#graph-cycles), however in client-server scenarios it is often the simplest to make either side simply send an initial message. @@@ note diff --git a/akka-docs/src/main/paradox/scala/stream/stream-parallelism.md b/akka-docs/src/main/paradox/scala/stream/stream-parallelism.md index e2bc035e27..12a25f7efe 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-parallelism.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-parallelism.md @@ -5,7 +5,7 @@ and executed sequentially by default. This avoids the overhead of events crossin limits the flow to execute at most one stage at any given time. In many cases it is useful to be able to concurrently execute the stages of a flow, this is done by explicitly marking -them as asynchronous using the `async` method. Each processing stage marked as asynchronous will run in a +them as asynchronous using the @scala[`async`]@java[`async()`] method. Each processing stage marked as asynchronous will run in a dedicated actor internally, while all stages not marked asynchronous will run in one single actor. We will illustrate through the example of pancake cooking how streams can be used for various processing patterns, @@ -23,7 +23,11 @@ are two pancakes being cooked at the same time, one being cooked on its first si completion. This is how this setup would look like implemented as a stream: -@@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #pipelining } +Scala +: @@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #pipelining } + +Java +: @@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #pipelining } The two `map` stages in sequence (encapsulated in the "frying pan" flows) will be executed in a pipelined way, basically doing the same as Roland with his frying pans: @@ -54,7 +58,11 @@ the results on a shared plate. Whenever a pan becomes empty, he takes the next s In essence he parallelizes the same process over multiple pans. This is how this setup will look like if implemented using streams: -@@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #parallelism } +Scala +: @@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #parallelism } + +Java +: @@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #parallelism } The benefit of parallelizing is that it is easy to scale. In the pancake example it is easy to add a third frying pan with Patrik's method, but Roland cannot add a third frying pan, @@ -76,7 +84,11 @@ First, let's look at how we can parallelize pipelined processing stages. In the will employ two chefs, each working using Roland's pipelining method, but we use the two chefs in parallel, just like Patrik used the two frying pans. 
This is how it looks like if expressed as streams: -@@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #parallel-pipeline } +Scala +: @@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #parallel-pipeline } + +Java +: @@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #parallel-pipeline } The above pattern works well if there are many independent jobs that do not depend on the results of each other, but the jobs themselves need multiple processing steps where each step builds on the result of @@ -93,7 +105,11 @@ plate. This is again straightforward to implement with the streams API: -@@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #pipelined-parallel } +Scala +: @@snip [FlowParallelismDocSpec.scala]($code$/scala/docs/stream/FlowParallelismDocSpec.scala) { #pipelined-parallel } + +Java +: @@snip [FlowParallelismDocTest.java]($code$/java/jdocs/stream/FlowParallelismDocTest.java) { #pipelined-parallel } This usage pattern is less common but might be usable if a certain step in the pipeline might take wildly different times to finish different jobs. The reason is that there are more balance-merge steps in this pattern diff --git a/akka-docs/src/main/paradox/scala/stream/stream-quickstart.md b/akka-docs/src/main/paradox/scala/stream/stream-quickstart.md index 6a88f350eb..01c3056fda 100644 --- a/akka-docs/src/main/paradox/scala/stream/stream-quickstart.md +++ b/akka-docs/src/main/paradox/scala/stream/stream-quickstart.md @@ -1,9 +1,7 @@ - - -# Quick Start Guide +# Streams Quickstart Guide Create a project and add the akka-streams dependency to the build tool of your -choice as described in @ref[Using a build tool](../guide/quickstart.md). +choice. A stream usually begins at a source, so this is also how we start an Akka Stream. Before we create one, we import the full complement of streaming tools: @@ -87,7 +85,7 @@ There are other ways to create a materializer, e.g. from an thing that makes streams run—you don’t need to worry about any of the details just now apart from that you need one for calling any of the `run` methods on a `Source`. @scala[The materializer is picked up implicitly if it is omitted -from the `run` method call arguments, which we will do in the following.]@java[] +from the `run` method call arguments, which we will do in the following.] The nice thing about Akka Streams is that the `Source` is just a description of what you want to run, and like an architect’s blueprint it can @@ -196,7 +194,7 @@ second the throttle combinator will assert *back-pressure* upstream. This is basically all there is to Akka Streams in a nutshell—glossing over the fact that there are dozens of sources and sinks and many more stream -transformation combinators to choose from, see also @ref[stages overview](stages-overview.md). +transformation combinators to choose from, see also @ref:[stages overview](stages-overview.md). 
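+
+To make the throttling and back-pressure point concrete, here is a tiny, illustrative sketch of a
+throttled stream (not one of the tested snippets):
+
+```scala
+import akka.actor.ActorSystem
+import akka.stream.{ ActorMaterializer, ThrottleMode }
+import akka.stream.scaladsl.Source
+import scala.concurrent.duration._
+
+implicit val system = ActorSystem("quickstart-sketch")
+implicit val materializer = ActorMaterializer()
+
+// The source could emit numbers as fast as it likes, but throttle lets at most one
+// element per 100 milliseconds through and back-pressures the rest of the stream.
+Source(1 to 100)
+  .throttle(elements = 1, per = 100.millis, maximumBurst = 1, mode = ThrottleMode.Shaping)
+  .runForeach(println)
+```
+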
# Reactive Tweets @@ -221,7 +219,7 @@ Java @@@ note If you would like to get an overview of the used vocabulary first instead of diving head-first -into an actual example you can have a look at the @ref[Core concepts](stream-flows-and-basics.md#core-concepts) and @ref[Defining and running streams](stream-flows-and-basics.md#defining-and-running-streams) +into an actual example you can have a look at the @ref:[Core concepts](stream-flows-and-basics.md#core-concepts) and @ref:[Defining and running streams](stream-flows-and-basics.md#defining-and-running-streams) sections of the docs, and then come back to this quickstart to see it all pieced together into a simple example application. @@@ @@ -241,7 +239,7 @@ Java : @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #materializer-setup } The `ActorMaterializer` can optionally take `ActorMaterializerSettings` which can be used to define -materialization properties, such as default buffer sizes (see also @ref[Buffers for asynchronous stages](stream-rate.md#async-stream-buffers)), the dispatcher to +materialization properties, such as default buffer sizes (see also @ref:[Buffers for asynchronous stages](stream-rate.md#async-stream-buffers)), the dispatcher to be used by the pipeline etc. These can be overridden with `withAttributes` on `Flow`, `Source`, `Sink` and `Graph`. Let's assume we have a stream of tweets readily available. In Akka this is expressed as a @scala[`Source[Out, M]`]@java[`Source`]: @@ -271,9 +269,9 @@ Scala Java : @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #authors-filter-map } -Finally in order to @ref[materialize](stream-flows-and-basics.md#stream-materialization) and run the stream computation we need to attach +Finally in order to @ref:[materialize](stream-flows-and-basics.md#stream-materialization) and run the stream computation we need to attach the Flow to a @scala[`Sink`]@java[`Sink`] that will get the Flow running. The simplest way to do this is to call -`runWith(sink)` on a @scala[`Source`]@java[`Source`]. For convenience a number of common Sinks are predefined and collected as @scala[]@java[static] methods on +`runWith(sink)` on a @scala[`Source`]@java[`Source`]. For convenience a number of common Sinks are predefined and collected as @java[static] methods on the @scala[`Sink` companion object]@java[`Sink class`]. For now let's simply print each author: @@ -362,16 +360,16 @@ Both `Graph` and `RunnableGraph` are *immutable, thread-safe, and freely shareab A graph can also have one of several other shapes, with one or more unconnected ports. Having unconnected ports expresses a graph that is a *partial graph*. Concepts around composing and nesting graphs in large structures are -explained in detail in @ref[Modularity, Composition and Hierarchy](stream-composition.md). It is also possible to wrap complex computation graphs +explained in detail in @ref:[Modularity, Composition and Hierarchy](stream-composition.md). It is also possible to wrap complex computation graphs as Flows, Sinks or Sources, which will be explained in detail in -@scala[@ref[Constructing Sources, Sinks and Flows from Partial Graphs](stream-graphs.md#constructing-sources-sinks-flows-from-partial-graphs)]@java[@ref:[Constructing and combining Partial Graphs](stream-graphs.md#partial-graph-dsl)]. 
+@scala[@ref:[Constructing Sources, Sinks and Flows from Partial Graphs](stream-graphs.md#constructing-sources-sinks-flows-from-partial-graphs)]@java[@ref:[Constructing and combining Partial Graphs](stream-graphs.md#partial-graph-dsl)]. ## Back-pressure in action One of the main advantages of Akka Streams is that they *always* propagate back-pressure information from stream Sinks (Subscribers) to their Sources (Publishers). It is not an optional feature, and is enabled at all times. To learn more about the back-pressure protocol used by Akka Streams and all other Reactive Streams compatible implementations read -@ref[Back-pressure explained](stream-flows-and-basics.md#back-pressure-explained). +@ref:[Back-pressure explained](stream-flows-and-basics.md#back-pressure-explained). A typical problem applications (not using Akka Streams) like this often face is that they are unable to process the incoming data fast enough, either temporarily or by design, and will start buffering incoming data until there's no more space to buffer, resulting @@ -423,7 +421,7 @@ has also a type parameter of @scala[`Future[Int]`]@java[`CompletionStage>`]. Next we call `run()` which uses the @scala[implicit]@java[] `ActorMaterializer` +be `run()`, as indicated by its type: @scala[`RunnableGraph[Future[Int]]`]@java[`RunnableGraph>`]. Next we call `run()` which uses the @scala[implicit] `ActorMaterializer` to materialize and run the Flow. The value returned by calling `run()` on a @scala[`RunnableGraph[T]`]@java[`RunnableGraph`] is of type `T`. In our case this type is @scala[`Future[Int]`]@java[`CompletionStage`] which, when completed, will contain the total length of our `tweets` stream. In case of the stream failing, this future would complete with a Failure. @@ -440,7 +438,7 @@ Java : @@snip [TwitterStreamQuickstartDocTest.java]($code$/java/jdocs/stream/TwitterStreamQuickstartDocTest.java) { #tweets-runnable-flow-materialized-twice } Many elements in Akka Streams provide materialized values which can be used for obtaining either results of computation or -steering these elements which will be discussed in detail in @ref[Stream Materialization](stream-flows-and-basics.md#stream-materialization). Summing up this section, now we know +steering these elements which will be discussed in detail in @ref:[Stream Materialization](stream-flows-and-basics.md#stream-materialization). Summing up this section, now we know what happens behind the scenes when we run this one-liner, which is equivalent to the multi line version above: Scala diff --git a/akka-docs/src/main/paradox/scala/testing.md b/akka-docs/src/main/paradox/scala/testing.md index 9edd01352f..4b7dfc4d67 100644 --- a/akka-docs/src/main/paradox/scala/testing.md +++ b/akka-docs/src/main/paradox/scala/testing.md @@ -361,7 +361,7 @@ above; just use the power! @@@ warning -Any message send from a `TestProbe` to another actor which runs on the +Any message sent from a `TestProbe` to another actor which runs on the CallingThreadDispatcher runs the risk of dead-lock, if that other actor might also send to this probe. The implementation of `TestProbe.watch` and `TestProbe.unwatch` will also send a message to the watchee, which diff --git a/akka-docs/src/main/paradox/scala/typed-actors.md b/akka-docs/src/main/paradox/scala/typed-actors.md index 2d767ca722..f04520e610 100644 --- a/akka-docs/src/main/paradox/scala/typed-actors.md +++ b/akka-docs/src/main/paradox/scala/typed-actors.md @@ -46,13 +46,17 @@ They have their niche, use them sparingly. 
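+
+In rough outline (a sketch; the tested snippets for both languages follow), the extension is obtained
+from the actor system, and a few helpers are available from inside a Typed Actor:
+
+```scala
+import akka.actor.{ ActorSystem, TypedActor }
+
+val system = ActorSystem("typed-actor-tools-sketch")
+
+// The Typed Actor extension, scoped to an actor system.
+val extension = TypedActor(system)
+
+// Inside the methods of a Typed Actor implementation you can additionally use
+// TypedActor.context (the backing ActorContext) and TypedActor.self[SomeInterface]
+// (the external proxy of the current Typed Actor).
+```
+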
Before we create our first Typed Actor we should first go through the tools that we have at our disposal, it's located in `akka.actor.TypedActor`. -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-extension-tools } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-extension-tools } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-extension-tools } @@@ warning Same as not exposing `this` of an Akka Actor, it's important not to expose `this` of a Typed Actor, instead you should pass the external proxy reference, which is obtained from within your Typed Actor as -`TypedActor.self`, this is your external identity, as the `ActorRef` is the external identity of +@scala[`TypedActor.self`]@java[`TypedActor.self()`], this is your external identity, as the `ActorRef` is the external identity of an Akka Actor. @@@ @@ -63,34 +67,62 @@ To create a Typed Actor you need to have one or more interfaces, and one impleme The following imports are assumed: -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #imports } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #imports } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #imports } Our example interface: -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } Our example implementation of that interface: -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } The most trivial way of creating a Typed Actor instance of our `Squarer`: -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create1 } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create1 } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create1 } First type is the type of the proxy, the second type is the type of the implementation. If you need to call a specific constructor you do it like this: -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create2 } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-create2 } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-create2 } Since you supply a `Props`, you can specify which dispatcher to use, what the default timeout should be used and more. Now, our `Squarer` doesn't have any methods, so we'd better add those. 
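For readers without the referenced snippet files at hand, the `#typed-actor-iface` / `#typed-actor-impl` / `#typed-actor-create*` snippets amount to something along these lines in Java. The `Squarer`/`SquarerImpl` names follow the docs, but this is a paraphrased sketch rather than the snippet source.

```java
import akka.actor.ActorSystem;
import akka.actor.TypedActor;
import akka.actor.TypedProps;

public class TypedActorCreationSketch {
  // the proxy interface (still without methods, mirroring the point in the text)
  public interface Squarer {
  }

  // the implementation backing the proxy
  public static class SquarerImpl implements Squarer {
    private final String name;
    public SquarerImpl() { this("default"); }
    public SquarerImpl(String name) { this.name = name; }
  }

  public static void main(String[] args) {
    ActorSystem system = ActorSystem.create("typed-actor-demo");

    // simplest form: first the proxy (interface) type, then the implementation type
    Squarer mySquarer = TypedActor.get(system).typedActorOf(
        new TypedProps<SquarerImpl>(Squarer.class, SquarerImpl.class));

    // calling a specific constructor via a creator, and giving the proxy an explicit name;
    // TypedProps is also where timeout and dispatcher can be configured
    Squarer otherSquarer = TypedActor.get(system).typedActorOf(
        new TypedProps<SquarerImpl>(Squarer.class, () -> new SquarerImpl("foo")), "name");

    system.terminate();
  }
}
```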
-@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-iface } -Alright, now we've got some methods we can call, but we need to implement those in SquarerImpl. +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-iface } -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } +Alright, now we've got some methods we can call, but we need to implement those in `SquarerImpl`. + +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-impl } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-impl } Excellent, now we have an interface and an implementation of that interface, and we know how to create a Typed Actor from that, so let's look at calling these methods. @@ -99,6 +131,8 @@ and we know how to create a Typed Actor from that, so let's look at calling thes Methods returning: +@@@ div {.group-scala} + * `Unit` will be dispatched with `fire-and-forget` semantics, exactly like `ActorRef.tell` * `scala.concurrent.Future[_]` will use `send-request-reply` semantics, exactly like `ActorRef.ask` * `scala.Option[_]` will use `send-request-reply` semantics, but *will* block to wait for an answer, @@ -107,6 +141,23 @@ Any exception that was thrown during this call will be rethrown. * Any other type of value will use `send-request-reply` semantics, but *will* block to wait for an answer, throwing `java.util.concurrent.TimeoutException` if there was a timeout or rethrow any exception that was thrown during this call. +@@@ + +@@@ div {.group-java} + + * `void` will be dispatched with `fire-and-forget` semantics, exactly like `ActorRef.tell` + * `scala.concurrent.Future` will use `send-request-reply` semantics, exactly like `ActorRef.ask` + * `akka.japi.Option` will use `send-request-reply` semantics, but *will* block to wait for an answer, +and return `akka.japi.Option.None` if no answer was produced within the timeout, or `akka.japi.Option.Some` containing the result otherwise. +Any exception that was thrown during this call will be rethrown. + * Any other type of value will use `send-request-reply` semantics, but *will* block to wait for an answer, +throwing `java.util.concurrent.TimeoutException` if there was a timeout or rethrow any exception that was thrown during this call. +Note that due to the Java exception and reflection mechanisms, such a `TimeoutException` will be wrapped in a `java.lang.reflect.UndeclaredThrowableException` +unless the interface method explicitly declares the `TimeoutException` as a thrown checked exception. + +@@@ + + ## Messages and immutability While Akka cannot enforce that the parameters to the methods of your Typed Actors are immutable, @@ -114,25 +165,51 @@ we *strongly* recommend that parameters passed are immutable. ### One-way message send -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-oneway } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-oneway } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-oneway } As simple as that! The method will be executed on another thread; asynchronously. 
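As a compact Java illustration of the four dispatch rules above (paraphrasing the docs' Squarer example rather than quoting the snippets), one method per semantic could look like this:

```java
import scala.concurrent.Future;
import akka.japi.Option;

public interface Squarer {
  void squareDontCare(int i);             // fire-and-forget, like ActorRef.tell

  Future<Integer> square(int i);          // send-request-reply, like ActorRef.ask (non-blocking)

  Option<Integer> squareNowPlease(int i); // send-request-reply, blocks, Option.none() on timeout

  int squareNow(int i);                   // send-request-reply, blocks, TimeoutException on timeout
}
```

A one-way call is then just `mySquarer.squareDontCare(10);` -- it returns immediately while the work happens on the typed actor's thread.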
### Request-reply message send -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-option } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-option } -This will block for as long as the timeout that was set in the Props of the Typed Actor, +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-option } + +This will block for as long as the timeout that was set in the `Props` of the Typed Actor, if needed. It will return `None` if a timeout occurs. -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-strict } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-strict } -This will block for as long as the timeout that was set in the Props of the Typed Actor, +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-strict } + +This will block for as long as the timeout that was set in the `Props` of the Typed Actor, if needed. It will throw a `java.util.concurrent.TimeoutException` if a timeout occurs. +@@@ div {.group-java} + +Note that here, such a `TimeoutException` will be wrapped in a +`java.lang.reflect.UndeclaredThrowableException` by the Java reflection mechanism, +because the interface method does not explicitly declare the `TimeoutException` as a thrown checked exception. +To get the `TimeoutException` directly, declare `throws java.util.concurrent.TimeoutException` at the +interface method. + +@@@ + ### Request-reply-with-future message send -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-future } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-call-future } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-call-future } This call is asynchronous, and the Future returned can be used for asynchronous composition. @@ -140,11 +217,19 @@ This call is asynchronous, and the Future returned can be used for asynchronous Since Akka's Typed Actors are backed by Akka Actors they must be stopped when they aren't needed anymore. -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-stop } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-stop } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-stop } This asynchronously stops the Typed Actor associated with the specified proxy ASAP. -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-poisonpill } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-poisonpill } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-poisonpill } This asynchronously stops the Typed Actor associated with the specified proxy after it's done with all calls that were made prior to this call. @@ -152,12 +237,16 @@ after it's done with all calls that were made prior to this call. 
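Put side by side, the two stop variants described above look roughly like this in Java, assuming `mySquarer` and `otherSquarer` are proxies obtained from `typedActorOf` as sketched earlier:

```java
// stops the typed actor behind the proxy as soon as possible
TypedActor.get(system).stop(mySquarer);

// processes all calls made before this point, then stops the actor
TypedActor.get(system).poisonPill(otherSquarer);
```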
## Typed Actor Hierarchies Since you can obtain a contextual Typed Actor Extension by passing in an `ActorContext` -you can create child Typed Actors by invoking `typedActorOf(..)` on that: +you can create child Typed Actors by invoking `typedActorOf(..)` on that. -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-hierarchy } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-hierarchy } -You can also create a child Typed Actor in regular Akka Actors by giving the `ActorContext` -as an input parameter to TypedActor.get(…). +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-hierarchy } + +You can also create a child Typed Actor in regular Akka Actors by giving the @scala[`ActorContext`]@java[`AbstractActor.ActorContext`] +as an input parameter to `TypedActor.get(…)`. ## Supervisor Strategy @@ -199,7 +288,13 @@ The ActorRef needs to accept `MethodCall` messages. Since `TypedActors` are backed by `Akka Actors`, you can use `typedActorOf` to proxy `ActorRefs` potentially residing on remote nodes. -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-remote } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-remote } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-actor-remote } + +@@@ div {.group-scala} ## Supercharging @@ -209,6 +304,8 @@ Here's an example on how you can use traits to mix in behavior in your Typed Act @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-actor-supercharge-usage } +@@@ + ## Typed Router pattern Sometimes you want to spread messages between multiple actors. The easiest way to achieve this in Akka is to use a @ref:[Router](routing.md), @@ -217,10 +314,18 @@ which can implement a specific routing logic, such as `smallest-mailbox` or `con Routers are not provided directly for typed actors, but it is really easy to leverage an untyped router and use a typed proxy in front of it. To showcase this let's create typed actors that assign themselves some random `id`, so we know that in fact, the router has sent the message to different actors: -@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-router-types } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-router-types } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-router-types } In order to round robin among a few instances of such actors, you can simply create a plain untyped router, and then facade it with a `TypedActor` like shown in the example below. This works because typed actors of course communicate using the same mechanisms as normal actors, and methods calls on them get transformed into message sends of `MethodCall` messages. 
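The facade approach works because every Typed Actor is backed by a plain `ActorRef` that accepts `MethodCall` messages, so a stock untyped router can sit in between. A hedged Java sketch of the routee and router setup follows, reusing the `Squarer`/`SquarerImpl` sketch from above; the final proxy-facade step is what the `#typed-router` snippet shows.

```java
import java.util.Arrays;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.TypedActor;
import akka.actor.TypedProps;
import akka.routing.RoundRobinGroup;

public class TypedRouteesSketch {
  public static void main(String[] args) {
    ActorSystem system = ActorSystem.create("typed-router");

    // two ordinary typed actors acting as routees
    Squarer s1 = TypedActor.get(system).typedActorOf(
        new TypedProps<SquarerImpl>(Squarer.class, SquarerImpl.class));
    Squarer s2 = TypedActor.get(system).typedActorOf(
        new TypedProps<SquarerImpl>(Squarer.class, SquarerImpl.class));

    // every typed actor is backed by a plain ActorRef that understands MethodCall messages,
    // so a stock group router can simply be pointed at their paths
    String p1 = TypedActor.get(system).getActorRefFor(s1).path().toStringWithoutAddress();
    String p2 = TypedActor.get(system).getActorRefFor(s2).path().toStringWithoutAddress();

    ActorRef router = system.actorOf(
        new RoundRobinGroup(Arrays.asList(p1, p2)).props(), "router");

    // the #typed-router snippet then puts a typed proxy in front of `router`
    // (via typedActorOf), so ordinary method calls are spread over s1 and s2
  }
}
```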
-@@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-router } +Scala +: @@snip [TypedActorDocSpec.scala]($code$/scala/docs/actor/TypedActorDocSpec.scala) { #typed-router } + +Java +: @@snip [TypedActorDocTest.java]($code$/java/jdocs/actor/TypedActorDocTest.java) { #typed-router } diff --git a/akka-docs/src/main/paradox/scala/typed.md b/akka-docs/src/main/paradox/scala/typed.md index 4c52ccb562..751d74bcfa 100644 --- a/akka-docs/src/main/paradox/scala/typed.md +++ b/akka-docs/src/main/paradox/scala/typed.md @@ -83,8 +83,8 @@ also typed as such. This is why we can access the `whom` and `replyTo` members without needing to use a pattern match. On the last line we see the `HelloWorld` Actor send a message to another -Actor, which is done using the `tell` method (represented by the `!` operator). -Since the `replyTo` address is declared to be of type `ActorRef[Greeted]` the +Actor, which is done using the @scala[`!` operator (pronounced “tell”).]@java[`tell` method.] +Since the `replyTo` address is declared to be of type @scala[`ActorRef[Greeted]`]@java[`ActorRef`], the compiler will only permit us to send messages of this type, other usage will not be accepted. @@ -92,7 +92,7 @@ The accepted message types of an Actor together with all reply types defines the protocol spoken by this Actor; in this case it is a simple request–reply protocol but Actors can model arbitrarily complex protocols when needed. The protocol is bundled together with the behavior that implements it in a nicely -wrapped scope—the `HelloWorld` object. +wrapped scope—the `HelloWorld` @scala[object]@java[class]. Now we want to try out this Actor, so we must start an ActorSystem to host it: @@ -103,13 +103,15 @@ Java : @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #hello-world } After importing the Actor’s protocol definition we start an Actor system from -the defined behavior. +the defined `greeter` behavior. As Carl Hewitt said, one Actor is no Actor—it would be quite lonely with nobody to talk to. In this sense the example is a little cruel because we only give the `HelloWorld` Actor a fake person to talk to—the “ask” pattern (represented by the `?` operator) can be used to send a message such that the -reply fulfills a Promise to which we get back the corresponding Future. +reply fulfills a @scala[`Promise` to which we get back the corresponding `Future`]@java[`CompletionStage`]. + +@@@ div {.group-scala} Note that the `Future` that is returned by the “ask” operation is properly typed already, no type checks or casts needed. This is possible due to @@ -120,15 +122,35 @@ parameter which we fill in is of type `ActorRef[Greeted]`, which means that the value that fulfills the `Promise` can only be of type `Greeted`. +@@@ + +@@@ div {.group-java} + +Note that the `CompletionStage` that is returned by the “ask” operation is +properly typed already, no type checks or casts needed. This is possible due to +the type information that is part of the message protocol: the `ask` operator +takes as argument a function that pass an `ActorRef`, which is the +`replyTo` parameter of the `Greet` message, which means that when sending +the reply message to that `ActorRef` the message that fulfills the +`CompletionStage` can only be of type `Greeted`. + +@@@ + We use this here to send the `Greet` command to the Actor and when the reply comes back we will print it out and tell the actor system to shut down. 
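To see why the reply is already correctly typed, it helps to look at the shape of the protocol. Assuming the `akka.typed.ActorRef` type of the then-experimental Akka Typed module, the Java side of the `HelloWorld` protocol is essentially two immutable message classes, with the reply type carried by the `replyTo` field (an illustrative reconstruction, not the `IntroTest` source):

```java
import akka.typed.ActorRef;

public abstract class HelloWorldProtocol {
  public static final class Greet {
    public final String whom;
    public final ActorRef<Greeted> replyTo;

    public Greet(String whom, ActorRef<Greeted> replyTo) {
      this.whom = whom;
      this.replyTo = replyTo;
    }
  }

  public static final class Greeted {
    public final String whom;

    public Greeted(String whom) {
      this.whom = whom;
    }
  }
}
```

Because `Greet.replyTo` is an `ActorRef<Greeted>`, the only message that can be sent back through it is a `Greeted`, which is exactly why the value that completes the ask's `Future`/`CompletionStage` needs no cast.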
Once that is done as well we print the `"system terminated"` messages and the -program ends. The `recovery` combinator on the original `Future` is +program ends. + +@@@ div {.group-scala} + +The `recovery` combinator on the original `Future` is needed in order to ensure proper system shutdown even in case something went wrong; the `flatMap` and `map` combinators that the `for` expression gets turned into care only about the “happy path” and if the `future` failed with a timeout then no `greeting` would be extracted and nothing would happen. +@@@ + This shows that there are aspects of Actor messaging that can be type-checked by the compiler, but this ability is not unlimited, there are bounds to what we can statically express. Before we go on with a more complex (and realistic) @@ -202,7 +224,7 @@ Scala Java : @@snip [IntroSpec.scala]($akka$/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java) { #chatroom-protocol } -Initially the client Actors only get access to an `ActorRef[GetSession]` +Initially the client Actors only get access to an @scala[`ActorRef[GetSession]`]@java[`ActorRef`] which allows them to make the first step. Once a client’s session has been established it gets a `SessionGranted` message that contains a `handle` to unlock the next protocol step, posting messages. The `PostMessage` @@ -248,15 +270,15 @@ has `private` visibility and can't be created outside the actor. If we did not care about securing the correspondence between a session and a screen name then we could change the protocol such that `PostMessage` is -removed and all clients just get an `ActorRef[PostSessionMessage]` to +removed and all clients just get an @scala[`ActorRef[PostSessionMessage]`]@java[`ActorRef`] to send to. In this case no wrapper would be needed and we could just use -`ctx.self`. The type-checks work out in that case because -`ActorRef[-T]` is contravariant in its type parameter, meaning that we -can use a `ActorRef[Command]` wherever an -`ActorRef[PostSessionMessage]` is needed—this makes sense because the +@scala[`ctx.self`]@java[`ctx.getSelf()`]. The type-checks work out in that case because +@scala[`ActorRef[-T]`]@java[`ActorRef`] is contravariant in its type parameter, meaning that we +can use a @scala[`ActorRef[Command]`]@java[`ActorRef`] wherever an +@scala[`ActorRef[PostSessionMessage]`]@java[`ActorRef`] is needed—this makes sense because the former simply speaks more languages than the latter. The opposite would be -problematic, so passing an `ActorRef[PostSessionMessage]` where -`ActorRef[Command]` is required will lead to a type error. +problematic, so passing an @scala[`ActorRef[PostSessionMessage]`]@java[`ActorRef`] where +@scala[`ActorRef[Command]`]@java[`ActorRef`] is required will lead to a type error. ### Trying it out @@ -273,11 +295,16 @@ post a message, wait to see it published, and then terminate. The last step requires the ability to change behavior, we need to transition from the normal running behavior into the terminated state. This is why here we do not return `same`, as above, but another special value `stopped`. + +@@@ div {.group-scala} + Since `SessionEvent` is a sealed trait the Scala compiler will warn us if we forget to handle one of the subtypes; in this case it reminded us that alternatively to `SessionGranted` we may also receive a `SessionDenied` event. +@@@ + Now to try things out we must start both a chat room and a gabbler and of course we do this inside an Actor system. 
Since there can be only one guardian supervisor we could either start the chat room from the gabbler (which we don’t @@ -294,7 +321,7 @@ Java In good tradition we call the `main` Actor what it is, it directly corresponds to the `main` method in a traditional Java application. This Actor will perform its job on its own accord, we do not need to send messages -from the outside, so we declare it to be of type `NotUsed`. Actors receive not +from the outside, so we declare it to be of type @scala[`NotUsed`]@java[`Void`]. Actors receive not only external messages, they also are notified of certain system events, so-called Signals. In order to get access to those we choose to implement this particular one using the `immutable` behavior decorator. The @@ -346,5 +373,5 @@ A side-effect of this is that behaviors can now be tested in isolation without having to be packaged into an Actor, tests can run fully synchronously without having to worry about timeouts and spurious failures. Another side-effect is that behaviors can nicely be composed and decorated, see `tap`, or -`widen` combinators; nothing about these is special or internal, new +@scala[`widen`]@java[`widened`] combinators; nothing about these is special or internal, new combinators can be written as external libraries or tailor-made for each project. diff --git a/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java b/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java index dd9b46d221..191b7e3f71 100644 --- a/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java +++ b/akka-docs/src/test/java/jdocs/actor/ActorDocTest.java @@ -49,6 +49,7 @@ import static akka.pattern.PatternsCS.gracefulStop; import akka.pattern.AskTimeoutException; import scala.concurrent.duration.Duration; import java.util.concurrent.CompletionStage; + //#import-gracefulStop //#import-terminated import akka.actor.Terminated; diff --git a/akka-docs/src/test/java/jdocs/actor/TimerDocTest.java b/akka-docs/src/test/java/jdocs/actor/TimerDocTest.java new file mode 100644 index 0000000000..1d384f94ef --- /dev/null +++ b/akka-docs/src/test/java/jdocs/actor/TimerDocTest.java @@ -0,0 +1,46 @@ +/** + * Copyright (C) 2017 Lightbend Inc. 
+ */ + +package jdocs.actor; + +//#timers +import java.util.concurrent.TimeUnit; +import scala.concurrent.duration.Duration; +import akka.actor.AbstractActorWithTimers; + +//#timers + +public class TimerDocTest { + + static + //#timers + public class MyActor extends AbstractActorWithTimers { + + private static Object TICK_KEY = "TickKey"; + private static final class FirstTick { + } + private static final class Tick { + } + + public MyActor() { + getTimers().startSingleTimer(TICK_KEY, new FirstTick(), + Duration.create(500, TimeUnit.MILLISECONDS)); + } + + @Override + public Receive createReceive() { + return receiveBuilder() + .match(FirstTick.class, message -> { + // do something useful here + getTimers().startPeriodicTimer(TICK_KEY, new Tick(), + Duration.create(1, TimeUnit.SECONDS)); + }) + .match(Tick.class, message -> { + // do something useful here + }) + .build(); + } + } + //#timers +} diff --git a/akka-docs/src/test/java/jdocs/cluster/FactorialFrontend.java b/akka-docs/src/test/java/jdocs/cluster/FactorialFrontend.java index 2d35d43718..34f7d05429 100644 --- a/akka-docs/src/test/java/jdocs/cluster/FactorialFrontend.java +++ b/akka-docs/src/test/java/jdocs/cluster/FactorialFrontend.java @@ -2,6 +2,8 @@ package jdocs.cluster; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; +import java.util.Set; import java.util.concurrent.TimeUnit; import akka.actor.Props; @@ -77,12 +79,12 @@ abstract class FactorialFrontend2 extends AbstractActor { int totalInstances = 100; Iterable routeesPaths = Arrays.asList("/user/factorialBackend", ""); boolean allowLocalRoutees = true; - String useRole = "backend"; + Set useRoles = new HashSet<>(Arrays.asList("backend")); ActorRef backend = getContext().actorOf( new ClusterRouterGroup(new AdaptiveLoadBalancingGroup( HeapMetricsSelector.getInstance(), Collections. 
emptyList()), new ClusterRouterGroupSettings(totalInstances, routeesPaths, - allowLocalRoutees, useRole)).props(), "factorialBackendRouter2"); + allowLocalRoutees, useRoles)).props(), "factorialBackendRouter2"); //#router-lookup-in-code } @@ -93,12 +95,12 @@ abstract class FactorialFrontend3 extends AbstractActor { int totalInstances = 100; int maxInstancesPerNode = 3; boolean allowLocalRoutees = false; - String useRole = "backend"; + Set useRoles = new HashSet<>(Arrays.asList("backend")); ActorRef backend = getContext().actorOf( new ClusterRouterPool(new AdaptiveLoadBalancingPool( SystemLoadAverageMetricsSelector.getInstance(), 0), new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, - allowLocalRoutees, useRole)).props(Props + allowLocalRoutees, useRoles)).props(Props .create(FactorialBackend.class)), "factorialBackendRouter3"); //#router-deploy-in-code } diff --git a/akka-docs/src/test/java/jdocs/cluster/StatsService.java b/akka-docs/src/test/java/jdocs/cluster/StatsService.java index 61509307c1..851522b170 100644 --- a/akka-docs/src/test/java/jdocs/cluster/StatsService.java +++ b/akka-docs/src/test/java/jdocs/cluster/StatsService.java @@ -13,7 +13,10 @@ import akka.actor.AbstractActor; import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope; import akka.routing.FromConfig; +import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; +import java.util.Set; //#service public class StatsService extends AbstractActor { @@ -55,11 +58,11 @@ abstract class StatsService2 extends AbstractActor { Iterable routeesPaths = Collections .singletonList("/user/statsWorker"); boolean allowLocalRoutees = true; - String useRole = "compute"; + Set useRoles = new HashSet<>(Arrays.asList("compute")); ActorRef workerRouter = getContext().actorOf( new ClusterRouterGroup(new ConsistentHashingGroup(routeesPaths), new ClusterRouterGroupSettings(totalInstances, routeesPaths, - allowLocalRoutees, useRole)).props(), "workerRouter2"); + allowLocalRoutees, useRoles)).props(), "workerRouter2"); //#router-lookup-in-code } @@ -69,11 +72,11 @@ abstract class StatsService3 extends AbstractActor { int totalInstances = 100; int maxInstancesPerNode = 3; boolean allowLocalRoutees = false; - String useRole = "compute"; + Set useRoles = new HashSet<>(Arrays.asList("compute")); ActorRef workerRouter = getContext().actorOf( new ClusterRouterPool(new ConsistentHashingPool(0), new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, - allowLocalRoutees, useRole)).props(Props + allowLocalRoutees, useRoles)).props(Props .create(StatsWorker.class)), "workerRouter3"); //#router-deploy-in-code } diff --git a/akka-docs/src/test/java/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java b/akka-docs/src/test/java/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java index 1001c790fa..2b01181b43 100644 --- a/akka-docs/src/test/java/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java +++ b/akka-docs/src/test/java/jdocs/dispatcher/MyUnboundedMessageQueueSemantics.java @@ -4,8 +4,8 @@ package jdocs.dispatcher; -//#mailbox-implementation-example +//#mailbox-marker-interface // Marker interface used for mailbox requirements mapping public interface MyUnboundedMessageQueueSemantics { } -//#mailbox-implementation-example +//#mailbox-marker-interface diff --git a/akka-docs/src/test/java/jdocs/future/ActorWithFuture.java b/akka-docs/src/test/java/jdocs/future/ActorWithFuture.java new file mode 100644 index 0000000000..9d387fbd1c --- /dev/null +++ 
b/akka-docs/src/test/java/jdocs/future/ActorWithFuture.java @@ -0,0 +1,17 @@ +package jdocs.future; + +//#context-dispatcher +import akka.actor.AbstractActor; +import akka.dispatch.Futures; + +public class ActorWithFuture extends AbstractActor { + ActorWithFuture(){ + Futures.future(() -> "hello", getContext().dispatcher()); + } + + @Override + public Receive createReceive() { + return AbstractActor.emptyBehavior(); + } +} +// #context-dispatcher diff --git a/akka-docs/src/test/java/jdocs/future/FutureDocTest.java b/akka-docs/src/test/java/jdocs/future/FutureDocTest.java index 60bb2ddaba..f4fb73d864 100644 --- a/akka-docs/src/test/java/jdocs/future/FutureDocTest.java +++ b/akka-docs/src/test/java/jdocs/future/FutureDocTest.java @@ -11,6 +11,8 @@ import scala.concurrent.Future; import scala.concurrent.Await; import scala.concurrent.Promise; import akka.util.Timeout; + + //#imports1 //#imports2 @@ -19,27 +21,39 @@ import akka.japi.Function; import java.util.concurrent.Callable; import static akka.dispatch.Futures.future; import static java.util.concurrent.TimeUnit.SECONDS; + + //#imports2 //#imports3 import static akka.dispatch.Futures.sequence; + + //#imports3 //#imports4 import static akka.dispatch.Futures.traverse; + + //#imports4 //#imports5 import akka.japi.Function2; import static akka.dispatch.Futures.fold; + + //#imports5 //#imports6 import static akka.dispatch.Futures.reduce; + + //#imports6 //#imports7 import static akka.pattern.Patterns.after; + + import java.util.Arrays; //#imports7 diff --git a/akka-docs/src/test/java/jdocs/pattern/SchedulerPatternTest.java b/akka-docs/src/test/java/jdocs/pattern/SchedulerPatternTest.java deleted file mode 100644 index 237abd2799..0000000000 --- a/akka-docs/src/test/java/jdocs/pattern/SchedulerPatternTest.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Copyright (C) 2009-2017 Lightbend Inc. 
- */ - -package jdocs.pattern; - -import akka.actor.*; -import akka.testkit.*; -import akka.testkit.TestEvent.Mute; -import akka.testkit.TestEvent.UnMute; -import jdocs.AbstractJavaTest; -import akka.testkit.javadsl.TestKit; -import org.junit.*; -import scala.concurrent.duration.Duration; -import scala.concurrent.duration.FiniteDuration; -import java.util.Arrays; -import java.util.concurrent.TimeUnit; - -public class SchedulerPatternTest extends AbstractJavaTest { - - @ClassRule - public static AkkaJUnitActorSystemResource actorSystemResource = - new AkkaJUnitActorSystemResource("SchedulerPatternTest", AkkaSpec.testConf()); - - private final ActorSystem system = actorSystemResource.getSystem(); - - static - //#schedule-constructor - public class ScheduleInConstructor extends AbstractActor { - - private final Cancellable tick = getContext().getSystem().scheduler().schedule( - Duration.create(500, TimeUnit.MILLISECONDS), - Duration.create(1, TimeUnit.SECONDS), - getSelf(), "tick", getContext().dispatcher(), null); - //#schedule-constructor - // this variable and constructor is declared here to not show up in the docs - final ActorRef target; - public ScheduleInConstructor(ActorRef target) { - this.target = target; - } - //#schedule-constructor - - @Override - public void postStop() { - tick.cancel(); - } - - @Override - public Receive createReceive() { - return receiveBuilder() - .matchEquals("tick", message -> { - // do something useful here - //#schedule-constructor - target.tell(message, getSelf()); - //#schedule-constructor - }) - .matchEquals("restart", message -> { - throw new ArithmeticException(); - }) - .build(); - } - } - //#schedule-constructor - - static - //#schedule-receive - public class ScheduleInReceive extends AbstractActor { - //#schedule-receive - // this variable and constructor is declared here to not show up in the docs - final ActorRef target; - public ScheduleInReceive(ActorRef target) { - this.target = target; - } - //#schedule-receive - - @Override - public void preStart() { - getContext().getSystem().scheduler().scheduleOnce( - Duration.create(500, TimeUnit.MILLISECONDS), - getSelf(), "tick", getContext().dispatcher(), null); - } - - // override postRestart so we don't call preStart and schedule a new message - @Override - public void postRestart(Throwable reason) { - } - - @Override - public Receive createReceive() { - return receiveBuilder() - .matchEquals("tick", message -> { - // send another periodic tick after the specified delay - getContext().getSystem().scheduler().scheduleOnce( - Duration.create(1, TimeUnit.SECONDS), - getSelf(), "tick", getContext().dispatcher(), null); - // do something useful here - //#schedule-receive - target.tell(message, getSelf()); - //#schedule-receive - }) - .matchEquals("restart", message -> { - throw new ArithmeticException(); - }) - .build(); - } - } - //#schedule-receive - - @Test - @Ignore // no way to tag this as timing sensitive - public void scheduleInConstructor() { - new TestSchedule(system) {{ - final TestKit probe = new TestKit(system); - final Props props = Props.create(ScheduleInConstructor.class, probe.getRef()); - testSchedule(probe, props, duration("3000 millis"), duration("2000 millis")); - }}; - } - - @Test - @Ignore // no way to tag this as timing sensitive - public void scheduleInReceive() { - new TestSchedule(system) {{ - final TestKit probe = new TestKit(system); - final Props props = Props.create(ScheduleInReceive.class, probe.getRef()); - testSchedule(probe, props, duration("3000 millis"), 
duration("2500 millis")); - }}; - } - - @Test - public void doNothing() { - // actorSystemResource.after is not called when all tests are ignored - } - - public static class TestSchedule extends TestKit { - private ActorSystem system; - - public TestSchedule(ActorSystem system) { - super(system); - this.system = system; - } - - public void testSchedule(final TestKit probe, Props props, - FiniteDuration startDuration, - FiniteDuration afterRestartDuration) { - Iterable filter = - Arrays.asList(new akka.testkit.EventFilter[]{ - (akka.testkit.EventFilter) new ErrorFilter(ArithmeticException.class)}); - try { - system.eventStream().publish(new Mute(filter)); - - final ActorRef actor = system.actorOf(props); - within(startDuration, () -> { - probe.expectMsgEquals("tick"); - probe.expectMsgEquals("tick"); - probe.expectMsgEquals("tick"); - return null; - }); - - actor.tell("restart", getRef()); - within(afterRestartDuration, () -> { - probe.expectMsgEquals("tick"); - probe.expectMsgEquals("tick"); - return null; - }); - - system.stop(actor); - } - finally { - system.eventStream().publish(new UnMute(filter)); - } - } - } -} diff --git a/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java b/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java index 3570f8c0f7..6c4bfe6c25 100644 --- a/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java +++ b/akka-docs/src/test/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java @@ -6,6 +6,8 @@ package jdocs.persistence; import docs.persistence.ExampleJsonMarshaller; import docs.persistence.proto.FlightAppModels; + +import java.io.NotSerializableException; import java.nio.charset.Charset; import spray.json.JsObject; @@ -72,7 +74,7 @@ public class PersistenceSchemaEvolutionDocTest { return o.getClass().getName(); } - @Override public Object fromBinary(byte[] bytes, String manifest) { + @Override public Object fromBinary(byte[] bytes, String manifest) throws NotSerializableException{ if (seatReservedManifest.equals(manifest)) { // use generated protobuf serializer try { @@ -81,7 +83,7 @@ public class PersistenceSchemaEvolutionDocTest { throw new IllegalArgumentException(e.getMessage()); } } else { - throw new IllegalArgumentException("Unable to handle manifest: " + manifest); + throw new NotSerializableException("Unable to handle manifest: " + manifest); } } @@ -208,13 +210,13 @@ public class PersistenceSchemaEvolutionDocTest { } // deserialize the object, using the manifest to indicate which logic to apply - @Override public Object fromBinary(byte[] bytes, String manifest) { + @Override public Object fromBinary(byte[] bytes, String manifest) throws NotSerializableException { if (personManifest.equals(manifest)) { String nameAndSurname = new String(bytes, utf8); String[] parts = nameAndSurname.split("[|]"); return new Person(parts[0], parts[1]); } else { - throw new IllegalArgumentException( + throw new NotSerializableException( "Unable to deserialize from bytes, manifest was: " + manifest + "! 
Bytes length: " + bytes.length); } @@ -412,12 +414,12 @@ public class PersistenceSchemaEvolutionDocTest { } } - @Override public Object fromBinary(byte[] bytes, String manifest) { + @Override public Object fromBinary(byte[] bytes, String manifest) throws NotSerializableException { if (oldPayloadClassName.equals(manifest)) return new SamplePayload(new String(bytes, utf8)); else if (myPayloadClassName.equals(manifest)) return new SamplePayload(new String(bytes, utf8)); - else throw new IllegalArgumentException("unexpected manifest [" + manifest + "]"); + else throw new NotSerializableException("unexpected manifest [" + manifest + "]"); } } //#string-serializer-handle-rename diff --git a/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java b/akka-docs/src/test/java/jdocs/sharding/ClusterShardingTest.java similarity index 96% rename from akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java rename to akka-docs/src/test/java/jdocs/sharding/ClusterShardingTest.java index 2c480da21e..21c4d819c8 100644 --- a/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java +++ b/akka-docs/src/test/java/jdocs/sharding/ClusterShardingTest.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2017 Lightbend Inc. */ -package akka.cluster.sharding; +package jdocs.sharding; import static java.util.concurrent.TimeUnit.SECONDS; @@ -19,7 +19,17 @@ import akka.actor.Props; import akka.actor.SupervisorStrategy; import akka.actor.Terminated; import akka.actor.ReceiveTimeout; +//#counter-extractor +import akka.cluster.sharding.ShardRegion; + +//#counter-extractor + +//#counter-start import akka.japi.Option; +import akka.cluster.sharding.ClusterSharding; +import akka.cluster.sharding.ClusterShardingSettings; + +//#counter-start import akka.persistence.AbstractPersistentActor; import akka.cluster.Cluster; import akka.japi.pf.DeciderBuilder; diff --git a/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java b/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java index 01f3acae09..e0b92a6131 100644 --- a/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java +++ b/akka-docs/src/test/java/jdocs/stream/FlowErrorDocTest.java @@ -11,6 +11,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import akka.NotUsed; +import akka.japi.pf.PFBuilder; import jdocs.AbstractJavaTest; import akka.testkit.javadsl.TestKit; import org.junit.AfterClass; @@ -138,4 +139,64 @@ public class FlowErrorDocTest extends AbstractJavaTest { result.toCompletableFuture().get(3, TimeUnit.SECONDS)); } + @Test + public void demonstrateRecover() { + //#recover + final Materializer mat = ActorMaterializer.create(system); + Source.from(Arrays.asList(0, 1, 2, 3, 4, 5, 6)).map(n -> { + if (n < 5) return n.toString(); + else throw new RuntimeException("Boom!"); + }).recover(new PFBuilder() + .match(RuntimeException.class, ex -> "stream truncated") + .build() + ).runForeach(System.out::println, mat); + //#recover + +/* +Output: +//#recover-output +0 +1 +2 +3 +4 +stream truncated +//#recover-output +*/ + } + + @Test + public void demonstrateRecoverWithRetries() { + //#recoverWithRetries + final Materializer mat = ActorMaterializer.create(system); + Source planB = Source.from(Arrays.asList("five", "six", "seven", "eight")); + + Source.from(Arrays.asList(0, 1, 2, 3, 4, 5, 6)).map(n -> { + if (n < 5) return n.toString(); + else throw new RuntimeException("Boom!"); + }).recoverWithRetries( + 1, // max attempts + new PFBuilder() + 
.match(RuntimeException.class, ex -> planB) + .build() + ).runForeach(System.out::println, mat); + //#recoverWithRetries + +/* +Output: +//#recoverWithRetries-output +0 +1 +2 +3 +4 +five +six +seven +eight +//#recoverWithRetries-output + */ + } + + } diff --git a/akka-docs/src/test/java/jdocs/stream/GraphStageLoggingDocTest.java b/akka-docs/src/test/java/jdocs/stream/GraphStageLoggingDocTest.java index 2de4df547d..c0e267b653 100644 --- a/akka-docs/src/test/java/jdocs/stream/GraphStageLoggingDocTest.java +++ b/akka-docs/src/test/java/jdocs/stream/GraphStageLoggingDocTest.java @@ -9,7 +9,13 @@ import akka.stream.Attributes; import akka.stream.Materializer; import akka.stream.Outlet; import akka.stream.SourceShape; -import akka.stream.stage.*; +//#stage-with-logging +import akka.stream.stage.AbstractOutHandler; +import akka.stream.stage.GraphStage; +import akka.stream.stage.GraphStageLogic; +import akka.stream.stage.GraphStageLogicWithLogging; + +//#stage-with-logging import jdocs.AbstractJavaTest; import org.junit.Test; @@ -21,7 +27,7 @@ public class GraphStageLoggingDocTest extends AbstractJavaTest { @Test public void compileOnlyTestClass() throws Exception { } - + //#stage-with-logging public class RandomLettersSource extends GraphStage> { public final Outlet out = Outlet.create("RandomLettersSource.in"); diff --git a/akka-docs/src/test/java/jdocs/stream/HubDocTest.java b/akka-docs/src/test/java/jdocs/stream/HubDocTest.java index 9e6ea667b6..63746772d2 100644 --- a/akka-docs/src/test/java/jdocs/stream/HubDocTest.java +++ b/akka-docs/src/test/java/jdocs/stream/HubDocTest.java @@ -11,17 +11,25 @@ import akka.japi.Pair; import akka.stream.ActorMaterializer; import akka.stream.KillSwitches; import akka.stream.Materializer; +import akka.stream.ThrottleMode; import akka.stream.UniqueKillSwitch; import akka.stream.javadsl.*; +import akka.stream.javadsl.PartitionHub.ConsumerInfo; + import jdocs.AbstractJavaTest; import akka.testkit.javadsl.TestKit; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; +import scala.concurrent.duration.Duration; import scala.concurrent.duration.FiniteDuration; +import java.util.List; import java.util.concurrent.CompletionStage; import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; +import java.util.function.Supplier; +import java.util.function.ToLongBiFunction; public class HubDocTest extends AbstractJavaTest { @@ -137,4 +145,136 @@ public class HubDocTest extends AbstractJavaTest { killSwitch.shutdown(); //#pub-sub-4 } + + @Test + public void dynamicPartition() { + // Used to be able to clean up the running stream + ActorMaterializer materializer = ActorMaterializer.create(system); + + //#partition-hub + // A simple producer that publishes a new "message-n" every second + Source producer = Source.tick( + FiniteDuration.create(1, TimeUnit.SECONDS), + FiniteDuration.create(1, TimeUnit.SECONDS), + "message" + ).zipWith(Source.range(0, 100), (a, b) -> a + "-" + b); + + // Attach a PartitionHub Sink to the producer. This will materialize to a + // corresponding Source. + // (We need to use toMat and Keep.right since by default the materialized + // value to the left is used) + RunnableGraph> runnableGraph = + producer.toMat(PartitionHub.of( + String.class, + (size, elem) -> Math.abs(elem.hashCode()) % size, + 2, 256), Keep.right()); + + // By running/materializing the producer, we get back a Source, which + // gives us access to the elements published by the producer. 
+ Source fromProducer = runnableGraph.run(materializer); + + // Print out messages from the producer in two independent consumers + fromProducer.runForeach(msg -> System.out.println("consumer1: " + msg), materializer); + fromProducer.runForeach(msg -> System.out.println("consumer2: " + msg), materializer); + //#partition-hub + + // Cleanup + materializer.shutdown(); + } + + //#partition-hub-stateful-function + // Using a class since variable must otherwise be final. + // New instance is created for each materialization of the PartitionHub. + static class RoundRobin implements ToLongBiFunction { + + private long i = -1; + + @Override + public long applyAsLong(ConsumerInfo info, T elem) { + i++; + return info.consumerIdByIdx((int) (i % info.size())); + } + } + //#partition-hub-stateful-function + + @Test + public void dynamicStatefulPartition() { + // Used to be able to clean up the running stream + ActorMaterializer materializer = ActorMaterializer.create(system); + + //#partition-hub-stateful + // A simple producer that publishes a new "message-n" every second + Source producer = Source.tick( + FiniteDuration.create(1, TimeUnit.SECONDS), + FiniteDuration.create(1, TimeUnit.SECONDS), + "message" + ).zipWith(Source.range(0, 100), (a, b) -> a + "-" + b); + + // Attach a PartitionHub Sink to the producer. This will materialize to a + // corresponding Source. + // (We need to use toMat and Keep.right since by default the materialized + // value to the left is used) + RunnableGraph> runnableGraph = + producer.toMat( + PartitionHub.ofStateful( + String.class, + () -> new RoundRobin(), + 2, + 256), + Keep.right()); + + // By running/materializing the producer, we get back a Source, which + // gives us access to the elements published by the producer. + Source fromProducer = runnableGraph.run(materializer); + + // Print out messages from the producer in two independent consumers + fromProducer.runForeach(msg -> System.out.println("consumer1: " + msg), materializer); + fromProducer.runForeach(msg -> System.out.println("consumer2: " + msg), materializer); + //#partition-hub-stateful + + // Cleanup + materializer.shutdown(); + } + + @Test + public void dynamicFastestPartition() { + // Used to be able to clean up the running stream + ActorMaterializer materializer = ActorMaterializer.create(system); + + //#partition-hub-fastest + Source producer = Source.range(0, 100); + + // ConsumerInfo.queueSize is the approximate number of buffered elements for a consumer. + // Note that this is a moving target since the elements are consumed concurrently. 
+ RunnableGraph> runnableGraph = + producer.toMat( + PartitionHub.ofStateful( + Integer.class, + () -> (info, elem) -> { + final List ids = info.getConsumerIds(); + int minValue = info.queueSize(0); + long fastest = info.consumerIdByIdx(0); + for (int i = 1; i < ids.size(); i++) { + int value = info.queueSize(i); + if (value < minValue) { + minValue = value; + fastest = info.consumerIdByIdx(i); + } + } + return fastest; + }, + 2, + 8), + Keep.right()); + + Source fromProducer = runnableGraph.run(materializer); + + fromProducer.runForeach(msg -> System.out.println("consumer1: " + msg), materializer); + fromProducer.throttle(10, Duration.create(100, TimeUnit.MILLISECONDS), 10, ThrottleMode.shaping()) + .runForeach(msg -> System.out.println("consumer2: " + msg), materializer); + //#partition-hub-fastest + + // Cleanup + materializer.shutdown(); + } } diff --git a/akka-docs/src/test/java/jdocs/stream/RestartDocTest.java b/akka-docs/src/test/java/jdocs/stream/RestartDocTest.java new file mode 100644 index 0000000000..bd909620c2 --- /dev/null +++ b/akka-docs/src/test/java/jdocs/stream/RestartDocTest.java @@ -0,0 +1,86 @@ +/** + * Copyright (C) 2015-2017 Lightbend Inc. + */ +package jdocs.stream; + +import akka.NotUsed; +import akka.actor.ActorSystem; +import akka.stream.KillSwitch; +import akka.stream.KillSwitches; +import akka.stream.Materializer; +import akka.stream.javadsl.*; +import scala.concurrent.duration.Duration; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.TimeUnit; + +public class RestartDocTest { + + static ActorSystem system; + static Materializer materializer; + + // Mocking akka-http + public static class Http { + public static Http get(ActorSystem system) { + return new Http(); + } + public CompletionStage singleRequest(String uri) { + return new CompletableFuture<>(); + } + public NotUsed entity() { + return NotUsed.getInstance(); + } + } + public static class HttpRequest { + public static String create(String uri) { + return uri; + } + } + public static class ServerSentEvent {} + public static class EventStreamUnmarshalling { + public static EventStreamUnmarshalling fromEventStream() { + return new EventStreamUnmarshalling(); + } + public CompletionStage> unmarshall(Http http, Materializer mat) { + return new CompletableFuture<>(); + } + } + public void doSomethingElse() { + + } + + public void recoverWithBackoffSource() { + //#restart-with-backoff-source + Source eventStream = RestartSource.withBackoff( + Duration.apply(3, TimeUnit.SECONDS), // min backoff + Duration.apply(30, TimeUnit.SECONDS), // max backoff + 0.2, // adds 20% "noise" to vary the intervals slightly + + () -> + // Create a source from a future of a source + Source.fromSourceCompletionStage( + // Issue a GET request on the event stream + Http.get(system).singleRequest(HttpRequest.create("http://example.com/eventstream")) + .thenCompose(response -> + // Unmarshall it to a stream of ServerSentEvents + EventStreamUnmarshalling.fromEventStream() + .unmarshall(response, materializer) + ) + ) + ); + //#restart-with-backoff-source + + //#with-kill-switch + KillSwitch killSwitch = eventStream + .viaMat(KillSwitches.single(), Keep.right()) + .toMat(Sink.foreach(event -> System.out.println("Got event: " + event)), Keep.left()) + .run(materializer); + + doSomethingElse(); + + killSwitch.shutdown(); + //#with-kill-switch + + } +} \ No newline at end of file diff --git a/akka-docs/src/test/java/jdocs/stream/io/StreamTcpDocTest.java 
b/akka-docs/src/test/java/jdocs/stream/io/StreamTcpDocTest.java index e5edc2f4de..5006ed412a 100644 --- a/akka-docs/src/test/java/jdocs/stream/io/StreamTcpDocTest.java +++ b/akka-docs/src/test/java/jdocs/stream/io/StreamTcpDocTest.java @@ -64,7 +64,7 @@ public class StreamTcpDocTest extends AbstractJavaTest { //#echo-server-simple-bind // IncomingConnection and ServerBinding imported from Tcp final Source> connections = - Tcp.get(system).bind("127.0.0.1", 8889); + Tcp.get(system).bind("127.0.0.1", 8888); //#echo-server-simple-bind } { @@ -133,7 +133,7 @@ public class StreamTcpDocTest extends AbstractJavaTest { { //#repl-client final Flow> connection = - Tcp.get(system).outgoingConnection("127.0.0.1", 8889); + Tcp.get(system).outgoingConnection("127.0.0.1", 8888); //#repl-client } diff --git a/akka-docs/src/test/java/jdocs/tutorial_1/ActorHierarchyExperiments.java b/akka-docs/src/test/java/jdocs/tutorial_1/ActorHierarchyExperiments.java index 006643b675..1d45a29fff 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_1/ActorHierarchyExperiments.java +++ b/akka-docs/src/test/java/jdocs/tutorial_1/ActorHierarchyExperiments.java @@ -1,10 +1,8 @@ -package jdocs.tutorial_1; +//#print-refs +package com.lightbend.akka.sample; + +//#print-refs -import akka.actor.AbstractActor; -import akka.actor.AbstractActor.Receive; -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; import akka.testkit.javadsl.TestKit; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -12,6 +10,12 @@ import org.junit.Test; import org.scalatest.junit.JUnitSuite; //#print-refs +import akka.actor.AbstractActor; +import akka.actor.AbstractActor.Receive; +import akka.actor.ActorRef; +import akka.actor.ActorSystem; +import akka.actor.Props; + class PrintMyActorRefActor extends AbstractActor { @Override public Receive createReceive() { @@ -106,6 +110,26 @@ class SupervisedActor extends AbstractActor { } //#supervise +//#print-refs +public class ActorHierarchyExperiments { + public static void main(String[] args) throws java.io.IOException { + ActorSystem system = ActorSystem.create("test"); + + ActorRef firstRef = system.actorOf(Props.create(PrintMyActorRefActor.class), "first-actor"); + System.out.println("First: " + firstRef); + firstRef.tell("printit", ActorRef.noSender()); + + System.out.println(">>> Press ENTER to exit <<<"); + try { + System.in.read(); + } finally { + system.terminate(); + } + } +} +//#print-refs + + class ActorHierarchyExperimentsTest extends JUnitSuite { static ActorSystem system; @@ -120,28 +144,19 @@ class ActorHierarchyExperimentsTest extends JUnitSuite { system = null; } - @Test - public void testCreateTopAndChildActor() { - //#print-refs - ActorRef firstRef = system.actorOf(Props.create(PrintMyActorRefActor.class), "first-actor"); - System.out.println("First : " + firstRef); - firstRef.tell("printit", ActorRef.noSender()); - //#print-refs - } - @Test public void testStartAndStopActors() { - //#start-stop + //#start-stop-main ActorRef first = system.actorOf(Props.create(StartStopActor1.class), "first"); first.tell("stop", ActorRef.noSender()); - //#start-stop + //#start-stop-main } @Test public void testSuperviseActors() { - //#supervise + //#supervise-main ActorRef supervisingActor = system.actorOf(Props.create(SupervisingActor.class), "supervising-actor"); supervisingActor.tell("failChild", ActorRef.noSender()); - //#supervise + //#supervise-main } } diff --git a/akka-docs/src/test/java/jdocs/tutorial_1/IotMain.java 
b/akka-docs/src/test/java/jdocs/tutorial_2/IotMain.java similarity index 94% rename from akka-docs/src/test/java/jdocs/tutorial_1/IotMain.java rename to akka-docs/src/test/java/jdocs/tutorial_2/IotMain.java index 49b49d9b23..f78505d4f2 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_1/IotMain.java +++ b/akka-docs/src/test/java/jdocs/tutorial_2/IotMain.java @@ -1,9 +1,9 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package jdocs.tutorial_1; //#iot-app +package com.lightbend.akka.sample; import java.io.IOException; diff --git a/akka-docs/src/test/java/jdocs/tutorial_1/IotSupervisor.java b/akka-docs/src/test/java/jdocs/tutorial_2/IotSupervisor.java similarity index 95% rename from akka-docs/src/test/java/jdocs/tutorial_1/IotSupervisor.java rename to akka-docs/src/test/java/jdocs/tutorial_2/IotSupervisor.java index 07f3d28c51..f19f19beb1 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_1/IotSupervisor.java +++ b/akka-docs/src/test/java/jdocs/tutorial_2/IotSupervisor.java @@ -1,9 +1,9 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package jdocs.tutorial_1; //#iot-supervisor +package com.lightbend.akka.sample; import akka.actor.AbstractActor; import akka.actor.ActorLogging; diff --git a/akka-docs/src/test/java/jdocs/tutorial_3/Device.java b/akka-docs/src/test/java/jdocs/tutorial_3/Device.java index 3bc1eae1a8..52ac56b89a 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_3/Device.java +++ b/akka-docs/src/test/java/jdocs/tutorial_3/Device.java @@ -3,18 +3,16 @@ */ package jdocs.tutorial_3; -//#device-with-register +//#full-device + +import java.util.Optional; import akka.actor.AbstractActor; +import akka.actor.AbstractActor.Receive; import akka.actor.Props; import akka.event.Logging; import akka.event.LoggingAdapter; -import jdocs.tutorial_3.DeviceManager.DeviceRegistered; -import jdocs.tutorial_3.DeviceManager.RequestTrackDevice; - -import java.util.Optional; - public class Device extends AbstractActor { private final LoggingAdapter log = Logging.getLogger(getContext().getSystem(), this); @@ -82,16 +80,6 @@ public class Device extends AbstractActor { @Override public Receive createReceive() { return receiveBuilder() - .match(RequestTrackDevice.class, r -> { - if (this.groupId.equals(r.groupId) && this.deviceId.equals(r.deviceId)) { - getSender().tell(new DeviceRegistered(), getSelf()); - } else { - log.warning( - "Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.", - r.groupId, r.deviceId, this.groupId, this.deviceId - ); - } - }) .match(RecordTemperature.class, r -> { log.info("Recorded temperature reading {} with {}", r.value, r.requestId); lastTemperatureReading = Optional.of(r.value); @@ -103,4 +91,4 @@ public class Device extends AbstractActor { .build(); } } -//#device-with-register +//#full-device diff --git a/akka-docs/src/test/java/jdocs/tutorial_2/DeviceInProgress.java b/akka-docs/src/test/java/jdocs/tutorial_3/DeviceInProgress.java similarity index 59% rename from akka-docs/src/test/java/jdocs/tutorial_2/DeviceInProgress.java rename to akka-docs/src/test/java/jdocs/tutorial_3/DeviceInProgress.java index 6b1801c3a5..da7f797b72 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_2/DeviceInProgress.java +++ b/akka-docs/src/test/java/jdocs/tutorial_3/DeviceInProgress.java @@ -1,11 +1,11 @@ -package jdocs.tutorial_2; +package jdocs.tutorial_3; import java.util.Optional; -import jdocs.tutorial_2.Device.ReadTemperature; -import jdocs.tutorial_2.Device.RecordTemperature; -import jdocs.tutorial_2.Device.RespondTemperature; -import 
jdocs.tutorial_2.Device.TemperatureRecorded; +import jdocs.tutorial_3.Device.ReadTemperature; +import jdocs.tutorial_3.Device.RecordTemperature; +import jdocs.tutorial_3.Device.RespondTemperature; +import jdocs.tutorial_3.Device.TemperatureRecorded; class DeviceInProgress1 { diff --git a/akka-docs/src/test/java/jdocs/tutorial_2/DeviceInProgress3.java b/akka-docs/src/test/java/jdocs/tutorial_3/DeviceInProgress3.java similarity index 89% rename from akka-docs/src/test/java/jdocs/tutorial_2/DeviceInProgress3.java rename to akka-docs/src/test/java/jdocs/tutorial_3/DeviceInProgress3.java index 6deb67678a..65b4ca90a6 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_2/DeviceInProgress3.java +++ b/akka-docs/src/test/java/jdocs/tutorial_3/DeviceInProgress3.java @@ -1,4 +1,4 @@ -package jdocs.tutorial_2; +package jdocs.tutorial_3; class DeviceInProgress3 { diff --git a/akka-docs/src/test/java/jdocs/tutorial_3/DeviceTest.java b/akka-docs/src/test/java/jdocs/tutorial_3/DeviceTest.java index 156641e3de..49eece191f 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_3/DeviceTest.java +++ b/akka-docs/src/test/java/jdocs/tutorial_3/DeviceTest.java @@ -3,9 +3,7 @@ */ package jdocs.tutorial_3; -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.testkit.javadsl.TestKit; +import java.util.Optional; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -14,7 +12,9 @@ import static org.junit.Assert.assertEquals; import org.scalatest.junit.JUnitSuite; -import java.util.Optional; +import akka.actor.ActorSystem; +import akka.actor.ActorRef; +import akka.testkit.javadsl.TestKit; public class DeviceTest extends JUnitSuite { @@ -31,30 +31,6 @@ public class DeviceTest extends JUnitSuite { system = null; } - //#device-registration-tests - @Test - public void testReplyToRegistrationRequests() { - TestKit probe = new TestKit(system); - ActorRef deviceActor = system.actorOf(Device.props("group", "device")); - - deviceActor.tell(new DeviceManager.RequestTrackDevice("group", "device"), probe.getRef()); - probe.expectMsgClass(DeviceManager.DeviceRegistered.class); - assertEquals(deviceActor, probe.getLastSender()); - } - - @Test - public void testIgnoreWrongRegistrationRequests() { - TestKit probe = new TestKit(system); - ActorRef deviceActor = system.actorOf(Device.props("group", "device")); - - deviceActor.tell(new DeviceManager.RequestTrackDevice("wrongGroup", "device"), probe.getRef()); - probe.expectNoMsg(); - - deviceActor.tell(new DeviceManager.RequestTrackDevice("group", "wrongDevice"), probe.getRef()); - probe.expectNoMsg(); - } - //#device-registration-tests - //#device-read-test @Test public void testReplyWithEmptyReadingIfNoTemperatureIsKnown() { diff --git a/akka-docs/src/test/java/jdocs/tutorial_2/inprogress2/DeviceInProgress2.java b/akka-docs/src/test/java/jdocs/tutorial_3/inprogress2/DeviceInProgress2.java similarity index 97% rename from akka-docs/src/test/java/jdocs/tutorial_2/inprogress2/DeviceInProgress2.java rename to akka-docs/src/test/java/jdocs/tutorial_3/inprogress2/DeviceInProgress2.java index 2a46359392..c30fffa2af 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_2/inprogress2/DeviceInProgress2.java +++ b/akka-docs/src/test/java/jdocs/tutorial_3/inprogress2/DeviceInProgress2.java @@ -1,4 +1,4 @@ -package jdocs.tutorial_2.inprogress2; +package jdocs.tutorial_3.inprogress2; //#device-with-read diff --git a/akka-docs/src/test/java/jdocs/tutorial_4/Device.java b/akka-docs/src/test/java/jdocs/tutorial_4/Device.java index 766dcd37a3..3eefeee518 100644 --- 
a/akka-docs/src/test/java/jdocs/tutorial_4/Device.java +++ b/akka-docs/src/test/java/jdocs/tutorial_4/Device.java @@ -3,6 +3,8 @@ */ package jdocs.tutorial_4; +//#device-with-register + import akka.actor.AbstractActor; import akka.actor.Props; import akka.event.Logging; @@ -101,3 +103,4 @@ public class Device extends AbstractActor { .build(); } } +//#device-with-register diff --git a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroup.java b/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroup.java index 3e41da0e8b..99939cd854 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroup.java +++ b/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroup.java @@ -3,20 +3,21 @@ */ package jdocs.tutorial_4; +import java.util.Set; +import java.util.Map; +import java.util.HashMap; + import akka.actor.AbstractActor; import akka.actor.ActorRef; import akka.actor.Props; import akka.actor.Terminated; import akka.event.Logging; import akka.event.LoggingAdapter; -import scala.concurrent.duration.FiniteDuration; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; +import jdocs.tutorial_4.Device; +import jdocs.tutorial_4.DeviceManager; -//#query-added +//#device-group-full public class DeviceGroup extends AbstractActor { private final LoggingAdapter log = Logging.getLogger(getContext().getSystem(), this); @@ -26,9 +27,11 @@ public class DeviceGroup extends AbstractActor { this.groupId = groupId; } + //#device-group-register public static Props props(String groupId) { return Props.create(DeviceGroup.class, groupId); } + //#device-group-register public static final class RequestDeviceList { final long requestId; @@ -47,51 +50,15 @@ public class DeviceGroup extends AbstractActor { this.ids = ids; } } - - //#query-protocol - public static final class RequestAllTemperatures { - final long requestId; - - public RequestAllTemperatures(long requestId) { - this.requestId = requestId; - } - } - - public static final class RespondAllTemperatures { - final long requestId; - final Map temperatures; - - public RespondAllTemperatures(long requestId, Map temperatures) { - this.requestId = requestId; - this.temperatures = temperatures; - } - } - - public static interface TemperatureReading { - } - - public static final class Temperature implements TemperatureReading { - public final double value; - - public Temperature(double value) { - this.value = value; - } - } - - public static final class TemperatureNotAvailable implements TemperatureReading { - } - - public static final class DeviceNotAvailable implements TemperatureReading { - } - - public static final class DeviceTimedOut implements TemperatureReading { - } - //#query-protocol - + //#device-group-register +//#device-group-register +//#device-group-register +//#device-group-remove final Map deviceIdToActor = new HashMap<>(); + //#device-group-register final Map actorToDeviceId = new HashMap<>(); - final long nextCollectionId = 0L; + //#device-group-register @Override public void preStart() { @@ -103,19 +70,20 @@ public class DeviceGroup extends AbstractActor { log.info("DeviceGroup {} stopped", groupId); } - //#query-added private void onTrackDevice(DeviceManager.RequestTrackDevice trackMsg) { if (this.groupId.equals(trackMsg.groupId)) { - ActorRef ref = deviceIdToActor.get(trackMsg.deviceId); - if (ref != null) { - ref.forward(trackMsg, getContext()); + ActorRef deviceActor = deviceIdToActor.get(trackMsg.deviceId); + if (deviceActor != null) { + deviceActor.forward(trackMsg, getContext()); } else { 
log.info("Creating device actor for {}", trackMsg.deviceId); - ActorRef deviceActor = getContext().actorOf(Device.props(groupId, trackMsg.deviceId), "device-" + trackMsg.deviceId); + deviceActor = getContext().actorOf(Device.props(groupId, trackMsg.deviceId), "device-" + trackMsg.deviceId); + //#device-group-register getContext().watch(deviceActor); - deviceActor.forward(trackMsg, getContext()); actorToDeviceId.put(deviceActor, trackMsg.deviceId); + //#device-group-register deviceIdToActor.put(trackMsg.deviceId, deviceActor); + deviceActor.forward(trackMsg, getContext()); } } else { log.warning( @@ -136,24 +104,16 @@ public class DeviceGroup extends AbstractActor { actorToDeviceId.remove(deviceActor); deviceIdToActor.remove(deviceId); } - //#query-added - - private void onAllTemperatures(RequestAllTemperatures r) { - getContext().actorOf(DeviceGroupQuery.props( - actorToDeviceId, r.requestId, getSender(), new FiniteDuration(3, TimeUnit.SECONDS))); - } @Override public Receive createReceive() { - //#query-added return receiveBuilder() .match(DeviceManager.RequestTrackDevice.class, this::onTrackDevice) .match(RequestDeviceList.class, this::onDeviceList) .match(Terminated.class, this::onTerminated) - //#query-added - // ... other cases omitted - .match(RequestAllTemperatures.class, this::onAllTemperatures) .build(); } } -//#query-added +//#device-group-remove +//#device-group-register +//#device-group-full diff --git a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupTest.java b/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupTest.java index a51cf88a29..fc41977509 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupTest.java +++ b/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupTest.java @@ -3,10 +3,8 @@ */ package jdocs.tutorial_4; -import java.util.HashMap; -import java.util.Map; -import java.util.stream.Collectors; import java.util.stream.Stream; +import java.util.stream.Collectors; import akka.actor.ActorRef; import akka.actor.ActorSystem; @@ -21,8 +19,6 @@ import static org.junit.Assert.assertNotEquals; import org.scalatest.junit.JUnitSuite; -import static jdocs.tutorial_4.DeviceGroupQueryTest.assertEqualTemperatures; - public class DeviceGroupTest extends JUnitSuite { static ActorSystem system; @@ -38,6 +34,7 @@ public class DeviceGroupTest extends JUnitSuite { system = null; } + //#device-group-test-registration @Test public void testRegisterDeviceActor() { TestKit probe = new TestKit(system); @@ -67,7 +64,9 @@ public class DeviceGroupTest extends JUnitSuite { groupActor.tell(new DeviceManager.RequestTrackDevice("wrongGroup", "device1"), probe.getRef()); probe.expectNoMsg(); } + //#device-group-test-registration + //#device-group-test3 @Test public void testReturnSameActorForSameDeviceId() { TestKit probe = new TestKit(system); @@ -82,7 +81,9 @@ public class DeviceGroupTest extends JUnitSuite { ActorRef deviceActor2 = probe.getLastSender(); assertEquals(deviceActor1, deviceActor2); } + //#device-group-test3 + //#device-group-list-terminate-test @Test public void testListActiveDevices() { TestKit probe = new TestKit(system); @@ -125,49 +126,12 @@ public class DeviceGroupTest extends JUnitSuite { // to see the Terminated, that order is undefined probe.awaitAssert(() -> { groupActor.tell(new DeviceGroup.RequestDeviceList(1L), probe.getRef()); - DeviceGroup.ReplyDeviceList r = + DeviceGroup.ReplyDeviceList r = probe.expectMsgClass(DeviceGroup.ReplyDeviceList.class); assertEquals(1L, r.requestId); assertEquals(Stream.of("device2").collect(Collectors.toSet()), r.ids); 
return null; }); } - - //#group-query-integration-test - @Test - public void testCollectTemperaturesFromAllActiveDevices() { - TestKit probe = new TestKit(system); - ActorRef groupActor = system.actorOf(DeviceGroup.props("group")); - - groupActor.tell(new DeviceManager.RequestTrackDevice("group", "device1"), probe.getRef()); - probe.expectMsgClass(DeviceManager.DeviceRegistered.class); - ActorRef deviceActor1 = probe.getLastSender(); - - groupActor.tell(new DeviceManager.RequestTrackDevice("group", "device2"), probe.getRef()); - probe.expectMsgClass(DeviceManager.DeviceRegistered.class); - ActorRef deviceActor2 = probe.getLastSender(); - - groupActor.tell(new DeviceManager.RequestTrackDevice("group", "device3"), probe.getRef()); - probe.expectMsgClass(DeviceManager.DeviceRegistered.class); - ActorRef deviceActor3 = probe.getLastSender(); - - // Check that the device actors are working - deviceActor1.tell(new Device.RecordTemperature(0L, 1.0), probe.getRef()); - assertEquals(0L, probe.expectMsgClass(Device.TemperatureRecorded.class).requestId); - deviceActor2.tell(new Device.RecordTemperature(1L, 2.0), probe.getRef()); - assertEquals(1L, probe.expectMsgClass(Device.TemperatureRecorded.class).requestId); - // No temperature for device 3 - - groupActor.tell(new DeviceGroup.RequestAllTemperatures(0L), probe.getRef()); - DeviceGroup.RespondAllTemperatures response = probe.expectMsgClass(DeviceGroup.RespondAllTemperatures.class); - assertEquals(0L, response.requestId); - - Map expectedTemperatures = new HashMap<>(); - expectedTemperatures.put("device1", new DeviceGroup.Temperature(1.0)); - expectedTemperatures.put("device2", new DeviceGroup.Temperature(2.0)); - expectedTemperatures.put("device3", new DeviceGroup.TemperatureNotAvailable()); - - assertEqualTemperatures(expectedTemperatures, response.temperatures); - } - //#group-query-integration-test + //#device-group-list-terminate-test } diff --git a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceManager.java b/akka-docs/src/test/java/jdocs/tutorial_4/DeviceManager.java index 6f2e1dd672..6c95dbbe27 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceManager.java +++ b/akka-docs/src/test/java/jdocs/tutorial_4/DeviceManager.java @@ -4,6 +4,9 @@ package jdocs.tutorial_4; +import java.util.Map; +import java.util.HashMap; + import akka.actor.AbstractActor; import akka.actor.ActorRef; import akka.actor.Props; @@ -11,9 +14,7 @@ import akka.actor.Terminated; import akka.event.Logging; import akka.event.LoggingAdapter; -import java.util.HashMap; -import java.util.Map; - +//#device-manager-full public class DeviceManager extends AbstractActor { private final LoggingAdapter log = Logging.getLogger(getContext().getSystem(), this); @@ -21,6 +22,7 @@ public class DeviceManager extends AbstractActor { return Props.create(DeviceManager.class); } + //#device-manager-msgs public static final class RequestTrackDevice { public final String groupId; public final String deviceId; @@ -34,6 +36,7 @@ public class DeviceManager extends AbstractActor { public static final class DeviceRegistered { } + //#device-manager-msgs final Map groupIdToActor = new HashMap<>(); final Map actorToGroupId = new HashMap<>(); @@ -78,3 +81,4 @@ public class DeviceManager extends AbstractActor { } } +//#device-manager-full diff --git a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceTest.java b/akka-docs/src/test/java/jdocs/tutorial_4/DeviceTest.java index 09b20766c2..f7335d709c 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceTest.java +++ 
b/akka-docs/src/test/java/jdocs/tutorial_4/DeviceTest.java @@ -3,8 +3,6 @@ */ package jdocs.tutorial_4; -import java.util.Optional; - import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.testkit.javadsl.TestKit; @@ -16,6 +14,8 @@ import static org.junit.Assert.assertEquals; import org.scalatest.junit.JUnitSuite; +import java.util.Optional; + public class DeviceTest extends JUnitSuite { static ActorSystem system; @@ -31,6 +31,7 @@ public class DeviceTest extends JUnitSuite { system = null; } + //#device-registration-tests @Test public void testReplyToRegistrationRequests() { TestKit probe = new TestKit(system); @@ -52,7 +53,9 @@ public class DeviceTest extends JUnitSuite { deviceActor.tell(new DeviceManager.RequestTrackDevice("group", "wrongDevice"), probe.getRef()); probe.expectNoMsg(); } + //#device-registration-tests + //#device-read-test @Test public void testReplyWithEmptyReadingIfNoTemperatureIsKnown() { TestKit probe = new TestKit(system); @@ -62,7 +65,9 @@ public class DeviceTest extends JUnitSuite { assertEquals(42L, response.requestId); assertEquals(Optional.empty(), response.value); } + //#device-read-test + //#device-write-read-test @Test public void testReplyWithLatestTemperatureReading() { TestKit probe = new TestKit(system); @@ -84,5 +89,6 @@ public class DeviceTest extends JUnitSuite { assertEquals(4L, response2.requestId); assertEquals(Optional.of(55.0), response2.value); } + //#device-write-read-test } diff --git a/akka-docs/src/test/java/jdocs/tutorial_5/Device.java b/akka-docs/src/test/java/jdocs/tutorial_5/Device.java index 00d6b5e131..b330ff5969 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_5/Device.java +++ b/akka-docs/src/test/java/jdocs/tutorial_5/Device.java @@ -7,6 +7,7 @@ import akka.actor.AbstractActor; import akka.actor.Props; import akka.event.Logging; import akka.event.LoggingAdapter; + import jdocs.tutorial_5.DeviceManager.DeviceRegistered; import jdocs.tutorial_5.DeviceManager.RequestTrackDevice; diff --git a/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroup.java b/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroup.java index de09c84fb8..19fbabe33c 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroup.java +++ b/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroup.java @@ -16,6 +16,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; +//#query-added public class DeviceGroup extends AbstractActor { private final LoggingAdapter log = Logging.getLogger(getContext().getSystem(), this); @@ -47,6 +48,7 @@ public class DeviceGroup extends AbstractActor { } } + //#query-protocol public static final class RequestAllTemperatures { final long requestId; @@ -84,6 +86,8 @@ public class DeviceGroup extends AbstractActor { public static final class DeviceTimedOut implements TemperatureReading { } + //#query-protocol + final Map deviceIdToActor = new HashMap<>(); final Map actorToDeviceId = new HashMap<>(); @@ -99,6 +103,7 @@ public class DeviceGroup extends AbstractActor { log.info("DeviceGroup {} stopped", groupId); } + //#query-added private void onTrackDevice(DeviceManager.RequestTrackDevice trackMsg) { if (this.groupId.equals(trackMsg.groupId)) { ActorRef ref = deviceIdToActor.get(trackMsg.deviceId); @@ -131,6 +136,7 @@ public class DeviceGroup extends AbstractActor { actorToDeviceId.remove(deviceActor); deviceIdToActor.remove(deviceId); } + //#query-added private void onAllTemperatures(RequestAllTemperatures r) { getContext().actorOf(DeviceGroupQuery.props( @@ -139,11 +145,15 @@ public class 
DeviceGroup extends AbstractActor { @Override public Receive createReceive() { + //#query-added return receiveBuilder() .match(DeviceManager.RequestTrackDevice.class, this::onTrackDevice) .match(RequestDeviceList.class, this::onDeviceList) .match(Terminated.class, this::onTerminated) + //#query-added + // ... other cases omitted .match(RequestAllTemperatures.class, this::onAllTemperatures) .build(); } } +//#query-added diff --git a/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQuery.java b/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQuery.java index 07df119dc8..f351f58c9f 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQuery.java +++ b/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQuery.java @@ -3,16 +3,24 @@ */ package jdocs.tutorial_5; -import akka.actor.*; -import akka.event.Logging; -import akka.event.LoggingAdapter; -import scala.concurrent.duration.FiniteDuration; - import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; +import scala.concurrent.duration.FiniteDuration; + +import akka.actor.AbstractActor; +import akka.actor.ActorRef; +import akka.actor.Cancellable; +import akka.actor.Props; +import akka.actor.Terminated; + +import akka.event.Logging; +import akka.event.LoggingAdapter; + +//#query-full +//#query-outline public class DeviceGroupQuery extends AbstractActor { public static final class CollectionTimeout { } @@ -52,6 +60,8 @@ public class DeviceGroupQuery extends AbstractActor { queryTimeoutTimer.cancel(); } + //#query-outline + //#query-state @Override public Receive createReceive() { return waitingForReplies(new HashMap<>(), actorToDeviceId.keySet()); @@ -69,10 +79,7 @@ public class DeviceGroupQuery extends AbstractActor { receivedResponse(deviceActor, reading, stillWaiting, repliesSoFar); }) .match(Terminated.class, t -> { - if (stillWaiting.contains(t.getActor())) { - receivedResponse(t.getActor(), new DeviceGroup.DeviceNotAvailable(), stillWaiting, repliesSoFar); - } - // else ignore + receivedResponse(t.getActor(), new DeviceGroup.DeviceNotAvailable(), stillWaiting, repliesSoFar); }) .match(CollectionTimeout.class, t -> { Map replies = new HashMap<>(repliesSoFar); @@ -85,7 +92,9 @@ public class DeviceGroupQuery extends AbstractActor { }) .build(); } + //#query-state + //#query-collect-reply public void receivedResponse(ActorRef deviceActor, DeviceGroup.TemperatureReading reading, Set stillWaiting, @@ -105,4 +114,8 @@ public class DeviceGroupQuery extends AbstractActor { getContext().become(waitingForReplies(newRepliesSoFar, newStillWaiting)); } } + //#query-collect-reply + //#query-outline } +//#query-outline +//#query-full diff --git a/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQueryTest.java b/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQueryTest.java index 26b741b0e6..c60cdcdcbc 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQueryTest.java +++ b/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupQueryTest.java @@ -11,10 +11,10 @@ import akka.testkit.javadsl.TestKit; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; -import org.scalatest.junit.JUnitSuite; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; +import org.scalatest.junit.JUnitSuite; import scala.concurrent.duration.FiniteDuration; import java.util.HashMap; @@ -37,6 +37,7 @@ public class DeviceGroupQueryTest extends JUnitSuite { system = null; } + //#query-test-normal @Test public void 
testReturnTemperatureValueForWorkingDevices() { TestKit requester = new TestKit(system); @@ -69,7 +70,9 @@ public class DeviceGroupQueryTest extends JUnitSuite { assertEqualTemperatures(expectedTemperatures, response.temperatures); } + //#query-test-normal + //#query-test-no-reading @Test public void testReturnTemperatureNotAvailableForDevicesWithNoReadings() { TestKit requester = new TestKit(system); @@ -102,7 +105,9 @@ public class DeviceGroupQueryTest extends JUnitSuite { assertEqualTemperatures(expectedTemperatures, response.temperatures); } + //#query-test-no-reading + //#query-test-stopped @Test public void testReturnDeviceNotAvailableIfDeviceStopsBeforeAnswering() { TestKit requester = new TestKit(system); @@ -135,7 +140,9 @@ public class DeviceGroupQueryTest extends JUnitSuite { assertEqualTemperatures(expectedTemperatures, response.temperatures); } + //#query-test-stopped + //#query-test-stopped-later @Test public void testReturnTemperatureReadingEvenIfDeviceStopsAfterAnswering() { TestKit requester = new TestKit(system); @@ -169,7 +176,9 @@ public class DeviceGroupQueryTest extends JUnitSuite { assertEqualTemperatures(expectedTemperatures, response.temperatures); } + //#query-test-stopped-later + //#query-test-timeout @Test public void testReturnDeviceTimedOutIfDeviceDoesNotAnswerInTime() { TestKit requester = new TestKit(system); @@ -203,6 +212,7 @@ public class DeviceGroupQueryTest extends JUnitSuite { assertEqualTemperatures(expectedTemperatures, response.temperatures); } + //#query-test-timeout public static void assertEqualTemperatures(Map expected, Map actual) { for (Map.Entry entry : expected.entrySet()) { diff --git a/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupTest.java b/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupTest.java index fdfb97a6c0..f6c9dc62ad 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupTest.java +++ b/akka-docs/src/test/java/jdocs/tutorial_5/DeviceGroupTest.java @@ -16,10 +16,11 @@ import akka.testkit.javadsl.TestKit; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; -import org.scalatest.junit.JUnitSuite; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; +import org.scalatest.junit.JUnitSuite; + import static jdocs.tutorial_5.DeviceGroupQueryTest.assertEqualTemperatures; public class DeviceGroupTest extends JUnitSuite { @@ -132,6 +133,7 @@ public class DeviceGroupTest extends JUnitSuite { }); } + //#group-query-integration-test @Test public void testCollectTemperaturesFromAllActiveDevices() { TestKit probe = new TestKit(system); @@ -167,4 +169,5 @@ public class DeviceGroupTest extends JUnitSuite { assertEqualTemperatures(expectedTemperatures, response.temperatures); } + //#group-query-integration-test } diff --git a/akka-docs/src/test/java/jdocs/tutorial_5/DeviceTest.java b/akka-docs/src/test/java/jdocs/tutorial_5/DeviceTest.java index 10c2235718..c8b68933b9 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_5/DeviceTest.java +++ b/akka-docs/src/test/java/jdocs/tutorial_5/DeviceTest.java @@ -16,7 +16,6 @@ import static org.junit.Assert.assertEquals; import org.scalatest.junit.JUnitSuite; - public class DeviceTest extends JUnitSuite { static ActorSystem system; diff --git a/akka-docs/src/test/java/jdocs/tutorial_2/Device.java b/akka-docs/src/test/java/jdocs/tutorial_6/Device.java similarity index 78% rename from akka-docs/src/test/java/jdocs/tutorial_2/Device.java rename to akka-docs/src/test/java/jdocs/tutorial_6/Device.java index 
8d3f51b298..56fc3364c8 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_2/Device.java +++ b/akka-docs/src/test/java/jdocs/tutorial_6/Device.java @@ -1,17 +1,16 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package jdocs.tutorial_2; - -//#full-device - -import java.util.Optional; +package jdocs.tutorial_6; import akka.actor.AbstractActor; -import akka.actor.AbstractActor.Receive; import akka.actor.Props; import akka.event.Logging; import akka.event.LoggingAdapter; +import jdocs.tutorial_6.DeviceManager.DeviceRegistered; +import jdocs.tutorial_6.DeviceManager.RequestTrackDevice; + +import java.util.Optional; public class Device extends AbstractActor { private final LoggingAdapter log = Logging.getLogger(getContext().getSystem(), this); @@ -80,6 +79,16 @@ public class Device extends AbstractActor { @Override public Receive createReceive() { return receiveBuilder() + .match(RequestTrackDevice.class, r -> { + if (this.groupId.equals(r.groupId) && this.deviceId.equals(r.deviceId)) { + getSender().tell(new DeviceRegistered(), getSelf()); + } else { + log.warning( + "Ignoring TrackDevice request for {}-{}. This actor is responsible for {}-{}.", + r.groupId, r.deviceId, this.groupId, this.deviceId + ); + } + }) .match(RecordTemperature.class, r -> { log.info("Recorded temperature reading {} with {}", r.value, r.requestId); lastTemperatureReading = Optional.of(r.value); @@ -91,4 +100,3 @@ public class Device extends AbstractActor { .build(); } } -//#full-device diff --git a/akka-docs/src/test/java/jdocs/tutorial_3/DeviceGroup.java b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroup.java similarity index 62% rename from akka-docs/src/test/java/jdocs/tutorial_3/DeviceGroup.java rename to akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroup.java index 7a67b34c18..d67cb4761e 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_3/DeviceGroup.java +++ b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroup.java @@ -1,11 +1,7 @@ /** * Copyright (C) 2009-2017 Lightbend Inc.
*/ -package jdocs.tutorial_3; - -import java.util.Set; -import java.util.Map; -import java.util.HashMap; +package jdocs.tutorial_6; import akka.actor.AbstractActor; import akka.actor.ActorRef; @@ -13,11 +9,13 @@ import akka.actor.Props; import akka.actor.Terminated; import akka.event.Logging; import akka.event.LoggingAdapter; +import scala.concurrent.duration.FiniteDuration; -import jdocs.tutorial_3.Device; -import jdocs.tutorial_3.DeviceManager; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; -//#device-group-full public class DeviceGroup extends AbstractActor { private final LoggingAdapter log = Logging.getLogger(getContext().getSystem(), this); @@ -27,11 +25,9 @@ public class DeviceGroup extends AbstractActor { this.groupId = groupId; } - //#device-group-register public static Props props(String groupId) { return Props.create(DeviceGroup.class, groupId); } - //#device-group-register public static final class RequestDeviceList { final long requestId; @@ -50,15 +46,48 @@ public class DeviceGroup extends AbstractActor { this.ids = ids; } } - //#device-group-register -//#device-group-register -//#device-group-register -//#device-group-remove + + public static final class RequestAllTemperatures { + final long requestId; + + public RequestAllTemperatures(long requestId) { + this.requestId = requestId; + } + } + + public static final class RespondAllTemperatures { + final long requestId; + final Map temperatures; + + public RespondAllTemperatures(long requestId, Map temperatures) { + this.requestId = requestId; + this.temperatures = temperatures; + } + } + + public static interface TemperatureReading { + } + + public static final class Temperature implements TemperatureReading { + public final double value; + + public Temperature(double value) { + this.value = value; + } + } + + public static final class TemperatureNotAvailable implements TemperatureReading { + } + + public static final class DeviceNotAvailable implements TemperatureReading { + } + + public static final class DeviceTimedOut implements TemperatureReading { + } final Map deviceIdToActor = new HashMap<>(); - //#device-group-register final Map actorToDeviceId = new HashMap<>(); - //#device-group-register + final long nextCollectionId = 0L; @Override public void preStart() { @@ -72,18 +101,16 @@ public class DeviceGroup extends AbstractActor { private void onTrackDevice(DeviceManager.RequestTrackDevice trackMsg) { if (this.groupId.equals(trackMsg.groupId)) { - ActorRef deviceActor = deviceIdToActor.get(trackMsg.deviceId); - if (deviceActor != null) { - deviceActor.forward(trackMsg, getContext()); + ActorRef ref = deviceIdToActor.get(trackMsg.deviceId); + if (ref != null) { + ref.forward(trackMsg, getContext()); } else { log.info("Creating device actor for {}", trackMsg.deviceId); - deviceActor = getContext().actorOf(Device.props(groupId, trackMsg.deviceId), "device-" + trackMsg.deviceId); - //#device-group-register + ActorRef deviceActor = getContext().actorOf(Device.props(groupId, trackMsg.deviceId), "device-" + trackMsg.deviceId); getContext().watch(deviceActor); - actorToDeviceId.put(deviceActor, trackMsg.deviceId); - //#device-group-register - deviceIdToActor.put(trackMsg.deviceId, deviceActor); deviceActor.forward(trackMsg, getContext()); + actorToDeviceId.put(deviceActor, trackMsg.deviceId); + deviceIdToActor.put(trackMsg.deviceId, deviceActor); } } else { log.warning( @@ -105,15 +132,18 @@ public class DeviceGroup extends AbstractActor { 
deviceIdToActor.remove(deviceId); } + private void onAllTemperatures(RequestAllTemperatures r) { + getContext().actorOf(DeviceGroupQuery.props( + actorToDeviceId, r.requestId, getSender(), new FiniteDuration(3, TimeUnit.SECONDS))); + } + @Override public Receive createReceive() { return receiveBuilder() .match(DeviceManager.RequestTrackDevice.class, this::onTrackDevice) .match(RequestDeviceList.class, this::onDeviceList) .match(Terminated.class, this::onTerminated) + .match(RequestAllTemperatures.class, this::onAllTemperatures) .build(); } } -//#device-group-remove -//#device-group-register -//#device-group-full diff --git a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupQuery.java b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroupQuery.java similarity index 88% rename from akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupQuery.java rename to akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroupQuery.java index 24699c4fa7..e145868dd0 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupQuery.java +++ b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroupQuery.java @@ -1,26 +1,18 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package jdocs.tutorial_4; +package jdocs.tutorial_6; + +import akka.actor.*; +import akka.event.Logging; +import akka.event.LoggingAdapter; +import scala.concurrent.duration.FiniteDuration; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; -import scala.concurrent.duration.FiniteDuration; - -import akka.actor.AbstractActor; -import akka.actor.ActorRef; -import akka.actor.Cancellable; -import akka.actor.Props; -import akka.actor.Terminated; - -import akka.event.Logging; -import akka.event.LoggingAdapter; - -//#query-full -//#query-outline public class DeviceGroupQuery extends AbstractActor { public static final class CollectionTimeout { } @@ -60,8 +52,6 @@ public class DeviceGroupQuery extends AbstractActor { queryTimeoutTimer.cancel(); } - //#query-outline - //#query-state @Override public Receive createReceive() { return waitingForReplies(new HashMap<>(), actorToDeviceId.keySet()); @@ -79,7 +69,10 @@ public class DeviceGroupQuery extends AbstractActor { receivedResponse(deviceActor, reading, stillWaiting, repliesSoFar); }) .match(Terminated.class, t -> { - receivedResponse(t.getActor(), new DeviceGroup.DeviceNotAvailable(), stillWaiting, repliesSoFar); + if (stillWaiting.contains(t.getActor())) { + receivedResponse(t.getActor(), new DeviceGroup.DeviceNotAvailable(), stillWaiting, repliesSoFar); + } + // else ignore }) .match(CollectionTimeout.class, t -> { Map replies = new HashMap<>(repliesSoFar); @@ -92,9 +85,7 @@ public class DeviceGroupQuery extends AbstractActor { }) .build(); } - //#query-state - //#query-collect-reply public void receivedResponse(ActorRef deviceActor, DeviceGroup.TemperatureReading reading, Set stillWaiting, @@ -114,8 +105,4 @@ public class DeviceGroupQuery extends AbstractActor { getContext().become(waitingForReplies(newRepliesSoFar, newStillWaiting)); } } - //#query-collect-reply - //#query-outline } -//#query-outline -//#query-full \ No newline at end of file diff --git a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupQueryTest.java b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroupQueryTest.java similarity index 96% rename from akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupQueryTest.java rename to akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroupQueryTest.java index f4f83f9711..1d33c156c1 100644 --- 
a/akka-docs/src/test/java/jdocs/tutorial_4/DeviceGroupQueryTest.java +++ b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroupQueryTest.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package jdocs.tutorial_4; +package jdocs.tutorial_6; import akka.actor.ActorRef; import akka.actor.ActorSystem; @@ -11,10 +11,10 @@ import akka.testkit.javadsl.TestKit; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; +import org.scalatest.junit.JUnitSuite; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; -import org.scalatest.junit.JUnitSuite; import scala.concurrent.duration.FiniteDuration; import java.util.HashMap; @@ -37,7 +37,6 @@ public class DeviceGroupQueryTest extends JUnitSuite { system = null; } - //#query-test-normal @Test public void testReturnTemperatureValueForWorkingDevices() { TestKit requester = new TestKit(system); @@ -70,9 +69,7 @@ public class DeviceGroupQueryTest extends JUnitSuite { assertEqualTemperatures(expectedTemperatures, response.temperatures); } - //#query-test-normal - //#query-test-no-reading @Test public void testReturnTemperatureNotAvailableForDevicesWithNoReadings() { TestKit requester = new TestKit(system); @@ -105,9 +102,7 @@ public class DeviceGroupQueryTest extends JUnitSuite { assertEqualTemperatures(expectedTemperatures, response.temperatures); } - //#query-test-no-reading - //#query-test-stopped @Test public void testReturnDeviceNotAvailableIfDeviceStopsBeforeAnswering() { TestKit requester = new TestKit(system); @@ -140,9 +135,7 @@ public class DeviceGroupQueryTest extends JUnitSuite { assertEqualTemperatures(expectedTemperatures, response.temperatures); } - //#query-test-stopped - //#query-test-stopped-later @Test public void testReturnTemperatureReadingEvenIfDeviceStopsAfterAnswering() { TestKit requester = new TestKit(system); @@ -176,9 +169,7 @@ public class DeviceGroupQueryTest extends JUnitSuite { assertEqualTemperatures(expectedTemperatures, response.temperatures); } - //#query-test-stopped-later - //#query-test-timeout @Test public void testReturnDeviceTimedOutIfDeviceDoesNotAnswerInTime() { TestKit requester = new TestKit(system); @@ -212,7 +203,6 @@ public class DeviceGroupQueryTest extends JUnitSuite { assertEqualTemperatures(expectedTemperatures, response.temperatures); } - //#query-test-timeout public static void assertEqualTemperatures(Map expected, Map actual) { for (Map.Entry entry : expected.entrySet()) { diff --git a/akka-docs/src/test/java/jdocs/tutorial_3/DeviceGroupTest.java b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroupTest.java similarity index 70% rename from akka-docs/src/test/java/jdocs/tutorial_3/DeviceGroupTest.java rename to akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroupTest.java index 4c502b5f5d..f83882e164 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_3/DeviceGroupTest.java +++ b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceGroupTest.java @@ -1,10 +1,12 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. 
*/ -package jdocs.tutorial_3; +package jdocs.tutorial_6; -import java.util.stream.Stream; +import java.util.HashMap; +import java.util.Map; import java.util.stream.Collectors; +import java.util.stream.Stream; import akka.actor.ActorRef; import akka.actor.ActorSystem; @@ -14,10 +16,11 @@ import akka.testkit.javadsl.TestKit; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; +import org.scalatest.junit.JUnitSuite; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; -import org.scalatest.junit.JUnitSuite; +import static jdocs.tutorial_6.DeviceGroupQueryTest.assertEqualTemperatures; public class DeviceGroupTest extends JUnitSuite { @@ -34,7 +37,6 @@ public class DeviceGroupTest extends JUnitSuite { system = null; } - //#device-group-test-registration @Test public void testRegisterDeviceActor() { TestKit probe = new TestKit(system); @@ -49,7 +51,7 @@ public class DeviceGroupTest extends JUnitSuite { ActorRef deviceActor2 = probe.getLastSender(); assertNotEquals(deviceActor1, deviceActor2); - // Check that the device actors are workingl + // Check that the device actors are working deviceActor1.tell(new Device.RecordTemperature(0L, 1.0), probe.getRef()); assertEquals(0L, probe.expectMsgClass(Device.TemperatureRecorded.class).requestId); deviceActor2.tell(new Device.RecordTemperature(1L, 2.0), probe.getRef()); @@ -64,9 +66,7 @@ public class DeviceGroupTest extends JUnitSuite { groupActor.tell(new DeviceManager.RequestTrackDevice("wrongGroup", "device1"), probe.getRef()); probe.expectNoMsg(); } - //#device-group-test-registration - //#device-group-test3 @Test public void testReturnSameActorForSameDeviceId() { TestKit probe = new TestKit(system); @@ -81,9 +81,7 @@ public class DeviceGroupTest extends JUnitSuite { ActorRef deviceActor2 = probe.getLastSender(); assertEquals(deviceActor1, deviceActor2); } - //#device-group-test3 - //#device-group-list-terminate-test @Test public void testListActiveDevices() { TestKit probe = new TestKit(system); @@ -133,5 +131,40 @@ public class DeviceGroupTest extends JUnitSuite { return null; }); } - //#device-group-list-terminate-test + + @Test + public void testCollectTemperaturesFromAllActiveDevices() { + TestKit probe = new TestKit(system); + ActorRef groupActor = system.actorOf(DeviceGroup.props("group")); + + groupActor.tell(new DeviceManager.RequestTrackDevice("group", "device1"), probe.getRef()); + probe.expectMsgClass(DeviceManager.DeviceRegistered.class); + ActorRef deviceActor1 = probe.getLastSender(); + + groupActor.tell(new DeviceManager.RequestTrackDevice("group", "device2"), probe.getRef()); + probe.expectMsgClass(DeviceManager.DeviceRegistered.class); + ActorRef deviceActor2 = probe.getLastSender(); + + groupActor.tell(new DeviceManager.RequestTrackDevice("group", "device3"), probe.getRef()); + probe.expectMsgClass(DeviceManager.DeviceRegistered.class); + ActorRef deviceActor3 = probe.getLastSender(); + + // Check that the device actors are working + deviceActor1.tell(new Device.RecordTemperature(0L, 1.0), probe.getRef()); + assertEquals(0L, probe.expectMsgClass(Device.TemperatureRecorded.class).requestId); + deviceActor2.tell(new Device.RecordTemperature(1L, 2.0), probe.getRef()); + assertEquals(1L, probe.expectMsgClass(Device.TemperatureRecorded.class).requestId); + // No temperature for device 3 + + groupActor.tell(new DeviceGroup.RequestAllTemperatures(0L), probe.getRef()); + DeviceGroup.RespondAllTemperatures response = 
probe.expectMsgClass(DeviceGroup.RespondAllTemperatures.class); + assertEquals(0L, response.requestId); + + Map<String, DeviceGroup.TemperatureReading> expectedTemperatures = new HashMap<>(); + expectedTemperatures.put("device1", new DeviceGroup.Temperature(1.0)); + expectedTemperatures.put("device2", new DeviceGroup.Temperature(2.0)); + expectedTemperatures.put("device3", new DeviceGroup.TemperatureNotAvailable()); + + assertEqualTemperatures(expectedTemperatures, response.temperatures); + } } diff --git a/akka-docs/src/test/java/jdocs/tutorial_3/DeviceManager.java b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceManager.java similarity index 94% rename from akka-docs/src/test/java/jdocs/tutorial_3/DeviceManager.java rename to akka-docs/src/test/java/jdocs/tutorial_6/DeviceManager.java index 8a1fb813ac..1b307fb7e5 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_3/DeviceManager.java +++ b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceManager.java @@ -2,10 +2,7 @@ * Copyright (C) 2009-2017 Lightbend Inc. */ -package jdocs.tutorial_3; - -import java.util.Map; -import java.util.HashMap; +package jdocs.tutorial_6; import akka.actor.AbstractActor; import akka.actor.ActorRef; @@ -14,7 +11,9 @@ import akka.actor.Terminated; import akka.event.Logging; import akka.event.LoggingAdapter; -//#device-manager-full +import java.util.HashMap; +import java.util.Map; + public class DeviceManager extends AbstractActor { private final LoggingAdapter log = Logging.getLogger(getContext().getSystem(), this); @@ -22,7 +21,6 @@ public class DeviceManager extends AbstractActor { return Props.create(DeviceManager.class); } - //#device-manager-msgs public static final class RequestTrackDevice { public final String groupId; public final String deviceId; @@ -36,7 +34,6 @@ public class DeviceManager extends AbstractActor { public static final class DeviceRegistered { } - //#device-manager-msgs final Map<String, ActorRef> groupIdToActor = new HashMap<>(); final Map<ActorRef, String> actorToGroupId = new HashMap<>(); @@ -81,4 +78,3 @@ public class DeviceManager extends AbstractActor { } } -//#device-manager-full diff --git a/akka-docs/src/test/java/jdocs/tutorial_2/DeviceTest.java b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceTest.java similarity index 71% rename from akka-docs/src/test/java/jdocs/tutorial_2/DeviceTest.java rename to akka-docs/src/test/java/jdocs/tutorial_6/DeviceTest.java index 0cd3bb5bea..1848dbaabc 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_2/DeviceTest.java +++ b/akka-docs/src/test/java/jdocs/tutorial_6/DeviceTest.java @@ -1,10 +1,14 @@ /** * Copyright (C) 2009-2017 Lightbend Inc.
*/ -package jdocs.tutorial_2; +package jdocs.tutorial_6; import java.util.Optional; +import akka.actor.ActorRef; +import akka.actor.ActorSystem; +import akka.testkit.javadsl.TestKit; + import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -12,9 +16,6 @@ import static org.junit.Assert.assertEquals; import org.scalatest.junit.JUnitSuite; -import akka.actor.ActorSystem; -import akka.actor.ActorRef; -import akka.testkit.javadsl.TestKit; public class DeviceTest extends JUnitSuite { @@ -31,7 +32,28 @@ public class DeviceTest extends JUnitSuite { system = null; } - //#device-read-test + @Test + public void testReplyToRegistrationRequests() { + TestKit probe = new TestKit(system); + ActorRef deviceActor = system.actorOf(Device.props("group", "device")); + + deviceActor.tell(new DeviceManager.RequestTrackDevice("group", "device"), probe.getRef()); + probe.expectMsgClass(DeviceManager.DeviceRegistered.class); + assertEquals(deviceActor, probe.getLastSender()); + } + + @Test + public void testIgnoreWrongRegistrationRequests() { + TestKit probe = new TestKit(system); + ActorRef deviceActor = system.actorOf(Device.props("group", "device")); + + deviceActor.tell(new DeviceManager.RequestTrackDevice("wrongGroup", "device"), probe.getRef()); + probe.expectNoMsg(); + + deviceActor.tell(new DeviceManager.RequestTrackDevice("group", "wrongDevice"), probe.getRef()); + probe.expectNoMsg(); + } + @Test public void testReplyWithEmptyReadingIfNoTemperatureIsKnown() { TestKit probe = new TestKit(system); @@ -41,9 +63,7 @@ public class DeviceTest extends JUnitSuite { assertEquals(42L, response.requestId); assertEquals(Optional.empty(), response.value); } - //#device-read-test - //#device-write-read-test @Test public void testReplyWithLatestTemperatureReading() { TestKit probe = new TestKit(system); @@ -65,6 +85,5 @@ public class DeviceTest extends JUnitSuite { assertEquals(4L, response2.requestId); assertEquals(Optional.of(55.0), response2.value); } - //#device-write-read-test } diff --git a/akka-docs/src/test/java/jdocs/tutorial_5/IotMain.java b/akka-docs/src/test/java/jdocs/tutorial_6/IotMain.java similarity index 96% rename from akka-docs/src/test/java/jdocs/tutorial_5/IotMain.java rename to akka-docs/src/test/java/jdocs/tutorial_6/IotMain.java index d0217925e5..a7c396c6f6 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_5/IotMain.java +++ b/akka-docs/src/test/java/jdocs/tutorial_6/IotMain.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package jdocs.tutorial_5; +package jdocs.tutorial_6; import akka.actor.ActorRef; import akka.actor.ActorSystem; @@ -26,4 +26,4 @@ public class IotMain { } } -} \ No newline at end of file +} diff --git a/akka-docs/src/test/java/jdocs/tutorial_5/IotSupervisor.java b/akka-docs/src/test/java/jdocs/tutorial_6/IotSupervisor.java similarity index 96% rename from akka-docs/src/test/java/jdocs/tutorial_5/IotSupervisor.java rename to akka-docs/src/test/java/jdocs/tutorial_6/IotSupervisor.java index 28c7bcd506..8f8fb9d0aa 100644 --- a/akka-docs/src/test/java/jdocs/tutorial_5/IotSupervisor.java +++ b/akka-docs/src/test/java/jdocs/tutorial_6/IotSupervisor.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. 
*/ -package jdocs.tutorial_5; +package jdocs.tutorial_6; //#iot-supervisor diff --git a/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala b/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala index 6af4d5bc95..f5b99dab57 100644 --- a/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala @@ -34,43 +34,41 @@ class SharedMutableStateDocSpec { def expensiveCalculation(actorRef: ActorRef): String = { // this is a very costly operation - "Meaning of live is 42" + "Meaning of life is 42" } def expensiveCalculation(): String = { // this is a very costly operation - "Meaning of live is 42" + "Meaning of life is 42" } def receive = { case _ => - - //Wrong ways implicit val ec = context.dispatcher implicit val timeout = Timeout(5 seconds) // needed for `?` below - // Very bad, shared mutable state, - // will break your application in weird ways + // Example of incorrect approach + // Very bad: shared mutable state will cause your + // application to break in weird ways Future { state = "This will race" } ((echoActor ? Message("With this other one")).mapTo[Message]) .foreach { received => state = received.msg } - // Very bad, shared mutable object, - // the other actor cand mutate your own state, + // Very bad: shared mutable object allows + // the other actor to mutate your own state, // or worse, you might get weird race conditions cleanUpActor ! mySet - // Very bad, "sender" changes for every message, + // Very bad: "sender" changes for every message, // shared mutable state bug Future { expensiveCalculation(sender()) } - //Right ways - - // Completely safe, "self" is OK to close over + // Example of correct approach + // Completely safe: "self" is OK to close over // and it's an ActorRef, which is thread-safe Future { expensiveCalculation() } foreach { self ! _ } - // Completely safe, we close over a fixed value + // Completely safe: we close over a fixed value // and it's an ActorRef, which is thread-safe val currentSender = sender() Future { expensiveCalculation(currentSender) } diff --git a/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala b/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala new file mode 100644 index 0000000000..dfae34d9cf --- /dev/null +++ b/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala @@ -0,0 +1,33 @@ +/** + * Copyright (C) 2017 Lightbend Inc. 
+ */ +package docs.actor + +import akka.actor.Actor +import scala.concurrent.duration._ + +object TimerDocSpec { + //#timers + import akka.actor.Timers + + object MyActor { + private case object TickKey + private case object FirstTick + private case object Tick + private case object LaterTick + } + + class MyActor extends Actor with Timers { + import MyActor._ + timers.startSingleTimer(TickKey, FirstTick, 500.millis) + + def receive = { + case FirstTick => + // do something useful here + timers.startPeriodicTimer(TickKey, Tick, 1.second) + case Tick => + // do something useful here + } + } + //#timers +} diff --git a/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala b/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala index 897c331e65..aa1db19db6 100644 --- a/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala +++ b/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala @@ -78,7 +78,7 @@ abstract class FactorialFrontend2 extends Actor { AdaptiveLoadBalancingGroup(HeapMetricsSelector), ClusterRouterGroupSettings( totalInstances = 100, routeesPaths = List("/user/factorialBackend"), - allowLocalRoutees = true, useRole = Some("backend"))).props(), + allowLocalRoutees = true, useRoles = Set("backend"))).props(), name = "factorialBackendRouter2") //#router-lookup-in-code @@ -96,7 +96,7 @@ abstract class FactorialFrontend3 extends Actor { ClusterRouterPool(AdaptiveLoadBalancingPool( SystemLoadAverageMetricsSelector), ClusterRouterPoolSettings( totalInstances = 100, maxInstancesPerNode = 3, - allowLocalRoutees = false, useRole = Some("backend"))).props(Props[FactorialBackend]), + allowLocalRoutees = false, useRoles = Set("backend"))).props(Props[FactorialBackend]), name = "factorialBackendRouter3") //#router-deploy-in-code } diff --git a/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala index 0e71951987..7af5de2967 100644 --- a/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala @@ -92,6 +92,28 @@ object DispatcherDocSpec { } //#my-thread-pool-dispatcher-config + //#affinity-pool-dispatcher-config + affinity-pool-dispatcher { + # Dispatcher is the name of the event-based dispatcher + type = Dispatcher + # What kind of ExecutionService to use + executor = "affinity-pool-executor" + # Configuration for the thread pool + affinity-pool-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 8 + # Parallelism (threads) ... ceil(available processors * factor) + parallelism-factor = 1 + # Max number of threads to cap factor-based parallelism number to + parallelism-max = 16 + } + # Throughput defines the maximum number of messages to be + # processed per actor before the thread jumps to the next actor. + # Set to 1 for as fair as possible. + throughput = 100 + } + //#affinity-pool-dispatcher-config + //#fixed-pool-size-dispatcher-config blocking-io-dispatcher { type = Dispatcher @@ -294,6 +316,14 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { //#defining-pinned-dispatcher } + "defining affinity-pool dispatcher" in { + val context = system + //#defining-affinity-pool-dispatcher + val myActor = + context.actorOf(Props[MyActor].withDispatcher("affinity-pool-dispatcher"), "myactor4") + //#defining-affinity-pool-dispatcher + } + "looking up a dispatcher" in { //#lookup // for use with Futures, Scheduler, etc. 
diff --git a/akka-docs/src/test/scala/docs/dispatcher/MyUnboundedMailbox.scala b/akka-docs/src/test/scala/docs/dispatcher/MyUnboundedMailbox.scala index 7353a9438e..a49029e894 100644 --- a/akka-docs/src/test/scala/docs/dispatcher/MyUnboundedMailbox.scala +++ b/akka-docs/src/test/scala/docs/dispatcher/MyUnboundedMailbox.scala @@ -15,9 +15,6 @@ import com.typesafe.config.Config import java.util.concurrent.ConcurrentLinkedQueue import scala.Option -// Marker trait used for mailbox requirements mapping -trait MyUnboundedMessageQueueSemantics - object MyUnboundedMailbox { // This is the MessageQueue implementation class MyMessageQueue extends MessageQueue @@ -58,3 +55,8 @@ class MyUnboundedMailbox extends MailboxType new MyMessageQueue() } //#mailbox-implementation-example + +//#mailbox-marker-interface +// Marker trait used for mailbox requirements mapping +trait MyUnboundedMessageQueueSemantics +//#mailbox-marker-interface \ No newline at end of file diff --git a/akka-docs/src/test/scala/docs/pattern/SchedulerPatternSpec.scala b/akka-docs/src/test/scala/docs/pattern/SchedulerPatternSpec.scala deleted file mode 100644 index 5dde1d4cdb..0000000000 --- a/akka-docs/src/test/scala/docs/pattern/SchedulerPatternSpec.scala +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Copyright (C) 2009-2017 Lightbend Inc. - */ - -package docs.pattern - -import language.postfixOps - -import akka.actor.{ Props, ActorRef, Actor } -import scala.concurrent.duration._ -import akka.testkit.{ TimingTest, AkkaSpec, filterException } -import docs.pattern.SchedulerPatternSpec.ScheduleInConstructor - -object SchedulerPatternSpec { - //#schedule-constructor - class ScheduleInConstructor extends Actor { - import context.dispatcher - val tick = - context.system.scheduler.schedule(500 millis, 1000 millis, self, "tick") - //#schedule-constructor - // this var and constructor is declared here to not show up in the docs - var target: ActorRef = null - def this(target: ActorRef) = { this(); this.target = target } - //#schedule-constructor - - override def postStop() = tick.cancel() - - def receive = { - case "tick" => - // do something useful here - //#schedule-constructor - target ! "tick" - case "restart" => - throw new ArithmeticException - //#schedule-constructor - } - } - //#schedule-constructor - - //#schedule-receive - class ScheduleInReceive extends Actor { - import context._ - //#schedule-receive - // this var and constructor is declared here to not show up in the docs - var target: ActorRef = null - def this(target: ActorRef) = { this(); this.target = target } - //#schedule-receive - - override def preStart() = - system.scheduler.scheduleOnce(500 millis, self, "tick") - - // override postRestart so we don't call preStart and schedule a new message - override def postRestart(reason: Throwable) = {} - - def receive = { - case "tick" => - // send another periodic tick after the specified delay - system.scheduler.scheduleOnce(1000 millis, self, "tick") - // do something useful here - //#schedule-receive - target ! "tick" - case "restart" => - throw new ArithmeticException - //#schedule-receive - } - } - //#schedule-receive -} - -class SchedulerPatternSpec extends AkkaSpec { - - def testSchedule(actor: ActorRef, startDuration: FiniteDuration, - afterRestartDuration: FiniteDuration) = { - - filterException[ArithmeticException] { - within(startDuration) { - expectMsg("tick") - expectMsg("tick") - expectMsg("tick") - } - actor ! 
"restart" - within(afterRestartDuration) { - expectMsg("tick") - expectMsg("tick") - } - system.stop(actor) - } - } - - "send periodic ticks from the constructor" taggedAs TimingTest in { - testSchedule( - system.actorOf(Props(classOf[ScheduleInConstructor], testActor)), - 3000 millis, 2000 millis) - } - - "send ticks from the preStart and receive" taggedAs TimingTest in { - testSchedule( - system.actorOf(Props(classOf[ScheduleInConstructor], testActor)), - 3000 millis, 2500 millis) - } -} diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala index 3a594a2eef..91c125eb11 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala @@ -4,8 +4,11 @@ package docs.persistence +import java.io.NotSerializableException + import scala.language.reflectiveCalls import java.nio.charset.Charset + import akka.actor.ActorSystem import akka.persistence.journal.{ EventAdapter, EventSeq } import akka.serialization.{ SerializationExtension, SerializerWithStringManifest } @@ -13,6 +16,7 @@ import akka.testkit.TestKit import com.typesafe.config._ import org.scalatest.WordSpec import spray.json.JsObject + import scala.concurrent.duration._ import docs.persistence.proto.FlightAppModels @@ -82,7 +86,7 @@ class ProtobufReadOptional { // use generated protobuf serializer seatReserved(FlightAppModels.SeatReserved.parseFrom(bytes)) case _ => - throw new IllegalArgumentException("Unable to handle manifest: " + manifest) + throw new NotSerializableException("Unable to handle manifest: " + manifest) } override def toBinary(o: AnyRef): Array[Byte] = o match { @@ -197,7 +201,7 @@ object SimplestCustomSerializer { val nameAndSurname = new String(bytes, Utf8) val Array(name, surname) = nameAndSurname.split("[|]") Person(name, surname) - case _ => throw new IllegalArgumentException( + case _ => throw new NotSerializableException( s"Unable to deserialize from bytes, manifest was: $manifest! 
Bytes length: " + bytes.length) } @@ -317,7 +321,7 @@ class RenamedEventAwareSerializer extends SerializerWithStringManifest { manifest match { case OldPayloadClassName => SamplePayload(new String(bytes, Utf8)) case MyPayloadClassName => SamplePayload(new String(bytes, Utf8)) - case other => throw new Exception(s"unexpected manifest [$other]") + case other => throw new NotSerializableException(s"unexpected manifest [$other]") } } //#string-serializer-handle-rename diff --git a/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala index 603f836c8f..24037157fb 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala @@ -89,4 +89,57 @@ class FlowErrorDocSpec extends AkkaSpec { Await.result(result, 3.seconds) should be(Vector(0, 1, 4, 0, 5, 12)) } + "demonstrate recover" in { + implicit val materializer = ActorMaterializer() + //#recover + Source(0 to 6).map(n => + if (n < 5) n.toString + else throw new RuntimeException("Boom!") + ).recover { + case _: RuntimeException => "stream truncated" + }.runForeach(println) + //#recover + + /* +Output: +//#recover-output +0 +1 +2 +3 +4 +stream truncated +//#recover-output +*/ + } + + "demonstrate recoverWithRetries" in { + implicit val materializer = ActorMaterializer() + //#recoverWithRetries + val planB = Source(List("five", "six", "seven", "eight")) + + Source(0 to 10).map(n => + if (n < 5) n.toString + else throw new RuntimeException("Boom!") + ).recoverWithRetries(attempts = 1, { + case _: RuntimeException => planB + }).runForeach(println) + //#recoverWithRetries + + /* +Output: +//#recoverWithRetries-output +0 +1 +2 +3 +4 +five +six +seven +eight +//#recoverWithRetries-output + */ + } + } diff --git a/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala index cce8b53f43..2c2dd5f2c8 100644 --- a/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala @@ -7,7 +7,6 @@ import java.util.concurrent.ThreadLocalRandom import akka.stream._ import akka.stream.scaladsl._ -import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler, StageLogging } import akka.testkit.{ AkkaSpec, EventFilter } class GraphStageLoggingDocSpec extends AkkaSpec("akka.loglevel = DEBUG") { @@ -16,6 +15,8 @@ class GraphStageLoggingDocSpec extends AkkaSpec("akka.loglevel = DEBUG") { implicit val ec = system.dispatcher //#stage-with-logging + import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler, StageLogging } + final class RandomLettersSource extends GraphStage[SourceShape[String]] { val out = Outlet[String]("RandomLettersSource.out") override val shape: SourceShape[String] = SourceShape(out) diff --git a/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala b/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala index 2d234c66bc..892676e68c 100644 --- a/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala @@ -10,6 +10,7 @@ import akka.testkit.AkkaSpec import docs.CompileOnlySpec import scala.concurrent.duration._ +import akka.stream.ThrottleMode class HubsDocSpec extends AkkaSpec with CompileOnlySpec { implicit val materializer = ActorMaterializer() @@ -104,6 +105,86 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { //#pub-sub-4 } + "demonstrate creating a dynamic partition 
hub" in compileOnlySpec { + //#partition-hub + // A simple producer that publishes a new "message-" every second + val producer = Source.tick(1.second, 1.second, "message") + .zipWith(Source(1 to 100))((a, b) => s"$a-$b") + + // Attach a PartitionHub Sink to the producer. This will materialize to a + // corresponding Source. + // (We need to use toMat and Keep.right since by default the materialized + // value to the left is used) + val runnableGraph: RunnableGraph[Source[String, NotUsed]] = + producer.toMat(PartitionHub.sink( + (size, elem) => math.abs(elem.hashCode) % size, + startAfterNrOfConsumers = 2, bufferSize = 256))(Keep.right) + + // By running/materializing the producer, we get back a Source, which + // gives us access to the elements published by the producer. + val fromProducer: Source[String, NotUsed] = runnableGraph.run() + + // Print out messages from the producer in two independent consumers + fromProducer.runForeach(msg => println("consumer1: " + msg)) + fromProducer.runForeach(msg => println("consumer2: " + msg)) + //#partition-hub + } + + "demonstrate creating a dynamic stateful partition hub" in compileOnlySpec { + //#partition-hub-stateful + // A simple producer that publishes a new "message-" every second + val producer = Source.tick(1.second, 1.second, "message") + .zipWith(Source(1 to 100))((a, b) => s"$a-$b") + + // New instance of the partitioner function and its state is created + // for each materialization of the PartitionHub. + def roundRobin(): (PartitionHub.ConsumerInfo, String) ⇒ Long = { + var i = -1L + + (info, elem) => { + i += 1 + info.consumerIdByIdx((i % info.size).toInt) + } + } + + // Attach a PartitionHub Sink to the producer. This will materialize to a + // corresponding Source. + // (We need to use toMat and Keep.right since by default the materialized + // value to the left is used) + val runnableGraph: RunnableGraph[Source[String, NotUsed]] = + producer.toMat(PartitionHub.statefulSink( + () => roundRobin(), + startAfterNrOfConsumers = 2, bufferSize = 256))(Keep.right) + + // By running/materializing the producer, we get back a Source, which + // gives us access to the elements published by the producer. + val fromProducer: Source[String, NotUsed] = runnableGraph.run() + + // Print out messages from the producer in two independent consumers + fromProducer.runForeach(msg => println("consumer1: " + msg)) + fromProducer.runForeach(msg => println("consumer2: " + msg)) + //#partition-hub-stateful + } + + "demonstrate creating a dynamic partition hub routing to fastest consumer" in compileOnlySpec { + //#partition-hub-fastest + val producer = Source(0 until 100) + + // ConsumerInfo.queueSize is the approximate number of buffered elements for a consumer. + // Note that this is a moving target since the elements are consumed concurrently. 
+ val runnableGraph: RunnableGraph[Source[Int, NotUsed]] = + producer.toMat(PartitionHub.statefulSink( + () => (info, elem) ⇒ info.consumerIds.minBy(id ⇒ info.queueSize(id)), + startAfterNrOfConsumers = 2, bufferSize = 16))(Keep.right) + + val fromProducer: Source[Int, NotUsed] = runnableGraph.run() + + fromProducer.runForeach(msg => println("consumer1: " + msg)) + fromProducer.throttle(10, 100.millis, 10, ThrottleMode.Shaping) + .runForeach(msg => println("consumer2: " + msg)) + //#partition-hub-fastest + } + } } diff --git a/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala new file mode 100644 index 0000000000..9c75af6117 --- /dev/null +++ b/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala @@ -0,0 +1,67 @@ +/** + * Copyright (C) 2015-2017 Lightbend Inc. + */ +package docs.stream + +import akka.NotUsed +import akka.stream.{ ActorMaterializer, KillSwitches } +import akka.stream.scaladsl._ +import akka.testkit.AkkaSpec +import docs.CompileOnlySpec + +import scala.concurrent.duration._ +import scala.concurrent._ + +class RestartDocSpec extends AkkaSpec with CompileOnlySpec { + implicit val materializer = ActorMaterializer() + import system.dispatcher + + // Mock akka-http interfaces + object Http { + def apply() = this + def singleRequest(req: HttpRequest) = Future.successful(()) + } + case class HttpRequest(uri: String) + case class Unmarshal(b: Any) { + def to[T]: Future[T] = Promise[T]().future + } + case class ServerSentEvent() + + def doSomethingElse(): Unit = () + + "Restart stages" should { + + "demonstrate a restart with backoff source" in compileOnlySpec { + + //#restart-with-backoff-source + val restartSource = RestartSource.withBackoff( + minBackoff = 3.seconds, + maxBackoff = 30.seconds, + randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly + ) { () => + // Create a source from a future of a source + Source.fromFutureSource { + // Make a single request with akka-http + Http().singleRequest(HttpRequest( + uri = "http://example.com/eventstream" + )) + // Unmarshall it as a source of server sent events + .flatMap(Unmarshal(_).to[Source[ServerSentEvent, NotUsed]]) + } + } + //#restart-with-backoff-source + + //#with-kill-switch + val killSwitch = restartSource + .viaMat(KillSwitches.single)(Keep.right) + .toMat(Sink.foreach(event => println(s"Got event: $event")))(Keep.left) + .run() + + doSomethingElse() + + killSwitch.shutdown() + //#with-kill-switch + } + + } +} \ No newline at end of file diff --git a/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala index 0741bb4eb3..b245b6baa3 100644 --- a/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala @@ -29,8 +29,9 @@ object TwitterStreamQuickstartDocSpec { final case class Hashtag(name: String) final case class Tweet(author: Author, timestamp: Long, body: String) { - def hashtags: Set[Hashtag] = - body.split(" ").collect { case t if t.startsWith("#") => Hashtag(t) }.toSet + def hashtags: Set[Hashtag] = body.split(" ").collect { + case t if t.startsWith("#") => Hashtag(t.replaceAll("[^#\\w]", "")) + }.toSet } val akkaTag = Hashtag("#akka") @@ -150,10 +151,12 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { //#fiddle_code tweets - .filterNot(_.hashtags.contains(akkaTag)) - .mapConcat(_.hashtags) - .map(_.name.toUpperCase) - 
.runWith(Sink.foreach(println)) + .filterNot(_.hashtags.contains(akkaTag)) // Remove all tweets containing #akka hashtag + .map(_.hashtags) // Get all sets of hashtags ... + .reduce(_ ++ _) // ... and reduce them to a single set, removing duplicates across all tweets + .mapConcat(identity) // Flatten the stream of tweets to a stream of hashtags + .map(_.name.toUpperCase) // Convert all hashtags to upper case + .runWith(Sink.foreach(println)) // Attach the Flow to a Sink that will finally print the hashtags // $FiddleDependency org.akka-js %%% akkajsactorstream % 1.2.5.1 //#fiddle_code diff --git a/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala b/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala index ba425d1e2a..7e0cc711fa 100644 --- a/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala +++ b/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala @@ -1,9 +1,12 @@ -package tutorial_1 - -import akka.actor.{ Actor, Props } -import akka.testkit.AkkaSpec +// Prevent package clashes with the Java examples: +package docs.tutorial_1 //#print-refs +package com.lightbend.akka.sample + +import akka.actor.{ Actor, Props, ActorSystem } +import scala.io.StdIn + class PrintMyActorRefActor extends Actor { override def receive: Receive = { case "printit" => @@ -13,6 +16,8 @@ class PrintMyActorRefActor extends Actor { } //#print-refs +import akka.testkit.AkkaSpec + //#start-stop class StartStopActor1 extends Actor { override def preStart(): Unit = { @@ -62,30 +67,38 @@ class ActorHierarchyExperiments extends AkkaSpec { // format: OFF //#print-refs -val firstRef = system.actorOf(Props[PrintMyActorRefActor], "first-actor") -println(s"First : $firstRef") -firstRef ! "printit" +object ActorHierarchyExperiments extends App { + val system = ActorSystem() + + val firstRef = system.actorOf(Props[PrintMyActorRefActor], "first-actor") + println(s"First: $firstRef") + firstRef ! "printit" + + println(">>> Press ENTER to exit <<<") + try StdIn.readLine() + finally system.terminate() +} //#print-refs // format: ON } "start and stop actors" in { // format: OFF - //#start-stop + //#start-stop-main val first = system.actorOf(Props[StartStopActor1], "first") first ! "stop" - //#start-stop + //#start-stop-main // format: ON } "supervise actors" in { // format: OFF - //#supervise + //#supervise-main val supervisingActor = system.actorOf(Props[SupervisingActor], "supervising-actor") supervisingActor ! "failChild" - //#supervise + //#supervise-main // format: ON } } diff --git a/akka-docs/src/test/scala/tutorial_1/IotApp.scala b/akka-docs/src/test/scala/tutorial_2/IotApp.scala similarity index 90% rename from akka-docs/src/test/scala/tutorial_1/IotApp.scala rename to akka-docs/src/test/scala/tutorial_2/IotApp.scala index e62c542567..7e510b495e 100644 --- a/akka-docs/src/test/scala/tutorial_1/IotApp.scala +++ b/akka-docs/src/test/scala/tutorial_2/IotApp.scala @@ -1,9 +1,11 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. 
*/ -package tutorial_1 +package tutorial_2 //#iot-app +package com.lightbend.akka.sample + import akka.actor.ActorSystem import scala.io.StdIn diff --git a/akka-docs/src/test/scala/tutorial_1/IotSupervisor.scala b/akka-docs/src/test/scala/tutorial_2/IotSupervisor.scala similarity index 90% rename from akka-docs/src/test/scala/tutorial_1/IotSupervisor.scala rename to akka-docs/src/test/scala/tutorial_2/IotSupervisor.scala index 28beb4d9d1..9dc1da970e 100644 --- a/akka-docs/src/test/scala/tutorial_1/IotSupervisor.scala +++ b/akka-docs/src/test/scala/tutorial_2/IotSupervisor.scala @@ -1,9 +1,11 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package tutorial_1 +package tutorial_2 //#iot-supervisor +package com.lightbend.akka.sample + import akka.actor.{ Actor, ActorLogging, Props } object IotSupervisor { diff --git a/akka-docs/src/test/scala/tutorial_3/Device.scala b/akka-docs/src/test/scala/tutorial_3/Device.scala index 754d483e9f..577406002f 100644 --- a/akka-docs/src/test/scala/tutorial_3/Device.scala +++ b/akka-docs/src/test/scala/tutorial_3/Device.scala @@ -3,11 +3,9 @@ */ package tutorial_3 +//#full-device import akka.actor.{ Actor, ActorLogging, Props } -import tutorial_3.Device.{ ReadTemperature, RecordTemperature, RespondTemperature, TemperatureRecorded } -import tutorial_3.DeviceManager.{ DeviceRegistered, RequestTrackDevice } -//#device-with-register object Device { def props(groupId: String, deviceId: String): Props = Props(new Device(groupId, deviceId)) @@ -19,22 +17,13 @@ object Device { } class Device(groupId: String, deviceId: String) extends Actor with ActorLogging { + import Device._ var lastTemperatureReading: Option[Double] = None override def preStart(): Unit = log.info("Device actor {}-{} started", groupId, deviceId) - override def postStop(): Unit = log.info("Device actor {}-{} stopped", groupId, deviceId) override def receive: Receive = { - case RequestTrackDevice(`groupId`, `deviceId`) => - sender() ! DeviceRegistered - - case RequestTrackDevice(groupId, deviceId) => - log.warning( - "Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.", - groupId, deviceId, this.groupId, this.deviceId - ) - case RecordTemperature(id, value) => log.info("Recorded temperature reading {} with {}", value, id) lastTemperatureReading = Some(value) @@ -44,4 +33,4 @@ class Device(groupId: String, deviceId: String) extends Actor with ActorLogging sender() ! 
RespondTemperature(id, lastTemperatureReading) } } -//#device-with-register +//#full-device diff --git a/akka-docs/src/test/scala/tutorial_2/DeviceInProgress.scala b/akka-docs/src/test/scala/tutorial_3/DeviceInProgress.scala similarity index 91% rename from akka-docs/src/test/scala/tutorial_2/DeviceInProgress.scala rename to akka-docs/src/test/scala/tutorial_3/DeviceInProgress.scala index c0baf96a3b..3cc2377935 100644 --- a/akka-docs/src/test/scala/tutorial_2/DeviceInProgress.scala +++ b/akka-docs/src/test/scala/tutorial_3/DeviceInProgress.scala @@ -1,6 +1,4 @@ -package tutorial_2 - -import tutorial_5.Device.{ ReadTemperature, RecordTemperature, RespondTemperature, TemperatureRecorded } +package tutorial_3 object DeviceInProgress1 { @@ -28,6 +26,8 @@ object DeviceInProgress2 { } class Device(groupId: String, deviceId: String) extends Actor with ActorLogging { + import Device._ + var lastTemperatureReading: Option[Double] = None override def preStart(): Unit = log.info("Device actor {}-{} started", groupId, deviceId) diff --git a/akka-docs/src/test/scala/tutorial_3/DeviceSpec.scala b/akka-docs/src/test/scala/tutorial_3/DeviceSpec.scala index d9bd7e5c47..b08e61db66 100644 --- a/akka-docs/src/test/scala/tutorial_3/DeviceSpec.scala +++ b/akka-docs/src/test/scala/tutorial_3/DeviceSpec.scala @@ -11,28 +11,7 @@ class DeviceSpec extends AkkaSpec { "Device actor" must { - //#device-registration-tests - "reply to registration requests" in { - val probe = TestProbe() - val deviceActor = system.actorOf(Device.props("group", "device")) - - deviceActor.tell(DeviceManager.RequestTrackDevice("group", "device"), probe.ref) - probe.expectMsg(DeviceManager.DeviceRegistered) - probe.lastSender should ===(deviceActor) - } - - "ignore wrong registration requests" in { - val probe = TestProbe() - val deviceActor = system.actorOf(Device.props("group", "device")) - - deviceActor.tell(DeviceManager.RequestTrackDevice("wrongGroup", "device"), probe.ref) - probe.expectNoMsg(500.milliseconds) - - deviceActor.tell(DeviceManager.RequestTrackDevice("group", "Wrongdevice"), probe.ref) - probe.expectNoMsg(500.milliseconds) - } - //#device-registration-tests - + //#device-read-test "reply with empty reading if no temperature is known" in { val probe = TestProbe() val deviceActor = system.actorOf(Device.props("group", "device")) @@ -42,7 +21,9 @@ class DeviceSpec extends AkkaSpec { response.requestId should ===(42) response.value should ===(None) } + //#device-read-test + //#device-write-read-test "reply with latest temperature reading" in { val probe = TestProbe() val deviceActor = system.actorOf(Device.props("group", "device")) @@ -63,6 +44,7 @@ class DeviceSpec extends AkkaSpec { response2.requestId should ===(4) response2.value should ===(Some(55.0)) } + //#device-write-read-test } diff --git a/akka-docs/src/test/scala/tutorial_4/Device.scala b/akka-docs/src/test/scala/tutorial_4/Device.scala index 5082ddb5f1..640920f91f 100644 --- a/akka-docs/src/test/scala/tutorial_4/Device.scala +++ b/akka-docs/src/test/scala/tutorial_4/Device.scala @@ -4,11 +4,9 @@ package tutorial_4 import akka.actor.{ Actor, ActorLogging, Props } -import tutorial_4.Device.{ ReadTemperature, RecordTemperature, RespondTemperature, TemperatureRecorded } -import tutorial_4.DeviceManager.{ DeviceRegistered, RequestTrackDevice } +//#device-with-register object Device { - def props(groupId: String, deviceId: String): Props = Props(new Device(groupId, deviceId)) final case class RecordTemperature(requestId: Long, value: Double) @@ -19,6 +17,8 @@ object 
Device { } class Device(groupId: String, deviceId: String) extends Actor with ActorLogging { + import Device._ + var lastTemperatureReading: Option[Double] = None override def preStart(): Unit = log.info("Device actor {}-{} started", groupId, deviceId) @@ -26,10 +26,10 @@ class Device(groupId: String, deviceId: String) extends Actor with ActorLogging override def postStop(): Unit = log.info("Device actor {}-{} stopped", groupId, deviceId) override def receive: Receive = { - case RequestTrackDevice(`groupId`, `deviceId`) => - sender() ! DeviceRegistered + case DeviceManager.RequestTrackDevice(`groupId`, `deviceId`) => + sender() ! DeviceManager.DeviceRegistered - case RequestTrackDevice(groupId, deviceId) => + case DeviceManager.RequestTrackDevice(groupId, deviceId) => log.warning( "Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.", groupId, deviceId, this.groupId, this.deviceId @@ -44,3 +44,4 @@ class Device(groupId: String, deviceId: String) extends Actor with ActorLogging sender() ! RespondTemperature(id, lastTemperatureReading) } } +//#device-with-register diff --git a/akka-docs/src/test/scala/tutorial_4/DeviceGroup.scala b/akka-docs/src/test/scala/tutorial_4/DeviceGroup.scala index 5b179c6583..1b4498bee0 100644 --- a/akka-docs/src/test/scala/tutorial_4/DeviceGroup.scala +++ b/akka-docs/src/test/scala/tutorial_4/DeviceGroup.scala @@ -4,52 +4,49 @@ package tutorial_4 import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated } -import tutorial_4.DeviceGroup._ -import tutorial_4.DeviceManager.RequestTrackDevice +import DeviceGroup._ +import DeviceManager.RequestTrackDevice import scala.concurrent.duration._ +//#device-group-full +//#device-group-register object DeviceGroup { def props(groupId: String): Props = Props(new DeviceGroup(groupId)) + //#device-group-register final case class RequestDeviceList(requestId: Long) final case class ReplyDeviceList(requestId: Long, ids: Set[String]) - - //#query-protocol - final case class RequestAllTemperatures(requestId: Long) - final case class RespondAllTemperatures(requestId: Long, temperatures: Map[String, TemperatureReading]) - - sealed trait TemperatureReading - final case class Temperature(value: Double) extends TemperatureReading - case object TemperatureNotAvailable extends TemperatureReading - case object DeviceNotAvailable extends TemperatureReading - case object DeviceTimedOut extends TemperatureReading - //#query-protocol + //#device-group-register } +//#device-group-register +//#device-group-register +//#device-group-remove -//#query-added class DeviceGroup(groupId: String) extends Actor with ActorLogging { var deviceIdToActor = Map.empty[String, ActorRef] + //#device-group-register var actorToDeviceId = Map.empty[ActorRef, String] - var nextCollectionId = 0L + //#device-group-register override def preStart(): Unit = log.info("DeviceGroup {} started", groupId) override def postStop(): Unit = log.info("DeviceGroup {} stopped", groupId) override def receive: Receive = { - //#query-added case trackMsg @ RequestTrackDevice(`groupId`, _) => deviceIdToActor.get(trackMsg.deviceId) match { - case Some(ref) => - ref forward trackMsg + case Some(deviceActor) => + deviceActor forward trackMsg case None => log.info("Creating device actor for {}", trackMsg.deviceId) - val deviceActor = context.actorOf(Device.props(groupId, trackMsg.deviceId), "device-" + trackMsg.deviceId) + val deviceActor = context.actorOf(Device.props(groupId, trackMsg.deviceId), s"device-${trackMsg.deviceId}") + //#device-group-register 
context.watch(deviceActor) - deviceActor forward trackMsg - deviceIdToActor += trackMsg.deviceId -> deviceActor actorToDeviceId += deviceActor -> trackMsg.deviceId + //#device-group-register + deviceIdToActor += trackMsg.deviceId -> deviceActor + deviceActor forward trackMsg } case RequestTrackDevice(groupId, deviceId) => @@ -57,9 +54,12 @@ class DeviceGroup(groupId: String) extends Actor with ActorLogging { "Ignoring TrackDevice request for {}. This actor is responsible for {}.", groupId, this.groupId ) + //#device-group-register + //#device-group-remove case RequestDeviceList(requestId) => sender() ! ReplyDeviceList(requestId, deviceIdToActor.keySet) + //#device-group-remove case Terminated(deviceActor) => val deviceId = actorToDeviceId(deviceActor) @@ -67,17 +67,9 @@ class DeviceGroup(groupId: String) extends Actor with ActorLogging { actorToDeviceId -= deviceActor deviceIdToActor -= deviceId - //#query-added - // ... other cases omitted - - case RequestAllTemperatures(requestId) => - context.actorOf(DeviceGroupQuery.props( - actorToDeviceId = actorToDeviceId, - requestId = requestId, - requester = sender(), - 3.seconds - )) + //#device-group-register } - } -//#query-added +//#device-group-remove +//#device-group-register +//#device-group-full diff --git a/akka-docs/src/test/scala/tutorial_4/DeviceGroupSpec.scala b/akka-docs/src/test/scala/tutorial_4/DeviceGroupSpec.scala index 24a069e0cd..6eb85c04f0 100644 --- a/akka-docs/src/test/scala/tutorial_4/DeviceGroupSpec.scala +++ b/akka-docs/src/test/scala/tutorial_4/DeviceGroupSpec.scala @@ -13,6 +13,7 @@ class DeviceGroupSpec extends AkkaSpec { "DeviceGroup actor" must { + //#device-group-test-registration "be able to register a device actor" in { val probe = TestProbe() val groupActor = system.actorOf(DeviceGroup.props("group")) @@ -40,7 +41,9 @@ class DeviceGroupSpec extends AkkaSpec { groupActor.tell(DeviceManager.RequestTrackDevice("wrongGroup", "device1"), probe.ref) probe.expectNoMsg(500.milliseconds) } + //#device-group-test-registration + //#device-group-test3 "return same actor for same deviceId" in { val probe = TestProbe() val groupActor = system.actorOf(DeviceGroup.props("group")) @@ -55,7 +58,9 @@ class DeviceGroupSpec extends AkkaSpec { deviceActor1 should ===(deviceActor2) } + //#device-group-test3 + //#device-group-list-terminate-test "be able to list active devices" in { val probe = TestProbe() val groupActor = system.actorOf(DeviceGroup.props("group")) @@ -95,41 +100,7 @@ class DeviceGroupSpec extends AkkaSpec { probe.expectMsg(DeviceGroup.ReplyDeviceList(requestId = 1, Set("device2"))) } } - - //#group-query-integration-test - "be able to collect temperatures from all active devices" in { - val probe = TestProbe() - val groupActor = system.actorOf(DeviceGroup.props("group")) - - groupActor.tell(DeviceManager.RequestTrackDevice("group", "device1"), probe.ref) - probe.expectMsg(DeviceManager.DeviceRegistered) - val deviceActor1 = probe.lastSender - - groupActor.tell(DeviceManager.RequestTrackDevice("group", "device2"), probe.ref) - probe.expectMsg(DeviceManager.DeviceRegistered) - val deviceActor2 = probe.lastSender - - groupActor.tell(DeviceManager.RequestTrackDevice("group", "device3"), probe.ref) - probe.expectMsg(DeviceManager.DeviceRegistered) - val deviceActor3 = probe.lastSender - - // Check that the device actors are working - deviceActor1.tell(Device.RecordTemperature(requestId = 0, 1.0), probe.ref) - probe.expectMsg(Device.TemperatureRecorded(requestId = 0)) - deviceActor2.tell(Device.RecordTemperature(requestId = 
1, 2.0), probe.ref) - probe.expectMsg(Device.TemperatureRecorded(requestId = 1)) - // No temperature for device3 - - groupActor.tell(DeviceGroup.RequestAllTemperatures(requestId = 0), probe.ref) - probe.expectMsg( - DeviceGroup.RespondAllTemperatures( - requestId = 0, - temperatures = Map( - "device1" -> DeviceGroup.Temperature(1.0), - "device2" -> DeviceGroup.Temperature(2.0), - "device3" -> DeviceGroup.TemperatureNotAvailable))) - } - //#group-query-integration-test + //#device-group-list-terminate-test } diff --git a/akka-docs/src/test/scala/tutorial_4/DeviceManager.scala b/akka-docs/src/test/scala/tutorial_4/DeviceManager.scala index 5d4caf16ae..ab8b0852e6 100644 --- a/akka-docs/src/test/scala/tutorial_4/DeviceManager.scala +++ b/akka-docs/src/test/scala/tutorial_4/DeviceManager.scala @@ -5,13 +5,16 @@ package tutorial_4 import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated } -import tutorial_4.DeviceManager.RequestTrackDevice +import DeviceManager.RequestTrackDevice +//#device-manager-full object DeviceManager { def props(): Props = Props(new DeviceManager) + //#device-manager-msgs final case class RequestTrackDevice(groupId: String, deviceId: String) case object DeviceRegistered + //#device-manager-msgs } class DeviceManager extends Actor with ActorLogging { @@ -45,3 +48,4 @@ class DeviceManager extends Actor with ActorLogging { } } +//#device-manager-full diff --git a/akka-docs/src/test/scala/tutorial_2/DeviceSpec.scala b/akka-docs/src/test/scala/tutorial_4/DeviceSpec.scala similarity index 64% rename from akka-docs/src/test/scala/tutorial_2/DeviceSpec.scala rename to akka-docs/src/test/scala/tutorial_4/DeviceSpec.scala index 2bb7ea3258..26472f777c 100644 --- a/akka-docs/src/test/scala/tutorial_2/DeviceSpec.scala +++ b/akka-docs/src/test/scala/tutorial_4/DeviceSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. 
*/ -package tutorial_2 +package tutorial_4 import akka.testkit.{ AkkaSpec, TestProbe } @@ -11,7 +11,28 @@ class DeviceSpec extends AkkaSpec { "Device actor" must { - //#device-read-test + //#device-registration-tests + "reply to registration requests" in { + val probe = TestProbe() + val deviceActor = system.actorOf(Device.props("group", "device")) + + deviceActor.tell(DeviceManager.RequestTrackDevice("group", "device"), probe.ref) + probe.expectMsg(DeviceManager.DeviceRegistered) + probe.lastSender should ===(deviceActor) + } + + "ignore wrong registration requests" in { + val probe = TestProbe() + val deviceActor = system.actorOf(Device.props("group", "device")) + + deviceActor.tell(DeviceManager.RequestTrackDevice("wrongGroup", "device"), probe.ref) + probe.expectNoMsg(500.milliseconds) + + deviceActor.tell(DeviceManager.RequestTrackDevice("group", "Wrongdevice"), probe.ref) + probe.expectNoMsg(500.milliseconds) + } + //#device-registration-tests + "reply with empty reading if no temperature is known" in { val probe = TestProbe() val deviceActor = system.actorOf(Device.props("group", "device")) @@ -21,9 +42,7 @@ class DeviceSpec extends AkkaSpec { response.requestId should ===(42) response.value should ===(None) } - //#device-read-test - //#device-write-read-test "reply with latest temperature reading" in { val probe = TestProbe() val deviceActor = system.actorOf(Device.props("group", "device")) @@ -44,7 +63,6 @@ class DeviceSpec extends AkkaSpec { response2.requestId should ===(4) response2.value should ===(Some(55.0)) } - //#device-write-read-test } diff --git a/akka-docs/src/test/scala/tutorial_5/Device.scala b/akka-docs/src/test/scala/tutorial_5/Device.scala index f7a4a8ee21..d41c7aef97 100644 --- a/akka-docs/src/test/scala/tutorial_5/Device.scala +++ b/akka-docs/src/test/scala/tutorial_5/Device.scala @@ -4,8 +4,6 @@ package tutorial_5 import akka.actor.{ Actor, ActorLogging, Props } -import tutorial_5.Device.{ ReadTemperature, RecordTemperature, RespondTemperature, TemperatureRecorded } -import tutorial_5.DeviceManager.{ DeviceRegistered, RequestTrackDevice } object Device { @@ -19,6 +17,8 @@ object Device { } class Device(groupId: String, deviceId: String) extends Actor with ActorLogging { + import Device._ + var lastTemperatureReading: Option[Double] = None override def preStart(): Unit = log.info("Device actor {}-{} started", groupId, deviceId) @@ -26,10 +26,10 @@ class Device(groupId: String, deviceId: String) extends Actor with ActorLogging override def postStop(): Unit = log.info("Device actor {}-{} stopped", groupId, deviceId) override def receive: Receive = { - case RequestTrackDevice(`groupId`, `deviceId`) => - sender() ! DeviceRegistered + case DeviceManager.RequestTrackDevice(`groupId`, `deviceId`) => + sender() ! 
DeviceManager.DeviceRegistered - case RequestTrackDevice(groupId, deviceId) => + case DeviceManager.RequestTrackDevice(groupId, deviceId) => log.warning( "Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.", groupId, deviceId, this.groupId, this.deviceId diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceGroup.scala b/akka-docs/src/test/scala/tutorial_5/DeviceGroup.scala index afae95ea08..02ea60141c 100644 --- a/akka-docs/src/test/scala/tutorial_5/DeviceGroup.scala +++ b/akka-docs/src/test/scala/tutorial_5/DeviceGroup.scala @@ -4,17 +4,18 @@ package tutorial_5 import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated } -import tutorial_5.DeviceGroup._ -import tutorial_5.DeviceManager.RequestTrackDevice +import DeviceGroup._ +import DeviceManager.RequestTrackDevice + import scala.concurrent.duration._ object DeviceGroup { - def props(groupId: String): Props = Props(new DeviceGroup(groupId)) final case class RequestDeviceList(requestId: Long) final case class ReplyDeviceList(requestId: Long, ids: Set[String]) + //#query-protocol final case class RequestAllTemperatures(requestId: Long) final case class RespondAllTemperatures(requestId: Long, temperatures: Map[String, TemperatureReading]) @@ -23,8 +24,10 @@ object DeviceGroup { case object TemperatureNotAvailable extends TemperatureReading case object DeviceNotAvailable extends TemperatureReading case object DeviceTimedOut extends TemperatureReading + //#query-protocol } +//#query-added class DeviceGroup(groupId: String) extends Actor with ActorLogging { var deviceIdToActor = Map.empty[String, ActorRef] var actorToDeviceId = Map.empty[ActorRef, String] @@ -35,7 +38,7 @@ class DeviceGroup(groupId: String) extends Actor with ActorLogging { override def postStop(): Unit = log.info("DeviceGroup {} stopped", groupId) override def receive: Receive = { - // Note the backticks + //#query-added case trackMsg @ RequestTrackDevice(`groupId`, _) => deviceIdToActor.get(trackMsg.deviceId) match { case Some(ref) => @@ -64,6 +67,9 @@ class DeviceGroup(groupId: String) extends Actor with ActorLogging { actorToDeviceId -= deviceActor deviceIdToActor -= deviceId + //#query-added + // ... other cases omitted + case RequestAllTemperatures(requestId) => context.actorOf(DeviceGroupQuery.props( actorToDeviceId = actorToDeviceId, @@ -74,3 +80,4 @@ class DeviceGroup(groupId: String) extends Actor with ActorLogging { } } +//#query-added diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala index d670c7c9a7..7ca1f4ccc8 100644 --- a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala +++ b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala @@ -3,13 +3,13 @@ */ package tutorial_5 -import akka.actor.Actor.Receive import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated } import scala.concurrent.duration._ +//#query-full +//#query-outline object DeviceGroupQuery { - case object CollectionTimeout def props( @@ -37,13 +37,14 @@ class DeviceGroupQuery( context.watch(deviceActor) deviceActor ! 
Device.ReadTemperature(0) } - } override def postStop(): Unit = { queryTimeoutTimer.cancel() } + //#query-outline + //#query-state override def receive: Receive = waitingForReplies( Map.empty, @@ -63,9 +64,7 @@ class DeviceGroupQuery( receivedResponse(deviceActor, reading, stillWaiting, repliesSoFar) case Terminated(deviceActor) => - if (stillWaiting.contains(deviceActor)) - receivedResponse(deviceActor, DeviceGroup.DeviceNotAvailable, stillWaiting, repliesSoFar) - // else ignore + receivedResponse(deviceActor, DeviceGroup.DeviceNotAvailable, stillWaiting, repliesSoFar) case CollectionTimeout => val timedOutReplies = @@ -76,13 +75,16 @@ class DeviceGroupQuery( requester ! DeviceGroup.RespondAllTemperatures(requestId, repliesSoFar ++ timedOutReplies) context.stop(self) } + //#query-state + //#query-collect-reply def receivedResponse( deviceActor: ActorRef, reading: DeviceGroup.TemperatureReading, stillWaiting: Set[ActorRef], repliesSoFar: Map[String, DeviceGroup.TemperatureReading] ): Unit = { + context.unwatch(deviceActor) val deviceId = actorToDeviceId(deviceActor) val newStillWaiting = stillWaiting - deviceActor @@ -94,5 +96,9 @@ class DeviceGroupQuery( context.become(waitingForReplies(newRepliesSoFar, newStillWaiting)) } } + //#query-collect-reply + //#query-outline } +//#query-outline +//#query-full diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala index 8b423f680c..63f1890932 100644 --- a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala +++ b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala @@ -12,6 +12,7 @@ class DeviceGroupQuerySpec extends AkkaSpec { "DeviceGroupQuery" must { + //#query-test-normal "return temperature value for working devices" in { val requester = TestProbe() @@ -39,7 +40,9 @@ class DeviceGroupQuerySpec extends AkkaSpec { ) )) } + //#query-test-normal + //#query-test-no-reading "return TemperatureNotAvailable for devices with no readings" in { val requester = TestProbe() @@ -67,7 +70,9 @@ class DeviceGroupQuerySpec extends AkkaSpec { ) )) } + //#query-test-no-reading + //#query-test-stopped "return DeviceNotAvailable if device stops before answering" in { val requester = TestProbe() @@ -95,7 +100,9 @@ class DeviceGroupQuerySpec extends AkkaSpec { ) )) } + //#query-test-stopped + //#query-test-stopped-later "return temperature reading even if device stops after answering" in { val requester = TestProbe() @@ -124,7 +131,9 @@ class DeviceGroupQuerySpec extends AkkaSpec { ) )) } + //#query-test-stopped-later + //#query-test-timeout "return DeviceTimedOut if device does not answer in time" in { val requester = TestProbe() @@ -151,6 +160,7 @@ class DeviceGroupQuerySpec extends AkkaSpec { ) )) } + //#query-test-timeout } diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala b/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala index 3faab1fdd5..6be065d2b9 100644 --- a/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala +++ b/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala @@ -96,6 +96,7 @@ class DeviceGroupSpec extends AkkaSpec { } } + //#group-query-integration-test "be able to collect temperatures from all active devices" in { val probe = TestProbe() val groupActor = system.actorOf(DeviceGroup.props("group")) @@ -128,6 +129,7 @@ class DeviceGroupSpec extends AkkaSpec { "device2" -> DeviceGroup.Temperature(2.0), "device3" -> DeviceGroup.TemperatureNotAvailable))) } + //#group-query-integration-test } diff --git 
a/akka-docs/src/test/scala/tutorial_5/DeviceManager.scala b/akka-docs/src/test/scala/tutorial_5/DeviceManager.scala index e9df70ac34..66023d5595 100644 --- a/akka-docs/src/test/scala/tutorial_5/DeviceManager.scala +++ b/akka-docs/src/test/scala/tutorial_5/DeviceManager.scala @@ -5,7 +5,7 @@ package tutorial_5 import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated } -import tutorial_5.DeviceManager.RequestTrackDevice +import DeviceManager.RequestTrackDevice object DeviceManager { def props(): Props = Props(new DeviceManager) diff --git a/akka-docs/src/test/scala/tutorial_2/Device.scala b/akka-docs/src/test/scala/tutorial_6/Device.scala similarity index 75% rename from akka-docs/src/test/scala/tutorial_2/Device.scala rename to akka-docs/src/test/scala/tutorial_6/Device.scala index 7ee720606d..2e0db3cd30 100644 --- a/akka-docs/src/test/scala/tutorial_2/Device.scala +++ b/akka-docs/src/test/scala/tutorial_6/Device.scala @@ -1,12 +1,12 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package tutorial_2 +package tutorial_6 -//#full-device import akka.actor.{ Actor, ActorLogging, Props } object Device { + def props(groupId: String, deviceId: String): Props = Props(new Device(groupId, deviceId)) final case class RecordTemperature(requestId: Long, value: Double) @@ -18,12 +18,23 @@ object Device { class Device(groupId: String, deviceId: String) extends Actor with ActorLogging { import Device._ + var lastTemperatureReading: Option[Double] = None override def preStart(): Unit = log.info("Device actor {}-{} started", groupId, deviceId) + override def postStop(): Unit = log.info("Device actor {}-{} stopped", groupId, deviceId) override def receive: Receive = { + case DeviceManager.RequestTrackDevice(`groupId`, `deviceId`) => + sender() ! DeviceManager.DeviceRegistered + + case DeviceManager.RequestTrackDevice(groupId, deviceId) => + log.warning( + "Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.", + groupId, deviceId, this.groupId, this.deviceId + ) + case RecordTemperature(id, value) => log.info("Recorded temperature reading {} with {}", value, id) lastTemperatureReading = Some(value) @@ -33,4 +44,3 @@ class Device(groupId: String, deviceId: String) extends Actor with ActorLogging sender() ! RespondTemperature(id, lastTemperatureReading) } } -//#full-device diff --git a/akka-docs/src/test/scala/tutorial_3/DeviceGroup.scala b/akka-docs/src/test/scala/tutorial_6/DeviceGroup.scala similarity index 65% rename from akka-docs/src/test/scala/tutorial_3/DeviceGroup.scala rename to akka-docs/src/test/scala/tutorial_6/DeviceGroup.scala index 7eb6cb7e68..30f2ac7f6e 100644 --- a/akka-docs/src/test/scala/tutorial_3/DeviceGroup.scala +++ b/akka-docs/src/test/scala/tutorial_6/DeviceGroup.scala @@ -1,52 +1,52 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. 
*/ -package tutorial_3 +package tutorial_6 import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated } -import tutorial_3.DeviceGroup._ -import tutorial_3.DeviceManager.RequestTrackDevice - +import DeviceGroup._ +import DeviceManager.RequestTrackDevice import scala.concurrent.duration._ -//#device-group-full -//#device-group-register object DeviceGroup { + def props(groupId: String): Props = Props(new DeviceGroup(groupId)) - //#device-group-register final case class RequestDeviceList(requestId: Long) final case class ReplyDeviceList(requestId: Long, ids: Set[String]) - //#device-group-register + + final case class RequestAllTemperatures(requestId: Long) + final case class RespondAllTemperatures(requestId: Long, temperatures: Map[String, TemperatureReading]) + + sealed trait TemperatureReading + final case class Temperature(value: Double) extends TemperatureReading + case object TemperatureNotAvailable extends TemperatureReading + case object DeviceNotAvailable extends TemperatureReading + case object DeviceTimedOut extends TemperatureReading } -//#device-group-register -//#device-group-register -//#device-group-remove class DeviceGroup(groupId: String) extends Actor with ActorLogging { var deviceIdToActor = Map.empty[String, ActorRef] - //#device-group-register var actorToDeviceId = Map.empty[ActorRef, String] - //#device-group-register + var nextCollectionId = 0L override def preStart(): Unit = log.info("DeviceGroup {} started", groupId) override def postStop(): Unit = log.info("DeviceGroup {} stopped", groupId) override def receive: Receive = { + // Note the backticks case trackMsg @ RequestTrackDevice(`groupId`, _) => deviceIdToActor.get(trackMsg.deviceId) match { - case Some(deviceActor) => - deviceActor forward trackMsg + case Some(ref) => + ref forward trackMsg case None => log.info("Creating device actor for {}", trackMsg.deviceId) - val deviceActor = context.actorOf(Device.props(groupId, trackMsg.deviceId), s"device-${trackMsg.deviceId}") - //#device-group-register + val deviceActor = context.actorOf(Device.props(groupId, trackMsg.deviceId), "device-" + trackMsg.deviceId) context.watch(deviceActor) - actorToDeviceId += deviceActor -> trackMsg.deviceId - //#device-group-register - deviceIdToActor += trackMsg.deviceId -> deviceActor deviceActor forward trackMsg + deviceIdToActor += trackMsg.deviceId -> deviceActor + actorToDeviceId += deviceActor -> trackMsg.deviceId } case RequestTrackDevice(groupId, deviceId) => @@ -54,12 +54,9 @@ class DeviceGroup(groupId: String) extends Actor with ActorLogging { "Ignoring TrackDevice request for {}. This actor is responsible for {}.", groupId, this.groupId ) - //#device-group-register - //#device-group-remove case RequestDeviceList(requestId) => sender() ! 
ReplyDeviceList(requestId, deviceIdToActor.keySet) - //#device-group-remove case Terminated(deviceActor) => val deviceId = actorToDeviceId(deviceActor) @@ -67,9 +64,13 @@ class DeviceGroup(groupId: String) extends Actor with ActorLogging { actorToDeviceId -= deviceActor deviceIdToActor -= deviceId - //#device-group-register + case RequestAllTemperatures(requestId) => + context.actorOf(DeviceGroupQuery.props( + actorToDeviceId = actorToDeviceId, + requestId = requestId, + requester = sender(), + 3.seconds + )) } + } -//#device-group-remove -//#device-group-register -//#device-group-full diff --git a/akka-docs/src/test/scala/tutorial_4/DeviceGroupQuery.scala b/akka-docs/src/test/scala/tutorial_6/DeviceGroupQuery.scala similarity index 88% rename from akka-docs/src/test/scala/tutorial_4/DeviceGroupQuery.scala rename to akka-docs/src/test/scala/tutorial_6/DeviceGroupQuery.scala index 7614f145b9..5bf011acbc 100644 --- a/akka-docs/src/test/scala/tutorial_4/DeviceGroupQuery.scala +++ b/akka-docs/src/test/scala/tutorial_6/DeviceGroupQuery.scala @@ -1,15 +1,15 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package tutorial_4 +package tutorial_6 +import akka.actor.Actor.Receive import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated } import scala.concurrent.duration._ -//#query-full -//#query-outline object DeviceGroupQuery { + case object CollectionTimeout def props( @@ -37,14 +37,13 @@ class DeviceGroupQuery( context.watch(deviceActor) deviceActor ! Device.ReadTemperature(0) } + } override def postStop(): Unit = { queryTimeoutTimer.cancel() } - //#query-outline - //#query-state override def receive: Receive = waitingForReplies( Map.empty, @@ -64,7 +63,9 @@ class DeviceGroupQuery( receivedResponse(deviceActor, reading, stillWaiting, repliesSoFar) case Terminated(deviceActor) => - receivedResponse(deviceActor, DeviceGroup.DeviceNotAvailable, stillWaiting, repliesSoFar) + if (stillWaiting.contains(deviceActor)) + receivedResponse(deviceActor, DeviceGroup.DeviceNotAvailable, stillWaiting, repliesSoFar) + // else ignore case CollectionTimeout => val timedOutReplies = @@ -75,16 +76,13 @@ class DeviceGroupQuery( requester ! DeviceGroup.RespondAllTemperatures(requestId, repliesSoFar ++ timedOutReplies) context.stop(self) } - //#query-state - //#query-collect-reply def receivedResponse( deviceActor: ActorRef, reading: DeviceGroup.TemperatureReading, stillWaiting: Set[ActorRef], repliesSoFar: Map[String, DeviceGroup.TemperatureReading] ): Unit = { - context.unwatch(deviceActor) val deviceId = actorToDeviceId(deviceActor) val newStillWaiting = stillWaiting - deviceActor @@ -96,9 +94,5 @@ class DeviceGroupQuery( context.become(waitingForReplies(newRepliesSoFar, newStillWaiting)) } } - //#query-collect-reply - //#query-outline } -//#query-outline -//#query-full diff --git a/akka-docs/src/test/scala/tutorial_4/DeviceGroupQuerySpec.scala b/akka-docs/src/test/scala/tutorial_6/DeviceGroupQuerySpec.scala similarity index 94% rename from akka-docs/src/test/scala/tutorial_4/DeviceGroupQuerySpec.scala rename to akka-docs/src/test/scala/tutorial_6/DeviceGroupQuerySpec.scala index 2ee67d2fc5..d6c068e08e 100644 --- a/akka-docs/src/test/scala/tutorial_4/DeviceGroupQuerySpec.scala +++ b/akka-docs/src/test/scala/tutorial_6/DeviceGroupQuerySpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. 
*/ -package tutorial_4 +package tutorial_6 import akka.actor.PoisonPill import akka.testkit.{ AkkaSpec, TestProbe } @@ -12,7 +12,6 @@ class DeviceGroupQuerySpec extends AkkaSpec { "DeviceGroupQuery" must { - //#query-test-normal "return temperature value for working devices" in { val requester = TestProbe() @@ -40,9 +39,7 @@ class DeviceGroupQuerySpec extends AkkaSpec { ) )) } - //#query-test-normal - //#query-test-no-reading "return TemperatureNotAvailable for devices with no readings" in { val requester = TestProbe() @@ -70,9 +67,7 @@ class DeviceGroupQuerySpec extends AkkaSpec { ) )) } - //#query-test-no-reading - //#query-test-stopped "return DeviceNotAvailable if device stops before answering" in { val requester = TestProbe() @@ -100,9 +95,7 @@ class DeviceGroupQuerySpec extends AkkaSpec { ) )) } - //#query-test-stopped - //#query-test-stopped-later "return temperature reading even if device stops after answering" in { val requester = TestProbe() @@ -131,9 +124,7 @@ class DeviceGroupQuerySpec extends AkkaSpec { ) )) } - //#query-test-stopped-later - //#query-test-timeout "return DeviceTimedOut if device does not answer in time" in { val requester = TestProbe() @@ -160,7 +151,6 @@ class DeviceGroupQuerySpec extends AkkaSpec { ) )) } - //#query-test-timeout } diff --git a/akka-docs/src/test/scala/tutorial_3/DeviceGroupSpec.scala b/akka-docs/src/test/scala/tutorial_6/DeviceGroupSpec.scala similarity index 71% rename from akka-docs/src/test/scala/tutorial_3/DeviceGroupSpec.scala rename to akka-docs/src/test/scala/tutorial_6/DeviceGroupSpec.scala index e40295b40c..3f83cc811e 100644 --- a/akka-docs/src/test/scala/tutorial_3/DeviceGroupSpec.scala +++ b/akka-docs/src/test/scala/tutorial_6/DeviceGroupSpec.scala @@ -2,7 +2,7 @@ * Copyright (C) 2009-2017 Lightbend Inc. 
*/ -package tutorial_3 +package tutorial_6 import akka.actor.PoisonPill import akka.testkit.{ AkkaSpec, TestProbe } @@ -13,7 +13,6 @@ class DeviceGroupSpec extends AkkaSpec { "DeviceGroup actor" must { - //#device-group-test-registration "be able to register a device actor" in { val probe = TestProbe() val groupActor = system.actorOf(DeviceGroup.props("group")) @@ -41,9 +40,7 @@ class DeviceGroupSpec extends AkkaSpec { groupActor.tell(DeviceManager.RequestTrackDevice("wrongGroup", "device1"), probe.ref) probe.expectNoMsg(500.milliseconds) } - //#device-group-test-registration - //#device-group-test3 "return same actor for same deviceId" in { val probe = TestProbe() val groupActor = system.actorOf(DeviceGroup.props("group")) @@ -58,9 +55,7 @@ class DeviceGroupSpec extends AkkaSpec { deviceActor1 should ===(deviceActor2) } - //#device-group-test3 - //#device-group-list-terminate-test "be able to list active devices" in { val probe = TestProbe() val groupActor = system.actorOf(DeviceGroup.props("group")) @@ -100,7 +95,39 @@ class DeviceGroupSpec extends AkkaSpec { probe.expectMsg(DeviceGroup.ReplyDeviceList(requestId = 1, Set("device2"))) } } - //#device-group-list-terminate-test + + "be able to collect temperatures from all active devices" in { + val probe = TestProbe() + val groupActor = system.actorOf(DeviceGroup.props("group")) + + groupActor.tell(DeviceManager.RequestTrackDevice("group", "device1"), probe.ref) + probe.expectMsg(DeviceManager.DeviceRegistered) + val deviceActor1 = probe.lastSender + + groupActor.tell(DeviceManager.RequestTrackDevice("group", "device2"), probe.ref) + probe.expectMsg(DeviceManager.DeviceRegistered) + val deviceActor2 = probe.lastSender + + groupActor.tell(DeviceManager.RequestTrackDevice("group", "device3"), probe.ref) + probe.expectMsg(DeviceManager.DeviceRegistered) + val deviceActor3 = probe.lastSender + + // Check that the device actors are working + deviceActor1.tell(Device.RecordTemperature(requestId = 0, 1.0), probe.ref) + probe.expectMsg(Device.TemperatureRecorded(requestId = 0)) + deviceActor2.tell(Device.RecordTemperature(requestId = 1, 2.0), probe.ref) + probe.expectMsg(Device.TemperatureRecorded(requestId = 1)) + // No temperature for device3 + + groupActor.tell(DeviceGroup.RequestAllTemperatures(requestId = 0), probe.ref) + probe.expectMsg( + DeviceGroup.RespondAllTemperatures( + requestId = 0, + temperatures = Map( + "device1" -> DeviceGroup.Temperature(1.0), + "device2" -> DeviceGroup.Temperature(2.0), + "device3" -> DeviceGroup.TemperatureNotAvailable))) + } } diff --git a/akka-docs/src/test/scala/tutorial_3/DeviceManager.scala b/akka-docs/src/test/scala/tutorial_6/DeviceManager.scala similarity index 89% rename from akka-docs/src/test/scala/tutorial_3/DeviceManager.scala rename to akka-docs/src/test/scala/tutorial_6/DeviceManager.scala index 123bc03d86..fa0ce9edae 100644 --- a/akka-docs/src/test/scala/tutorial_3/DeviceManager.scala +++ b/akka-docs/src/test/scala/tutorial_6/DeviceManager.scala @@ -2,19 +2,16 @@ * Copyright (C) 2009-2017 Lightbend Inc. 
*/ -package tutorial_3 +package tutorial_6 import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated } -import tutorial_3.DeviceManager.RequestTrackDevice +import DeviceManager.RequestTrackDevice -//#device-manager-full object DeviceManager { def props(): Props = Props(new DeviceManager) - //#device-manager-msgs final case class RequestTrackDevice(groupId: String, deviceId: String) case object DeviceRegistered - //#device-manager-msgs } class DeviceManager extends Actor with ActorLogging { @@ -48,4 +45,3 @@ class DeviceManager extends Actor with ActorLogging { } } -//#device-manager-full diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceSpec.scala b/akka-docs/src/test/scala/tutorial_6/DeviceSpec.scala similarity index 99% rename from akka-docs/src/test/scala/tutorial_5/DeviceSpec.scala rename to akka-docs/src/test/scala/tutorial_6/DeviceSpec.scala index b3d57c1e9e..b9de6dad97 100644 --- a/akka-docs/src/test/scala/tutorial_5/DeviceSpec.scala +++ b/akka-docs/src/test/scala/tutorial_6/DeviceSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package tutorial_5 +package tutorial_6 import akka.testkit.{ AkkaSpec, TestProbe } diff --git a/akka-docs/src/test/scala/tutorial_5/IotApp.scala b/akka-docs/src/test/scala/tutorial_6/IotApp.scala similarity index 88% rename from akka-docs/src/test/scala/tutorial_5/IotApp.scala rename to akka-docs/src/test/scala/tutorial_6/IotApp.scala index 059acdd18f..e40baf0ca3 100644 --- a/akka-docs/src/test/scala/tutorial_5/IotApp.scala +++ b/akka-docs/src/test/scala/tutorial_6/IotApp.scala @@ -1,10 +1,10 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package tutorial_5 +package tutorial_6 import akka.actor.ActorSystem -import tutorial_5.DeviceManager.RequestTrackDevice +import DeviceManager.RequestTrackDevice import scala.io.StdIn diff --git a/akka-docs/src/test/scala/tutorial_5/IotSupervisor.scala b/akka-docs/src/test/scala/tutorial_6/IotSupervisor.scala similarity index 96% rename from akka-docs/src/test/scala/tutorial_5/IotSupervisor.scala rename to akka-docs/src/test/scala/tutorial_6/IotSupervisor.scala index 0f56f0578d..f37e0d0e9a 100644 --- a/akka-docs/src/test/scala/tutorial_5/IotSupervisor.scala +++ b/akka-docs/src/test/scala/tutorial_6/IotSupervisor.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2017 Lightbend Inc. */ -package tutorial_5 +package tutorial_6 import akka.actor.{ Actor, ActorLogging, ActorRef, Props } diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala index 51aceed54d..62b262e74c 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -324,9 +324,9 @@ private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) ex } when(Initial, stateTimeout = 10 seconds) { - case Event(Hello(name, addr), _) ⇒ + case Event(Hello(name, address), _) ⇒ roleName = RoleName(name) - controller ! NodeInfo(roleName, addr, self) + controller ! 
NodeInfo(roleName, address, self) goto(Ready) case Event(x: NetworkOp, _) ⇒ log.warning("client {} sent no Hello in first message (instead {}), disconnecting", getAddrString(channel), x) @@ -426,7 +426,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP val (ip, port) = channel.getRemoteAddress match { case s: InetSocketAddress ⇒ (s.getAddress.getHostAddress, s.getPort) } val name = ip + ":" + port + "-server" + generation.next sender() ! context.actorOf(Props(classOf[ServerFSM], self, channel).withDeploy(Deploy.local), name) - case c @ NodeInfo(name, addr, fsm) ⇒ + case c @ NodeInfo(name, address, fsm) ⇒ barrier forward c if (nodes contains name) { if (initialParticipants > 0) { @@ -442,7 +442,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP initialParticipants = 0 } if (addrInterest contains name) { - addrInterest(name) foreach (_ ! ToClient(AddressReply(name, addr))) + addrInterest(name) foreach (_ ! ToClient(AddressReply(name, address))) addrInterest -= name } } diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala index 4e35a14ac4..99027f18aa 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -75,8 +75,8 @@ private[akka] class MsgEncoder extends OneToOneEncoder { case x: NetworkOp ⇒ val w = TCP.Wrapper.newBuilder x match { - case Hello(name, addr) ⇒ - w.setHello(TCP.Hello.newBuilder.setName(name).setAddress(addr)) + case Hello(name, address) ⇒ + w.setHello(TCP.Hello.newBuilder.setName(name).setAddress(address)) case EnterBarrier(name, timeout) ⇒ val barrier = TCP.EnterBarrier.newBuilder.setName(name) timeout foreach (t ⇒ barrier.setTimeout(t.toNanos)) @@ -101,8 +101,8 @@ private[akka] class MsgEncoder extends OneToOneEncoder { w.setFailure(TCP.InjectFailure.newBuilder.setFailure(TCP.FailType.ShutdownAbrupt)) case GetAddress(node) ⇒ w.setAddr(TCP.AddressRequest.newBuilder.setNode(node.name)) - case AddressReply(node, addr) ⇒ - w.setAddr(TCP.AddressRequest.newBuilder.setNode(node.name).setAddr(addr)) + case AddressReply(node, address) ⇒ + w.setAddr(TCP.AddressRequest.newBuilder.setNode(node.name).setAddr(address)) case _: Done ⇒ w.setDone("") } diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala index 04e0d98847..c44c5c9379 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala @@ -226,9 +226,9 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) log.warning("did not expect {}", op) } stay using d.copy(runningOp = None) - case AddressReply(node, addr) ⇒ + case AddressReply(node, address) ⇒ runningOp match { - case Some((_, requester)) ⇒ requester ! addr + case Some((_, requester)) ⇒ requester ! 
address case None ⇒ log.warning("did not expect {}", op) } stay using d.copy(runningOp = None) diff --git a/akka-persistence/src/main/mima-filters/2.4.1.backwards.excludes b/akka-persistence/src/main/mima-filters/2.4.1.backwards.excludes new file mode 100644 index 0000000000..647082b7c4 --- /dev/null +++ b/akka-persistence/src/main/mima-filters/2.4.1.backwards.excludes @@ -0,0 +1,5 @@ +# #19008 +ProblemFilters.exclude[Problem]("akka.persistence.journal.inmem.InmemJournal*") +ProblemFilters.exclude[Problem]("akka.persistence.journal.inmem.InmemStore*") + +ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.PersistenceStash.internalStashOverflowStrategy") diff --git a/akka-persistence/src/main/mima-filters/2.4.11.backwards.excludes b/akka-persistence/src/main/mima-filters/2.4.11.backwards.excludes new file mode 100644 index 0000000000..af712a9535 --- /dev/null +++ b/akka-persistence/src/main/mima-filters/2.4.11.backwards.excludes @@ -0,0 +1,4 @@ +# synthetic method currentEventsByTag$default$2()Long in class akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal has a different result type in current version, where it is akka.persistence.query.Offset rather than Long +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal.currentEventsByTag$default$2") +# synthetic method eventsByTag$default$2()Long in class akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal has a different result type in current version, where it is akka.persistence.query.Offset rather than Long +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal.eventsByTag$default$2") diff --git a/akka-persistence/src/main/mima-filters/2.4.14.backwards.excludes b/akka-persistence/src/main/mima-filters/2.4.14.backwards.excludes new file mode 100644 index 0000000000..476c3e6456 --- /dev/null +++ b/akka-persistence/src/main/mima-filters/2.4.14.backwards.excludes @@ -0,0 +1,6 @@ +# #21394 remove static config path of levelDBJournal and localSnapshotStore +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.snapshot.local.LocalSnapshotStore.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.journal.leveldb.LeveldbStore.configPath") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.journal.leveldb.LeveldbJournal.configPath") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.journal.leveldb.SharedLeveldbStore.configPath") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.journal.leveldb.LeveldbStore.prepareConfig") diff --git a/akka-persistence/src/main/mima-filters/2.4.2.backwards.excludes b/akka-persistence/src/main/mima-filters/2.4.2.backwards.excludes new file mode 100644 index 0000000000..ecf45d04af --- /dev/null +++ b/akka-persistence/src/main/mima-filters/2.4.2.backwards.excludes @@ -0,0 +1,3 @@ +# #19828 +ProblemFilters.exclude[DirectAbstractMethodProblem]("akka.persistence.Eventsourced#ProcessingState.onWriteMessageComplete") +ProblemFilters.exclude[ReversedAbstractMethodProblem]("akka.persistence.Eventsourced#ProcessingState.onWriteMessageComplete") diff --git a/akka-persistence/src/main/mima-filters/2.4.4.backwards.excludes b/akka-persistence/src/main/mima-filters/2.4.4.backwards.excludes new file mode 100644 index 0000000000..67a92ebd06 --- /dev/null +++ b/akka-persistence/src/main/mima-filters/2.4.4.backwards.excludes @@ -0,0 +1,6 @@ +# #20257 
Snapshots with PersistentFSM (experimental feature) +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.serialization.MessageFormats#PersistentStateChangeEventOrBuilder.getTimeoutNanos") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.serialization.MessageFormats#PersistentStateChangeEventOrBuilder.hasTimeoutNanos") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.fsm.PersistentFSM.saveStateSnapshot") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.fsm.PersistentFSM.akka$persistence$fsm$PersistentFSM$$currentStateTimeout") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.fsm.PersistentFSM.akka$persistence$fsm$PersistentFSM$$currentStateTimeout_=") \ No newline at end of file diff --git a/akka-persistence/src/main/mima-filters/2.4.x.backwards.excludes b/akka-persistence/src/main/mima-filters/2.4.x.backwards.excludes new file mode 100644 index 0000000000..fd2873260d --- /dev/null +++ b/akka-persistence/src/main/mima-filters/2.4.x.backwards.excludes @@ -0,0 +1,54 @@ +# #21717 Improvements to AbstractActor API +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.AbstractPersistentActor.createReceiveRecover") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.AbstractPersistentActor.createReceive") + +# #21423 remove deprecated persist method (persistAll) +# This might filter changes to the ordinary persist method also but not much to do about that +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActor.persist") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActor.persistAsync") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.Eventsourced.persist") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.Eventsourced.persistAsync") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.persist") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.persistAsync") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.fsm.AbstractPersistentFSM.persist") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.fsm.AbstractPersistentFSM.persistAsync") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.fsm.AbstractPersistentLoggingFSM.persist") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.fsm.AbstractPersistentLoggingFSM.persistAsync") + +# #21423 removal of deprecated `PersistentView` (in 2.5.x) +ProblemFilters.exclude[MissingClassProblem]("akka.persistence.Update") +ProblemFilters.exclude[MissingClassProblem]("akka.persistence.Update$") +ProblemFilters.exclude[MissingClassProblem]("akka.persistence.PersistentView") +ProblemFilters.exclude[MissingClassProblem]("akka.persistence.PersistentView$") +ProblemFilters.exclude[MissingClassProblem]("akka.persistence.PersistentView$ScheduledUpdate") +ProblemFilters.exclude[MissingClassProblem]("akka.persistence.AbstractPersistentView") +ProblemFilters.exclude[MissingClassProblem]("akka.persistence.UntypedPersistentView") +ProblemFilters.exclude[MissingClassProblem]("akka.persistence.PersistentView$ScheduledUpdate$") +ProblemFilters.exclude[MissingClassProblem]("akka.persistence.PersistentView$State") + +# #16197 Remove backwards compatible workaround in SnapshotSerializer 
+ProblemFilters.exclude[MissingClassProblem]("akka.persistence.serialization.SnapshotSerializer$") +ProblemFilters.exclude[MissingClassProblem]("akka.persistence.serialization.SnapshotHeader") +ProblemFilters.exclude[MissingClassProblem]("akka.persistence.serialization.SnapshotHeader$") + +# #22218 Java Ambiguity in AbstractPersistentActor with Scala 2.12 +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActor.deferAsync") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActor.persistAllAsync") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActor.persistAll") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.Eventsourced.deferAsync") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.Eventsourced.persistAllAsync") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.Eventsourced.persistAll") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.Eventsourced.internalPersistAsync") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.Eventsourced.internalPersist") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.Eventsourced.internalPersistAll") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.Eventsourced.internalDeferAsync") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.Eventsourced.internalPersistAllAsync") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActorWithAtLeastOnceDelivery.deliver") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.AtLeastOnceDeliveryLike.deliver") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.AtLeastOnceDeliveryLike.internalDeliver") +ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.AbstractPersistentActorWithAtLeastOnceDelivery") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActorWithAtLeastOnceDelivery.deliver") +ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.AbstractPersistentActor") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.deferAsync") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.persistAllAsync") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.persistAll") diff --git a/akka-persistence/src/main/mima-filters/2.5.3.backwards.excludes b/akka-persistence/src/main/mima-filters/2.5.3.backwards.excludes new file mode 100644 index 0000000000..960dd6dc3a --- /dev/null +++ b/akka-persistence/src/main/mima-filters/2.5.3.backwards.excludes @@ -0,0 +1,4 @@ +# #15733 Timers +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.fsm.PersistentFSM#Timer.apply") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.fsm.PersistentFSM#Timer.copy") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.fsm.PersistentFSM#Timer.this") diff --git a/akka-persistence/src/main/resources/reference.conf b/akka-persistence/src/main/resources/reference.conf index c867fc5034..7fdd0bb6b2 100644 --- a/akka-persistence/src/main/resources/reference.conf +++ b/akka-persistence/src/main/resources/reference.conf @@ -24,8 +24,8 @@ akka.persistence { internal-stash-overflow-strategy = "akka.persistence.ThrowExceptionConfigurator" journal { # Absolute path to the journal 
plugin configuration entry used by - # persistent actor or view by default. - # Persistent actor or view can override `journalPluginId` method + # persistent actor by default. + # Persistent actor can override `journalPluginId` method # in order to rely on a different journal plugin. plugin = "" # List of journal plugins to start automatically. Use "" for the default journal plugin. @@ -33,8 +33,8 @@ akka.persistence { } snapshot-store { # Absolute path to the snapshot plugin configuration entry used by - # persistent actor or view by default. - # Persistent actor or view can override `snapshotPluginId` method + # persistent actor by default. + # Persistent actor can override `snapshotPluginId` method # in order to rely on a different snapshot plugin. # It is not mandatory to specify a snapshot store plugin. # If you don't use snapshots you don't have to configure it. @@ -49,16 +49,6 @@ akka.persistence { no-snapshot-store { class = "akka.persistence.snapshot.NoSnapshotStore" } - # Default persistent view settings. - view { - # Automated incremental view update. - auto-update = on - # Interval between incremental updates. - auto-update-interval = 5s - # Maximum number of messages to replay per incremental view update. - # Set to -1 for no upper limit. - auto-update-replay-max = -1 - } # Default reliable delivery settings. at-least-once-delivery { # Interval between re-delivery attempts. diff --git a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala index 956f510884..8b79336755 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala @@ -462,9 +462,14 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas timeoutCancellable.cancel() sso.foreach { case SelectedSnapshot(metadata, snapshot) ⇒ - setLastSequenceNr(metadata.sequenceNr) - // Since we are recovering we can ignore the receive behavior from the stack - Eventsourced.super.aroundReceive(recoveryBehavior, SnapshotOffer(metadata, snapshot)) + val offer = SnapshotOffer(metadata, snapshot) + if (recoveryBehavior.isDefinedAt(offer)) { + setLastSequenceNr(metadata.sequenceNr) + // Since we are recovering we can ignore the receive behavior from the stack + Eventsourced.super.aroundReceive(recoveryBehavior, offer) + } else { + unhandled(offer) + } } changeState(recovering(recoveryBehavior, timeout)) journal ! ReplayMessages(lastSequenceNr + 1L, toSnr, replayMax, persistenceId, self) diff --git a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala index 17fcdd48bb..c0beb256ee 100644 --- a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala +++ b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala @@ -288,9 +288,9 @@ object PersistentFSM { /** * INTERNAL API */ - // FIXME: what about the cancellable? 
@InternalApi - private[persistence] final case class Timer(name: String, msg: Any, repeat: Boolean, generation: Int)(context: ActorContext) + private[persistence] final case class Timer(name: String, msg: Any, repeat: Boolean, generation: Int, + owner: AnyRef)(context: ActorContext) extends NoSerializationVerificationNeeded { private var ref: Option[Cancellable] = _ private val scheduler = context.system.scheduler diff --git a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala index 0608dd4410..93a6bed4e8 100644 --- a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala +++ b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala @@ -211,7 +211,7 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging if (timers contains name) { timers(name).cancel } - val timer = Timer(name, msg, repeat, timerGen.next)(context) + val timer = Timer(name, msg, repeat, timerGen.next, this)(context) timer.schedule(self, timeout) timers(name) = timer } @@ -412,8 +412,8 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging if (generation == gen) { processMsg(StateTimeout, "state timeout") } - case t @ Timer(name, msg, repeat, gen) ⇒ - if ((timers contains name) && (timers(name).generation == gen)) { + case t @ Timer(name, msg, repeat, gen, owner) ⇒ + if ((owner eq this) && (timers contains name) && (timers(name).generation == gen)) { if (timeoutFuture.isDefined) { timeoutFuture.get.cancel() timeoutFuture = None @@ -575,10 +575,10 @@ trait LoggingPersistentFSM[S, D, E] extends PersistentFSMBase[S, D, E] { this: A private[akka] abstract override def processEvent(event: Event, source: AnyRef): Unit = { if (debugEvent) { val srcstr = source match { - case s: String ⇒ s - case Timer(name, _, _, _) ⇒ "timer " + name - case a: ActorRef ⇒ a.toString - case _ ⇒ "unknown" + case s: String ⇒ s + case Timer(name, _, _, _, _) ⇒ "timer " + name + case a: ActorRef ⇒ a.toString + case _ ⇒ "unknown" } log.debug("processing {} from {} in state {}", event, srcstr, stateName) } diff --git a/akka-persistence/src/test/scala/akka/persistence/EndToEndEventAdapterSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EndToEndEventAdapterSpec.scala index 326f42e08c..213245a9f4 100644 --- a/akka-persistence/src/test/scala/akka/persistence/EndToEndEventAdapterSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/EndToEndEventAdapterSpec.scala @@ -151,7 +151,7 @@ abstract class EndToEndEventAdapterSpec(journalName: String, journalConfig: Conf """.stripMargin) def persister(name: String, probe: Option[ActorRef] = None)(implicit system: ActorSystem) = - system.actorOf(Props(classOf[EndToEndAdapterActor], name, "akka.persistence.journal." + journalName, probe), name) + system.actorOf(Props(classOf[EndToEndAdapterActor], name, "akka.persistence.journal." 
+ journalName, probe)) def withActorSystem[T](name: String, config: Config)(block: ActorSystem ⇒ T): T = { val system = ActorSystem(name, journalConfig withFallback config) diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala index 2e0f462115..9219ee5b8f 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala @@ -49,6 +49,24 @@ object SnapshotSpec { } } + class IgnoringSnapshotTestPersistentActor(name: String, _recovery: Recovery, probe: ActorRef) extends NamedPersistentActor(name) { + override def recovery: Recovery = _recovery + + override def receiveRecover: Receive = { + case payload: String ⇒ probe ! s"${payload}-${lastSequenceNr}" + case other if !other.isInstanceOf[SnapshotOffer] ⇒ probe ! other + } + + override def receiveCommand = { + case "done" ⇒ probe ! "done" + case payload: String ⇒ + persist(payload) { _ ⇒ + probe ! s"${payload}-${lastSequenceNr}" + } + case other ⇒ probe ! other + } + } + final case class Delete1(metadata: SnapshotMetadata) final case class DeleteN(criteria: SnapshotSelectionCriteria) @@ -95,6 +113,18 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", "Sn expectMsg("f-6") expectMsg(RecoveryCompleted) } + "recover completely if snapshot is not handled" in { + val persistentActor = system.actorOf(Props(classOf[IgnoringSnapshotTestPersistentActor], name, Recovery(), testActor)) + val persistenceId = name + + expectMsg("a-1") + expectMsg("b-2") + expectMsg("c-3") + expectMsg("d-4") + expectMsg("e-5") + expectMsg("f-6") + expectMsg(RecoveryCompleted) + } "recover state starting from the most recent snapshot matching an upper sequence number bound" in { val persistentActor = system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, Recovery(toSequenceNr = 3), testActor)) val persistenceId = name diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala index 7d6c678f23..ca46508271 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala @@ -103,7 +103,7 @@ abstract class RemoteNodeRestartDeathWatchSpec(multiNodeConfig: RemoteNodeRestar } runOn(second) { - val addr = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress + val address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress system.actorOf(Props[Subject], "subject") enterBarrier("actors-started") @@ -112,8 +112,8 @@ abstract class RemoteNodeRestartDeathWatchSpec(multiNodeConfig: RemoteNodeRestar Await.ready(system.whenTerminated, 30.seconds) val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" - akka.remote.netty.tcp.port = ${addr.port.get} - akka.remote.artery.canonical.port = ${addr.port.get} + akka.remote.netty.tcp.port = ${address.port.get} + akka.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject], "subject") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala index 6a6d9438dd..6c427741ac 100644 --- 
a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala @@ -89,7 +89,7 @@ abstract class RemoteNodeRestartGateSpec } runOn(second) { - val addr = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress + val address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress val firstAddress = node(first).address enterBarrier("gated") @@ -99,8 +99,8 @@ abstract class RemoteNodeRestartGateSpec val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" akka.remote.retry-gate-closed-for = 0.5 s akka.remote.netty.tcp { - hostname = ${addr.host.get} - port = ${addr.port.get} + hostname = ${address.host.get} + port = ${address.port.get} } """).withFallback(system.settings.config)) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala index 0b2419a7ff..6ee347036f 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala @@ -123,7 +123,7 @@ abstract class RemoteNodeShutdownAndComesBackSpec } runOn(second) { - val addr = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress + val address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress system.actorOf(Props[Subject], "subject") system.actorOf(Props[Subject], "sysmsgBarrier") val path = node(first) @@ -134,8 +134,8 @@ abstract class RemoteNodeShutdownAndComesBackSpec Await.ready(system.whenTerminated, 30.seconds) val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" - akka.remote.netty.tcp.port = ${addr.port.get} - akka.remote.artery.canonical.port = ${addr.port.get} + akka.remote.netty.tcp.port = ${address.port.get} + akka.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject], "subject") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala index 8843413c58..edbf00eab3 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala @@ -103,7 +103,7 @@ abstract class RemoteQuarantinePiercingSpec(multiNodeConfig: RemoteQuarantinePie } runOn(second) { - val addr = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress + val address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress system.actorOf(Props[Subject], "subject") enterBarrier("actors-started") @@ -112,8 +112,8 @@ abstract class RemoteQuarantinePiercingSpec(multiNodeConfig: RemoteQuarantinePie Await.ready(system.whenTerminated, 30.seconds) val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" - akka.remote.netty.tcp.port = ${addr.port.get} - akka.remote.artery.canonical.port = ${addr.port.get} + akka.remote.netty.tcp.port = ${address.port.get} + akka.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject], "subject") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala 
b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala index 67c7a64d15..c6ff21ec3b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala @@ -35,6 +35,7 @@ class RemoteReDeploymentConfig(artery: Boolean) extends MultiNodeConfig { acceptable-heartbeat-pause=2.5s } akka.remote.artery.enabled = $artery + akka.loglevel = INFO """)).withFallback(RemotingMultiNodeSpec.commonConfig)) testTransport(on = true) @@ -79,16 +80,18 @@ abstract class RemoteReDeploymentSlowMultiJvmSpec(artery: Boolean) extends Remot } object RemoteReDeploymentMultiJvmSpec { - class Parent extends Actor { + class Parent extends Actor with ActorLogging { val monitor = context.actorSelection("/user/echo") + log.info(s"Started Parent on path ${self.path}") def receive = { case (p: Props, n: String) ⇒ context.actorOf(p, n) case msg ⇒ monitor ! msg } } - class Hello extends Actor { + class Hello extends Actor with ActorLogging { val monitor = context.actorSelection("/user/echo") + log.info(s"Started Hello on path ${self.path} with parent ${context.parent.path}") context.parent ! "HelloParent" override def preStart(): Unit = monitor ! "PreStart" override def postStop(): Unit = monitor ! "PostStop" @@ -119,26 +122,32 @@ abstract class RemoteReDeploymentMultiJvmSpec(multiNodeConfig: RemoteReDeploymen "A remote deployment target system" must { "terminate the child when its parent system is replaced by a new one" in { - + // Any message sent to `echo` will be passed on to `testActor` val echo = system.actorOf(echoProps(testActor), "echo") enterBarrier("echo-started") runOn(second) { + // Create a 'Parent' actor on the 'second' node + // have it create a 'Hello' child (which will be on the 'first' node due to the deployment config): system.actorOf(Props[Parent], "parent") ! 
((Props[Hello], "hello")) + // The 'Hello' child will send "HelloParent" to the 'Parent', which will pass it to the 'echo' monitor: expectMsg(15.seconds, "HelloParent") } runOn(first) { + // Check the 'Hello' actor was started on the first node expectMsg(15.seconds, "PreStart") } enterBarrier("first-deployed") + // Disconnect the second system from the first, and shut it down runOn(first) { testConductor.blackhole(second, first, Both).await testConductor.shutdown(second, abort = true).await if (expectQuarantine) within(sleepAfterKill) { + // The quarantine of node 2, where the Parent lives, should cause the Hello child to be stopped: expectMsg("PostStop") expectNoMsg() } @@ -148,6 +157,7 @@ abstract class RemoteReDeploymentMultiJvmSpec(multiNodeConfig: RemoteReDeploymen var sys: ActorSystem = null + // Start the second system again runOn(second) { Await.ready(system.whenTerminated, 30.seconds) expectNoMsg(sleepAfterKill) @@ -156,6 +166,7 @@ abstract class RemoteReDeploymentMultiJvmSpec(multiNodeConfig: RemoteReDeploymen enterBarrier("cable-cut") + // Add new echo, parent, and (if needed) Hello actors: runOn(second) { val p = TestProbe()(sys) sys.actorOf(echoProps(p.ref), "echo") @@ -165,6 +176,7 @@ abstract class RemoteReDeploymentMultiJvmSpec(multiNodeConfig: RemoteReDeploymen enterBarrier("re-deployed") + // Check the Hello actor is (re)started on node 1: runOn(first) { within(15.seconds) { if (expectQuarantine) expectMsg("PreStart") @@ -174,8 +186,10 @@ abstract class RemoteReDeploymentMultiJvmSpec(multiNodeConfig: RemoteReDeploymen enterBarrier("the-end") + // After this we expect no further messages expectNoMsg(1.second) + // Until we clean up after ourselves enterBarrier("stopping") runOn(second) { diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala index 2f034f672f..3e503490d8 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala @@ -97,7 +97,7 @@ abstract class RemoteRestartedQuarantinedSpec } runOn(second) { - val addr = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress + val address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress val firstAddress = node(first).address system.eventStream.subscribe(testActor, classOf[ThisActorSystemQuarantinedEvent]) @@ -125,18 +125,18 @@ abstract class RemoteRestartedQuarantinedSpec val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" akka.remote.retry-gate-closed-for = 0.5 s akka.remote.netty.tcp { - hostname = ${addr.host.get} - port = ${addr.port.get} + hostname = ${address.host.get} + port = ${address.port.get} } """).withFallback(system.settings.config)) + // retry because it's possible to lose the initial message here, see issue #17314 val probe = TestProbe()(freshSystem) - - freshSystem.actorSelection(RootActorPath(firstAddress) / "user" / "subject").tell(Identify("subject"), probe.ref) - // TODO sometimes it takes long time until the new connection is established, - // It seems like there must first be a transport failure detector timeout, that triggers - // "No response from remote. Handshake timed out or transport failure detector triggered".
- probe.expectMsgType[ActorIdentity](30.second).ref should not be (None) + probe.awaitAssert({ + println(s"# --") // FIXME + freshSystem.actorSelection(RootActorPath(firstAddress) / "user" / "subject").tell(Identify("subject"), probe.ref) + probe.expectMsgType[ActorIdentity](1.second).ref should not be (None) + }, 30.seconds) // Now the other system will be able to pass, too freshSystem.actorOf(Props[Subject], "subject") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala index 5659044bc9..c12d7a4482 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala @@ -26,7 +26,7 @@ import akka.remote.artery.MaxThroughputSpec._ object FanInThroughputSpec extends MultiNodeConfig { val totalNumberOfNodes = - System.getProperty("MultiJvm.akka.test.FanInThroughputSpec.nrOfNodes") match { + System.getProperty("akka.test.FanInThroughputSpec.nrOfNodes") match { case null ⇒ 4 case value ⇒ value.toInt } @@ -41,6 +41,10 @@ object FanInThroughputSpec extends MultiNodeConfig { # for serious measurements you should increase the totalMessagesFactor (20) akka.test.FanInThroughputSpec.totalMessagesFactor = 10.0 akka.test.FanInThroughputSpec.real-message = off + akka.test.FanInThroughputSpec.actor-selection = off + akka.remote.artery.advanced { + inbound-lanes = 4 + } """)) .withFallback(MaxThroughputSpec.cfg) .withFallback(RemotingMultiNodeSpec.commonConfig)) @@ -61,6 +65,7 @@ abstract class FanInThroughputSpec extends RemotingMultiNodeSpec(FanInThroughput val totalMessagesFactor = system.settings.config.getDouble("akka.test.FanInThroughputSpec.totalMessagesFactor") val realMessage = system.settings.config.getBoolean("akka.test.FanInThroughputSpec.real-message") + val actorSelection = system.settings.config.getBoolean("akka.test.FanInThroughputSpec.actor-selection") var plot = PlotResult() @@ -85,9 +90,12 @@ abstract class FanInThroughputSpec extends RemotingMultiNodeSpec(FanInThroughput super.afterAll() } - def identifyReceiver(name: String, r: RoleName): ActorRef = { - system.actorSelection(node(r) / "user" / name) ! Identify(None) - expectMsgType[ActorIdentity](10.seconds).ref.get + def identifyReceiver(name: String, r: RoleName): Target = { + val sel = system.actorSelection(node(r) / "user" / name) + sel ! 
Identify(None) + val ref = expectMsgType[ActorIdentity](10.seconds).ref.get + if (actorSelection) ActorSelectionTarget(sel, ref) + else ActorRefTarget(ref) } val scenarios = List( @@ -146,7 +154,7 @@ abstract class FanInThroughputSpec extends RemotingMultiNodeSpec(FanInThroughput val ignore = TestProbe() val receivers = (1 to sendingNodes.size).map { n ⇒ identifyReceiver(receiverName + "-" + n, roles.head) - }.toArray[ActorRef] + }.toArray[Target] val idx = roles.indexOf(myself) - 1 val receiver = receivers(idx) @@ -171,7 +179,6 @@ abstract class FanInThroughputSpec extends RemotingMultiNodeSpec(FanInThroughput } "Max throughput of fan-in" must { - pending val reporter = BenchmarkFileReporter("FanInThroughputSpec", system) for (s ← scenarios) { s"be great for ${s.testName}, burstSize = ${s.burstSize}, payloadSize = ${s.payloadSize}" in test(s, reporter) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala index 74b5440ca0..3fe83e6e3b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala @@ -26,7 +26,7 @@ import akka.remote.artery.MaxThroughputSpec._ object FanOutThroughputSpec extends MultiNodeConfig { val totalNumberOfNodes = - System.getProperty("MultiJvm.akka.test.FanOutThroughputSpec.nrOfNodes") match { + System.getProperty("akka.test.FanOutThroughputSpec.nrOfNodes") match { case null ⇒ 4 case value ⇒ value.toInt } @@ -41,6 +41,7 @@ object FanOutThroughputSpec extends MultiNodeConfig { # for serious measurements you should increase the totalMessagesFactor (20) akka.test.FanOutThroughputSpec.totalMessagesFactor = 10.0 akka.test.FanOutThroughputSpec.real-message = off + akka.test.FanOutThroughputSpec.actor-selection = off """)) .withFallback(MaxThroughputSpec.cfg) .withFallback(RemotingMultiNodeSpec.commonConfig)) @@ -61,6 +62,7 @@ abstract class FanOutThroughputSpec extends RemotingMultiNodeSpec(FanOutThroughp val totalMessagesFactor = system.settings.config.getDouble("akka.test.FanOutThroughputSpec.totalMessagesFactor") val realMessage = system.settings.config.getBoolean("akka.test.FanOutThroughputSpec.real-message") + val actorSelection = system.settings.config.getBoolean("akka.test.FanOutThroughputSpec.actor-selection") var plot = PlotResult() @@ -85,9 +87,12 @@ abstract class FanOutThroughputSpec extends RemotingMultiNodeSpec(FanOutThroughp super.afterAll() } - def identifyReceiver(name: String, r: RoleName): ActorRef = { - system.actorSelection(node(r) / "user" / name) ! Identify(None) - expectMsgType[ActorIdentity](10.seconds).ref.get + def identifyReceiver(name: String, r: RoleName): Target = { + val sel = system.actorSelection(node(r) / "user" / name) + sel ! 
Identify(None) + val ref = expectMsgType[ActorIdentity](10.seconds).ref.get + if (actorSelection) ActorSelectionTarget(sel, ref) + else ActorRefTarget(ref) } val burstSize = 2000 / senderReceiverPairs @@ -143,7 +148,7 @@ abstract class FanOutThroughputSpec extends RemotingMultiNodeSpec(FanOutThroughp runOn(roles.head) { enterBarrier(receiverName + "-started") val ignore = TestProbe() - val receivers = targetNodes.map(target ⇒ identifyReceiver(receiverName, target)).toArray[ActorRef] + val receivers = targetNodes.map(target ⇒ identifyReceiver(receiverName, target)).toArray[Target] val senders = for ((target, i) ← targetNodes.zipWithIndex) yield { val receiver = receivers(i) val plotProbe = TestProbe() diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala index 254d0942e9..91231a0d8e 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala @@ -99,13 +99,13 @@ abstract class HandshakeRestartReceiverSpec } runOn(second) { - val addr = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress + val address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress enterBarrier("before-shutdown") Await.result(system.whenTerminated, 10.seconds) val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = ${addr.port.get} + akka.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject], "subject2") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala index 20718b1195..2475a15805 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala @@ -33,6 +33,7 @@ object MaxThroughputSpec extends MultiNodeConfig { # for serious measurements you should increase the totalMessagesFactor (20) akka.test.MaxThroughputSpec.totalMessagesFactor = 10.0 akka.test.MaxThroughputSpec.real-message = off + akka.test.MaxThroughputSpec.actor-selection = off akka { loglevel = INFO log-dead-letters = 10000 @@ -78,8 +79,8 @@ object MaxThroughputSpec extends MultiNodeConfig { akka.remote.default-remote-dispatcher { fork-join-executor { # parallelism-factor = 0.5 - parallelism-min = 2 - parallelism-max = 2 + parallelism-min = 4 + parallelism-max = 4 } # Set to 10 by default. Might be worthwhile to experiment with. 
# throughput = 100 @@ -97,6 +98,19 @@ object MaxThroughputSpec extends MultiNodeConfig { final case class EndResult(totalReceived: Long) extends JavaSerializable final case class FlowControl(burstStartTime: Long) extends Echo + sealed trait Target { + def tell(msg: Any, sender: ActorRef): Unit + def ref: ActorRef + } + + final case class ActorRefTarget(override val ref: ActorRef) extends Target { + override def tell(msg: Any, sender: ActorRef) = ref.tell(msg, sender) + } + + final case class ActorSelectionTarget(sel: ActorSelection, override val ref: ActorRef) extends Target { + override def tell(msg: Any, sender: ActorRef) = sel.tell(msg, sender) + } + def receiverProps(reporter: RateReporter, payloadSize: Int, printTaskRunnerMetrics: Boolean, numSenders: Int): Props = Props(new Receiver(reporter, payloadSize, printTaskRunnerMetrics, numSenders)).withDispatcher("akka.remote.default-remote-dispatcher") @@ -137,11 +151,11 @@ object MaxThroughputSpec extends MultiNodeConfig { } } - def senderProps(mainTarget: ActorRef, targets: Array[ActorRef], testSettings: TestSettings, plotRef: ActorRef, + def senderProps(mainTarget: Target, targets: Array[Target], testSettings: TestSettings, plotRef: ActorRef, printTaskRunnerMetrics: Boolean, reporter: BenchmarkFileReporter): Props = Props(new Sender(mainTarget, targets, testSettings, plotRef, printTaskRunnerMetrics, reporter)) - class Sender(target: ActorRef, targets: Array[ActorRef], testSettings: TestSettings, plotRef: ActorRef, printTaskRunnerMetrics: Boolean, reporter: BenchmarkFileReporter) + class Sender(target: Target, targets: Array[Target], testSettings: TestSettings, plotRef: ActorRef, printTaskRunnerMetrics: Boolean, reporter: BenchmarkFileReporter) extends Actor { val numTargets = targets.size @@ -161,7 +175,7 @@ object MaxThroughputSpec extends MultiNodeConfig { def receive = { case Run ⇒ if (compressionEnabled) { - target ! Warmup(payload) + target.tell(Warmup(payload), self) context.setReceiveTimeout(1.second) context.become(waitingForCompression) } else runWarmup() @@ -169,18 +183,22 @@ object MaxThroughputSpec extends MultiNodeConfig { def waitingForCompression: Receive = { case ReceivedActorRefCompressionTable(_, table) ⇒ - if (table.dictionary.contains(target)) { + val ref = target match { + case ActorRefTarget(ref) ⇒ ref + case ActorSelectionTarget(sel, _) ⇒ sel.anchor + } + if (table.dictionary.contains(ref)) { context.setReceiveTimeout(Duration.Undefined) runWarmup() } else - target ! Warmup(payload) + target.tell(Warmup(payload), self) case ReceiveTimeout ⇒ - target ! Warmup(payload) + target.tell(Warmup(payload), self) } def runWarmup(): Unit = { sendBatch(warmup = true) // first some warmup - targets.foreach(_ ! Start(target)) // then Start, which will echo back here + targets.foreach(_.tell(Start(target.ref), self)) // then Start, which will echo back here context.become(warmup) } @@ -268,9 +286,9 @@ object MaxThroughputSpec extends MultiNodeConfig { def sendFlowControl(t0: Long): Unit = { if (remaining <= 0) { context.become(waitingForEndResult) - targets.foreach(_ ! End) + targets.foreach(_.tell(End, self)) } else - target ! 
FlowControl(t0) + target.tell(FlowControl(t0), self) } } @@ -331,6 +349,7 @@ abstract class MaxThroughputSpec extends RemotingMultiNodeSpec(MaxThroughputSpec val totalMessagesFactor = system.settings.config.getDouble("akka.test.MaxThroughputSpec.totalMessagesFactor") val realMessage = system.settings.config.getBoolean("akka.test.MaxThroughputSpec.real-message") + val actorSelection = system.settings.config.getBoolean("akka.test.MaxThroughputSpec.actor-selection") var plot = PlotResult() @@ -355,9 +374,12 @@ abstract class MaxThroughputSpec extends RemotingMultiNodeSpec(MaxThroughputSpec super.afterAll() } - def identifyReceiver(name: String, r: RoleName = second): ActorRef = { - system.actorSelection(node(r) / "user" / name) ! Identify(None) - expectMsgType[ActorIdentity](10.seconds).ref.get + def identifyReceiver(name: String, r: RoleName = second): Target = { + val sel = system.actorSelection(node(r) / "user" / name) + sel ! Identify(None) + val ref = expectMsgType[ActorIdentity](10.seconds).ref.get + if (actorSelection) ActorSelectionTarget(sel, ref) + else ActorRefTarget(ref) } val scenarios = List( diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala index f4bd035882..994aae0a83 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala @@ -81,7 +81,7 @@ abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(Remo } runOn(second) { - val addr = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress + val address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress val firstAddress = node(first).address system.eventStream.subscribe(testActor, classOf[ThisActorSystemQuarantinedEvent]) @@ -106,7 +106,7 @@ abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(Remo Await.result(system.whenTerminated, 10.seconds) val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = ${addr.port.get} + akka.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) val probe = TestProbe()(freshSystem) diff --git a/akka-remote/src/main/mima-filters/2.4.0.backwards.excludes b/akka-remote/src/main/mima-filters/2.4.0.backwards.excludes new file mode 100644 index 0000000000..b5ed7890c6 --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.4.0.backwards.excludes @@ -0,0 +1,5 @@ +ProblemFilters.exclude[Problem]("akka.remote.transport.ProtocolStateActor") + +# #18353 Changes to methods and fields private to remoting actors +ProblemFilters.exclude[MissingMethodProblem]("akka.remote.EndpointManager.retryGateEnabled") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.EndpointManager.pruneTimerCancellable") diff --git a/akka-remote/src/main/mima-filters/2.4.1.backwards.excludes b/akka-remote/src/main/mima-filters/2.4.1.backwards.excludes new file mode 100644 index 0000000000..47c6c931de --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.4.1.backwards.excludes @@ -0,0 +1,8 @@ +# #19133 change in internal actor +ProblemFilters.exclude[MissingMethodProblem]("akka.remote.ReliableDeliverySupervisor.gated") + +# #18758 report invalid association events +ProblemFilters.exclude[MissingTypesProblem]("akka.remote.InvalidAssociation$") 
+ProblemFilters.exclude[MissingMethodProblem]("akka.remote.InvalidAssociation.apply") +ProblemFilters.exclude[MissingMethodProblem]("akka.remote.InvalidAssociation.copy") +ProblemFilters.exclude[MissingMethodProblem]("akka.remote.InvalidAssociation.this") diff --git a/akka-remote/src/main/mima-filters/2.4.10.backwards.excludes b/akka-remote/src/main/mima-filters/2.4.10.backwards.excludes new file mode 100644 index 0000000000..ffc468a509 --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.4.10.backwards.excludes @@ -0,0 +1,17 @@ +# Remove useUntrustedMode which is an internal API and not used anywhere anymore +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.Remoting.useUntrustedMode") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.RemoteTransport.useUntrustedMode") + +# Use OptionVal in remote Send envelope +ProblemFilters.exclude[Problem]("akka.remote.EndpointManager*") +ProblemFilters.exclude[Problem]("akka.remote.Remoting*") +ProblemFilters.exclude[Problem]("akka.remote.RemoteTransport*") +ProblemFilters.exclude[Problem]("akka.remote.InboundMessageDispatcher*") +ProblemFilters.exclude[Problem]("akka.remote.DefaultMessageDispatcher*") +ProblemFilters.exclude[Problem]("akka.remote.transport*") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.RemoteActorRefProvider.quarantine") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.RemoteWatcher.quarantine") + +# #20644 long uids +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.RemoteWatcher.receiveHeartbeatRsp") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.RemoteWatcher.selfHeartbeatRspMsg") diff --git a/akka-remote/src/main/mima-filters/2.4.11.backwards.excludes b/akka-remote/src/main/mima-filters/2.4.11.backwards.excludes new file mode 100644 index 0000000000..7cf4e1a61b --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.4.11.backwards.excludes @@ -0,0 +1,29 @@ +# MarkerLoggingAdapter introduced (all internal classes) +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.RemoteSystemDaemon.this") + +# method this(akka.actor.ExtendedActorSystem,akka.remote.RemoteActorRefProvider,akka.event.LoggingAdapter)Unit in class akka.remote.DefaultMessageDispatcher's type is different in current version, where it is (akka.actor.ExtendedActorSystem,akka.remote.RemoteActorRefProvider,akka.event.MarkerLoggingAdapter)Unit instead of (akka.actor.ExtendedActorSystem,akka.remote.RemoteActorRefProvider,akka.event.LoggingAdapter)Unit +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.DefaultMessageDispatcher.this") +# trait akka.remote.artery.StageLogging does not have a correspondent in current version +ProblemFilters.exclude[MissingClassProblem]("akka.remote.artery.StageLogging") +# method SSLProtocol()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLProtocol") +# method SSLTrustStorePassword()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLTrustStorePassword") +# method SSLKeyStorePassword()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, 
where it is java.lang.String rather than scala.Option +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLKeyStorePassword") +# method SSLRandomNumberGenerator()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLRandomNumberGenerator") +# method SSLKeyPassword()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLKeyPassword") +# method SSLKeyStore()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLKeyStore") +# method SSLTrustStore()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLTrustStore") +# method initializeClientSSL(akka.remote.transport.netty.SSLSettings,akka.event.LoggingAdapter)org.jboss.netty.handler.ssl.SslHandler in object akka.remote.transport.netty.NettySSLSupport does not have a correspondent in current version +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.transport.netty.NettySSLSupport.initializeClientSSL") +# method apply(akka.remote.transport.netty.SSLSettings,akka.event.LoggingAdapter,Boolean)org.jboss.netty.handler.ssl.SslHandler in object akka.remote.transport.netty.NettySSLSupport's type is different in current version, where it is (akka.remote.transport.netty.SSLSettings,akka.event.MarkerLoggingAdapter,Boolean)org.jboss.netty.handler.ssl.SslHandler instead of (akka.remote.transport.netty.SSLSettings,akka.event.LoggingAdapter,Boolean)org.jboss.netty.handler.ssl.SslHandler +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.transport.netty.NettySSLSupport.apply") +# initializeCustomSecureRandom(scala.Option,akka.event.LoggingAdapter)java.security.SecureRandom in object akka.remote.transport.netty.NettySSLSupport does not have a correspondent in current version +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.transport.netty.NettySSLSupport.initializeCustomSecureRandom") +# method initializeServerSSL(akka.remote.transport.netty.SSLSettings,akka.event.LoggingAdapter)org.jboss.netty.handler.ssl.SslHandler in object akka.remote.transport.netty.NettySSLSupport does not have a correspondent in current version +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.transport.netty.NettySSLSupport.initializeServerSSL") diff --git a/akka-remote/src/main/mima-filters/2.4.13.backwards.excludes b/akka-remote/src/main/mima-filters/2.4.13.backwards.excludes new file mode 100644 index 0000000000..3f27489a81 --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.4.13.backwards.excludes @@ -0,0 +1,4 @@ +# extension method isEmpty$extension(Int)Boolean in object akka.remote.artery.compress.TopHeavyHitters#HashCodeVal does not have a correspondent in current version 
+ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.artery.compress.TopHeavyHitters#HashCodeVal.isEmpty$extension") +# isEmpty()Boolean in class akka.remote.artery.compress.TopHeavyHitters#HashCodeVal does not have a correspondent in current version +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.artery.compress.TopHeavyHitters#HashCodeVal.isEmpty") diff --git a/akka-remote/src/main/mima-filters/2.4.14.backwards.excludes b/akka-remote/src/main/mima-filters/2.4.14.backwards.excludes new file mode 100644 index 0000000000..1f70398b3f --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.4.14.backwards.excludes @@ -0,0 +1,2 @@ +ProblemFilters.exclude[Problem]("akka.remote.artery.*") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.MessageSerializer.serializeForArtery") diff --git a/akka-remote/src/main/mima-filters/2.4.16.backwards.excludes b/akka-remote/src/main/mima-filters/2.4.16.backwards.excludes new file mode 100644 index 0000000000..bfc3e56d98 --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.4.16.backwards.excludes @@ -0,0 +1,2 @@ +# internal classes +ProblemFilters.exclude[Problem]("akka.remote.artery.*") diff --git a/akka-remote/src/main/mima-filters/2.4.17.backwards.excludes b/akka-remote/src/main/mima-filters/2.4.17.backwards.excludes new file mode 100644 index 0000000000..b764c89e76 --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.4.17.backwards.excludes @@ -0,0 +1,39 @@ +# #22277 changes to internal classes +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.transport.netty.TcpServerHandler.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.transport.netty.TcpClientHandler.this") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.transport.netty.TcpHandlers.log") + +# #22224 DaemonMsgCreateSerializer using manifests +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData.getClassesBytes") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData.getClassesList") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData.getClassesCount") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData.getClasses") +ProblemFilters.exclude[MissingFieldProblem]("akka.remote.WireFormats#PropsData.CLASSES_FIELD_NUMBER") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getHasManifest") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getHasManifestCount") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getSerializerIdsList") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getSerializerIds") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getHasManifestList") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getSerializerIdsCount") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getClassesBytes") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getClassesList") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getClassesCount") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getClasses") 
+ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getManifestsBytes") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getManifests") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getManifestsList") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getManifestsCount") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.getClassesBytes") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.getClassesList") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.addClassesBytes") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.getClassesCount") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.clearClasses") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.addClasses") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.getClasses") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.addAllClasses") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.setClasses") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.DaemonMsgCreateSerializer.serialize") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.DaemonMsgCreateSerializer.deserialize") +ProblemFilters.exclude[FinalClassProblem]("akka.remote.serialization.DaemonMsgCreateSerializer") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.DaemonMsgCreateSerializer.serialization") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.DaemonMsgCreateSerializer.this") diff --git a/akka-remote/src/main/mima-filters/2.4.6.backwards.excludes b/akka-remote/src/main/mima-filters/2.4.6.backwards.excludes new file mode 100644 index 0000000000..0f417c423c --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.4.6.backwards.excludes @@ -0,0 +1,2 @@ +# #20531 adding refuseUid to Gated +ProblemFilters.exclude[Problem]("akka.remote.EndpointManager$Gated") diff --git a/akka-remote/src/main/mima-filters/2.4.x.backwards.excludes b/akka-remote/src/main/mima-filters/2.4.x.backwards.excludes new file mode 100644 index 0000000000..d121ed9e5a --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.4.x.backwards.excludes @@ -0,0 +1,36 @@ +# #21423 removal of deprecated serializer constructors (in 2.5.x) +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.ProtobufSerializer.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.MessageContainerSerializer.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.DaemonMsgCreateSerializer.this") + +# #21423 remove deprecated methods in routing +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.routing.RemoteRouterConfig.nrOfInstances") + +# #21423 remove deprecated ARRAY_OF_BYTE_ARRAY +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.ProtobufSerializer.ARRAY_OF_BYTE_ARRAY") + +# #21423 remove deprecated constructor in DeadlineFailureDetector 
+ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.DeadlineFailureDetector.this") + +# #22015 removal of deprecated AESCounterSecureInetRNGs +ProblemFilters.exclude[MissingClassProblem]("akka.remote.security.provider.AES128CounterInetRNG") +ProblemFilters.exclude[MissingClassProblem]("akka.remote.security.provider.AES256CounterInetRNG") +ProblemFilters.exclude[MissingClassProblem]("akka.remote.security.provider.InternetSeedGenerator") +ProblemFilters.exclude[MissingClassProblem]("akka.remote.security.provider.InternetSeedGenerator$") + +# #22332 protobuf serializers for remote deployment +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getConfigManifest") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasScopeManifest") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getScopeManifestBytes") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getConfigSerializerId") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasRouterConfigSerializerId") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasRouterConfigManifest") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getRouterConfigSerializerId") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getRouterConfigManifestBytes") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getConfigManifestBytes") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasConfigManifest") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasScopeSerializerId") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getRouterConfigManifest") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasConfigSerializerId") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getScopeSerializerId") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getScopeManifest") diff --git a/akka-remote/src/main/mima-filters/2.5.2.backwards.excludes b/akka-remote/src/main/mima-filters/2.5.2.backwards.excludes new file mode 100644 index 0000000000..4ab99c120b --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.5.2.backwards.excludes @@ -0,0 +1,3 @@ +# #23023 added a new overload with implementation to trait, so old transport implementations compiled against +# older versions will be missing the method. We accept that incompatibility for now. 
+ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.transport.AssociationHandle.disassociate") \ No newline at end of file diff --git a/akka-remote/src/main/mima-filters/2.5.3.backwards.excludes b/akka-remote/src/main/mima-filters/2.5.3.backwards.excludes new file mode 100644 index 0000000000..c82dbcace6 --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.5.3.backwards.excludes @@ -0,0 +1,2 @@ +#21880 PartitionHub in Artery +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.artery.ArterySettings#Advanced.InboundBroadcastHubBufferSize") diff --git a/akka-remote/src/main/mima-filters/2.5.4.backwards.excludes b/akka-remote/src/main/mima-filters/2.5.4.backwards.excludes new file mode 100644 index 0000000000..465b305f2b --- /dev/null +++ b/akka-remote/src/main/mima-filters/2.5.4.backwards.excludes @@ -0,0 +1,3 @@ +#23504 compression tables +ProblemFilters.exclude[MissingClassProblem]("akka.remote.artery.compress.InboundCompression$State$") +ProblemFilters.exclude[MissingClassProblem]("akka.remote.artery.compress.InboundCompression$State") diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 14658835f7..6b2e3010ef 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -299,8 +299,8 @@ private[akka] class RemoteActorRefProvider( } Iterator(props.deploy) ++ deployment.iterator reduce ((a, b) ⇒ b withFallback a) match { - case d @ Deploy(_, _, _, RemoteScope(addr), _, _) ⇒ - if (hasAddress(addr)) { + case d @ Deploy(_, _, _, RemoteScope(address), _, _) ⇒ + if (hasAddress(address)) { local.actorOf(system, props, supervisor, path, false, deployment.headOption, false, async) } else if (props.deploy.scope == LocalScope) { throw new ConfigurationException(s"configuration requested remote deployment for local-only Props at [$path]") @@ -313,8 +313,8 @@ private[akka] class RemoteActorRefProvider( case NonFatal(e) ⇒ throw new ConfigurationException( s"configuration problem while creating [$path] with dispatcher [${props.dispatcher}] and mailbox [${props.mailbox}]", e) } - val localAddress = transport.localAddressForRemote(addr) - val rpath = (RootActorPath(addr) / "remote" / localAddress.protocol / localAddress.hostPort / path.elements). + val localAddress = transport.localAddressForRemote(address) + val rpath = (RootActorPath(address) / "remote" / localAddress.protocol / localAddress.hostPort / path.elements). 
withUid(path.uid) new RemoteActorRef(transport, localAddress, rpath, supervisor, Some(props), Some(d)) } catch { diff --git a/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala b/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala index 3314156524..5ff4b6dd3a 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala @@ -142,7 +142,7 @@ private[akka] final class ArterySettings private (config: Config) { .requiring(_ >= 32 * 1024, "maximum-frame-size must be greater than or equal to 32 KiB") final val BufferPoolSize: Int = getInt("buffer-pool-size") .requiring(_ > 0, "buffer-pool-size must be greater than 0") - final val InboundBroadcastHubBufferSize = BufferPoolSize / 2 + final val InboundHubBufferSize = BufferPoolSize / 2 final val MaximumLargeFrameSize: Int = math.min(getBytes("maximum-large-frame-size"), Int.MaxValue).toInt .requiring(_ >= 32 * 1024, "maximum-large-frame-size must be greater than or equal to 32 KiB") final val LargeBufferPoolSize: Int = getInt("large-buffer-pool-size") diff --git a/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala index 3631212bb6..54a531e388 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala @@ -729,34 +729,36 @@ private[remote] class ArteryTransport(_system: ExtendedActorSystem, _provider: R } else { val hubKillSwitch = KillSwitches.shared("hubKillSwitch") - val source: Source[(OptionVal[InternalActorRef], InboundEnvelope), (ResourceLifecycle, InboundCompressionAccess)] = + val source: Source[InboundEnvelope, (ResourceLifecycle, InboundCompressionAccess)] = aeronSource(ordinaryStreamId, envelopeBufferPool) .via(hubKillSwitch.flow) .viaMat(inboundFlow(settings, _inboundCompressions))(Keep.both) - .map(env ⇒ (env.recipient, env)) - val (resourceLife, compressionAccess, broadcastHub) = - source - .toMat(BroadcastHub.sink(bufferSize = settings.Advanced.InboundBroadcastHubBufferSize))({ case ((a, b), c) ⇒ (a, b, c) }) - .run()(materializer) - - // select lane based on destination, to preserve message order - def shouldUseLane(recipient: OptionVal[ActorRef], targetLane: Int): Boolean = - recipient match { - case OptionVal.Some(r) ⇒ math.abs(r.path.uid) % inboundLanes == targetLane - case OptionVal.None ⇒ 0 == targetLane + // Select lane based on destination to preserve message order, + // Also include the uid of the sending system in the hash to spread + // "hot" destinations, e.g. ActorSelection anchor. 
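+          // Schematically: for a recipient whose path uid is `a`, sent from a system with uid `b`,
+          // the selected lane is math.abs(23 * (23 + a) + java.lang.Long.hashCode(b)) % inboundLanes;
+          // envelopes without a recipient all fall into lane 0.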
+ val partitioner: InboundEnvelope ⇒ Int = env ⇒ { + env.recipient match { + case OptionVal.Some(r) ⇒ + val a = r.path.uid + val b = env.originUid + val hashA = 23 + a + val hash: Int = 23 * hashA + java.lang.Long.hashCode(b) + math.abs(hash) % inboundLanes + case OptionVal.None ⇒ 0 } + } + + val (resourceLife, compressionAccess, hub) = + source + .toMat(Sink.fromGraph(new FixedSizePartitionHub[InboundEnvelope](partitioner, inboundLanes, + settings.Advanced.InboundHubBufferSize)))({ case ((a, b), c) ⇒ (a, b, c) }) + .run()(materializer) val lane = inboundSink(envelopeBufferPool) val completedValues: Vector[Future[Done]] = - (0 until inboundLanes).map { laneId ⇒ - broadcastHub - // TODO replace filter with "PartitionHub" when that is implemented - // must use a tuple here because envelope is pooled and must only be read in the selected lane - // otherwise, the lane that actually processes it might have already released it. - .collect { case (recipient, env) if shouldUseLane(recipient, laneId) ⇒ env } - .toMat(lane)(Keep.right) - .run()(materializer) + (0 until inboundLanes).map { _ ⇒ + hub.toMat(lane)(Keep.right).run()(materializer) }(collection.breakOut) import system.dispatcher diff --git a/akka-remote/src/main/scala/akka/remote/artery/BufferPool.scala b/akka-remote/src/main/scala/akka/remote/artery/BufferPool.scala index e00bfc456d..909930e664 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/BufferPool.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/BufferPool.scala @@ -377,12 +377,11 @@ private[remote] final class EnvelopeBuffer(val byteBuffer: ByteBuffer) { // Write fixed length parts byteBuffer.put(VersionOffset, header.version) byteBuffer.put(FlagsOffset, header.flags) - byteBuffer.putLong(UidOffset, header.uid) - byteBuffer.putInt(SerializerOffset, header.serializer) - // compression table version numbers byteBuffer.put(ActorRefCompressionTableVersionOffset, header.outboundActorRefCompression.version) byteBuffer.put(ClassManifestCompressionTableVersionOffset, header.outboundClassManifestCompression.version) + byteBuffer.putLong(UidOffset, header.uid) + byteBuffer.putInt(SerializerOffset, header.serializer) // maybe write some metadata // after metadata is written (or not), buffer is at correct position to continue writing literals @@ -421,12 +420,11 @@ private[remote] final class EnvelopeBuffer(val byteBuffer: ByteBuffer) { // Read fixed length parts header.setVersion(byteBuffer.get(VersionOffset)) header.setFlags(byteBuffer.get(FlagsOffset)) - header.setUid(byteBuffer.getLong(UidOffset)) - header.setSerializer(byteBuffer.getInt(SerializerOffset)) - // compression table versions (stored in the Tag) header._inboundActorRefCompressionTableVersion = byteBuffer.get(ActorRefCompressionTableVersionOffset) header._inboundClassManifestCompressionTableVersion = byteBuffer.get(ClassManifestCompressionTableVersionOffset) + header.setUid(byteBuffer.getLong(UidOffset)) + header.setSerializer(byteBuffer.getInt(SerializerOffset)) byteBuffer.position(MetadataContainerAndLiteralSectionOffset) if (header.flag(MetadataPresentFlag)) { diff --git a/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala b/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala new file mode 100644 index 0000000000..7426b58db4 --- /dev/null +++ b/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala @@ -0,0 +1,73 @@ +/** + * Copyright (C) 2017 Lightbend Inc. 
+ */ +package akka.remote.artery + +import akka.annotation.InternalApi +import akka.stream.scaladsl.PartitionHub +import org.agrona.concurrent.OneToOneConcurrentArrayQueue +import java.util.concurrent.atomic.AtomicInteger +import org.agrona.concurrent.ManyToManyConcurrentArrayQueue + +/** + * INTERNAL API + */ +@InternalApi private[akka] class FixedSizePartitionHub[T]( + partitioner: T ⇒ Int, + lanes: Int, + bufferSize: Int) extends PartitionHub[T](() ⇒ (info, elem) ⇒ info.consumerIdByIdx(partitioner(elem)), lanes, bufferSize - 1) { + // -1 because of the Completed token + + override def createQueue(): PartitionHub.Internal.PartitionQueue = + new FixedSizePartitionQueue(lanes, bufferSize) + +} + +/** + * INTERNAL API + */ +@InternalApi private[akka] class FixedSizePartitionQueue(lanes: Int, capacity: Int) extends PartitionHub.Internal.PartitionQueue { + + private val queues = { + val arr = new Array[OneToOneConcurrentArrayQueue[AnyRef]](lanes) + var i = 0 + while (i < arr.length) { + arr(i) = new OneToOneConcurrentArrayQueue(capacity) + i += 1 + } + arr + } + + override def init(id: Long): Unit = () + + override def totalSize: Int = { + var sum = 0 + var i = 0 + while (i < lanes) { + sum += queues(i).size + i += 1 + } + sum + } + + override def size(id: Long): Int = + queues(id.toInt).size + + override def isEmpty(id: Long): Boolean = + queues(id.toInt).isEmpty + + override def nonEmpty(id: Long): Boolean = + !isEmpty(id) + + override def offer(id: Long, elem: Any): Unit = { + if (!queues(id.toInt).offer(elem.asInstanceOf[AnyRef])) + throw new IllegalStateException(s"queue is full, id [$id]") + } + + override def poll(id: Long): AnyRef = + queues(id.toInt).poll() + + override def remove(id: Long): Unit = + queues(id.toInt).clear() + +} diff --git a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala index 2b01cb8485..c1d585fd3c 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala @@ -46,10 +46,10 @@ private[remote] object FlightRecorderEvents { // Compression events val Compression_CompressedActorRef = 90 val Compression_AllocatedActorRefCompressionId = 91 - val Compression_CompressedManifest = 91 - val Compression_AllocatedManifestCompressionId = 92 - val Compression_Inbound_RunActorRefAdvertisement = 93 - val Compression_Inbound_RunClassManifestAdvertisement = 94 + val Compression_CompressedManifest = 92 + val Compression_AllocatedManifestCompressionId = 93 + val Compression_Inbound_RunActorRefAdvertisement = 94 + val Compression_Inbound_RunClassManifestAdvertisement = 95 // Used for presentation of the entries in the flight recorder lazy val eventDictionary = Map( @@ -92,7 +92,7 @@ private[remote] object FlightRecorderEvents { Compression_CompressedManifest → "Compression: Compressed manifest", Compression_AllocatedManifestCompressionId → "Compression: Allocated manifest compression id", Compression_Inbound_RunActorRefAdvertisement → "InboundCompression: Run class manifest compression advertisement", - Compression_Inbound_RunClassManifestAdvertisement → "InboundCompression: Run class manifest compression advertisement" - ).map { case (int, str) ⇒ int.toLong → str } + Compression_Inbound_RunClassManifestAdvertisement → "InboundCompression: Run class manifest compression advertisement") + .map { case (int, str) ⇒ int.toLong → str } } diff --git 
a/akka-remote/src/main/scala/akka/remote/artery/TaskRunner.scala b/akka-remote/src/main/scala/akka/remote/artery/TaskRunner.scala index cfbb1bb570..ac259c3963 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/TaskRunner.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/TaskRunner.scala @@ -96,11 +96,11 @@ private[akka] object TaskRunner { else if (idleCpuLevel == 10) new BusySpinIdleStrategy else { - // spin between 100 to 10000 depending on idleCpuLevel + // spin between 1200 to 8900 depending on idleCpuLevel val spinning = 1100 * idleCpuLevel - 1000 val yielding = 5 * idleCpuLevel val minParkNanos = 1 - // park between 250 and 10 micros depending on idleCpuLevel + // park between 220 and 10 micros depending on idleCpuLevel val maxParkNanos = MICROSECONDS.toNanos(280 - 30 * idleCpuLevel) new BackoffIdleStrategy(spinning, yielding, minParkNanos, maxParkNanos) } diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala index 2be4123c2a..3a9073bf3e 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala @@ -204,30 +204,69 @@ private[remote] final class InboundManifestCompression( */ private[remote] object InboundCompression { - object State { - def empty[T] = State( - oldTable = DecompressionTable.disabled[T], + final val KeepOldTablesNumber = 3 // TODO could be configurable + + object Tables { + def empty[T] = Tables( + oldTables = List(DecompressionTable.disabled[T]), activeTable = DecompressionTable.empty[T], nextTable = DecompressionTable.empty[T].copy(version = 1), - advertisementInProgress = None) + advertisementInProgress = None, + keepOldTables = KeepOldTablesNumber) } - final case class State[T]( - oldTable: DecompressionTable[T], + /** + * Encapsulates the various compression tables that Inbound Compression uses. + * + * @param oldTables is guaranteed to always have at-least one and at-most [[keepOldTables]] elements. + * It starts with containing only a single "disabled" table (versioned as `DecompressionTable.DisabledVersion`), + * and from there on continiously accumulates at most [[keepOldTables]] recently used tables. 
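+   * After a confirmed advertisement, `startUsingNextTable()` rotates these as
+   * `oldTables = (activeTable :: oldTables).take(keepOldTables)`, `activeTable = nextTable`,
+   * and a fresh `nextTable` carrying the next version (wrapping from 127 back to 0).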
+ */ + final case class Tables[T]( + oldTables: List[DecompressionTable[T]], activeTable: DecompressionTable[T], nextTable: DecompressionTable[T], - advertisementInProgress: Option[CompressionTable[T]]) { + advertisementInProgress: Option[CompressionTable[T]], + keepOldTables: Int) { - def startUsingNextTable(): State[T] = { + def selectTable(version: Int): OptionVal[DecompressionTable[T]] = { + if (activeTable.version == version) { + if (ArterySettings.Compression.Debug) println(s"[compress] Found table [version: ${version}], was [ACTIVE]${activeTable}") + OptionVal.Some(activeTable) + } else { + @tailrec def find(tables: List[DecompressionTable[T]]): OptionVal[DecompressionTable[T]] = { + tables match { + case Nil ⇒ OptionVal.None + case t :: tail ⇒ + if (t.version == version) OptionVal.Some(t) + else find(tail) + } + } + val found = find(oldTables) + + if (ArterySettings.Compression.Debug) { + found match { + case OptionVal.Some(t) ⇒ + println(s"[compress] Found table [version: ${version}], was [OLD][${t}], old tables: [${oldTables.map(_.version)}]") + case OptionVal.None ⇒ + println(s"[compress] Did not find table [version: ${version}], old tables: [${oldTables.map(_.version)}], activeTable: ${activeTable}, nextTable: ${nextTable}") + } + } + found + } + } + + def startUsingNextTable(): Tables[T] = { def incrementTableVersion(version: Byte): Byte = if (version == 127) 0 else (version + 1).toByte - State( - oldTable = activeTable, + Tables( + oldTables = (activeTable :: oldTables).take(keepOldTables), activeTable = nextTable, nextTable = DecompressionTable.empty[T].copy(version = incrementTableVersion(nextTable.version)), - advertisementInProgress = None) + advertisementInProgress = None, + keepOldTables = keepOldTables) } } @@ -246,7 +285,7 @@ private[remote] abstract class InboundCompression[T >: Null]( inboundContext: InboundContext, val heavyHitters: TopHeavyHitters[T]) { - private[this] var state: InboundCompression.State[T] = InboundCompression.State.empty + private[this] var tables: InboundCompression.Tables[T] = InboundCompression.Tables.empty // We should not continue sending advertisements to an association that might be dead (not quarantined yet) @volatile private[this] var alive = true @@ -270,44 +309,48 @@ private[remote] abstract class InboundCompression[T >: Null]( */ @tailrec final def decompressInternal(incomingTableVersion: Byte, idx: Int, attemptCounter: Int): OptionVal[T] = { // effectively should never loop more than once, to avoid infinite recursion blow up eagerly - if (attemptCounter > 2) throw new IllegalStateException(s"Unable to decompress $idx from table $incomingTableVersion. Internal state: ${state}") + if (attemptCounter > 2) throw new IllegalStateException(s"Unable to decompress $idx from table $incomingTableVersion. 
Internal tables: $tables") - val current = state - val oldVersion = current.oldTable.version + val current = tables val activeVersion = current.activeTable.version + def incomingVersionIsAdvertisementInProgress(incomingTableVersion: Byte): Boolean = + current.advertisementInProgress.isDefined && + incomingTableVersion == current.advertisementInProgress.get.version - if (incomingTableVersion == DecompressionTable.DisabledVersion) OptionVal.None // no compression, bail out early - else if (incomingTableVersion == activeVersion) { - val value: T = current.activeTable.get(idx) - if (value != null) OptionVal.Some[T](value) - else throw new UnknownCompressedIdException(idx) - } else if (incomingTableVersion == oldVersion) { - // must handle one old table due to messages in flight during advertisement - val value: T = current.oldTable.get(idx) - if (value != null) OptionVal.Some[T](value) - else throw new UnknownCompressedIdException(idx) - } else if (current.advertisementInProgress.isDefined && incomingTableVersion == current.advertisementInProgress.get.version) { - log.debug( - "Received first value from originUid [{}] compressed using the advertised compression table, flipping to it (version: {})", - originUid, current.nextTable.version) - confirmAdvertisement(incomingTableVersion) - decompressInternal(incomingTableVersion, idx, attemptCounter + 1) // recurse - } else { - // which means that incoming version was > nextTable.version, which likely that - // it is using a table that was built for previous incarnation of this system - log.warning( - "Inbound message from originUid [{}] is using unknown compression table version. " + - "It may have been sent with compression table built for previous incarnation of this system. " + - "Versions activeTable: {}, nextTable: {}, incomingTable: {}", - originUid, activeVersion, current.nextTable.version, incomingTableVersion) + if (incomingTableVersion == DecompressionTable.DisabledVersion) { + // no compression, bail out early OptionVal.None + } else { + current.selectTable(version = incomingTableVersion) match { + case OptionVal.Some(selectedTable) ⇒ + val value: T = selectedTable.get(idx) + if (value != null) OptionVal.Some[T](value) + else throw new UnknownCompressedIdException(idx) + + case _ if incomingVersionIsAdvertisementInProgress(incomingTableVersion) ⇒ + log.debug( + "Received first value from originUid [{}] compressed using the advertised compression table, flipping to it (version: {})", + originUid, current.nextTable.version) + confirmAdvertisement(incomingTableVersion) + decompressInternal(incomingTableVersion, idx, attemptCounter + 1) // recurse + + case _ ⇒ + // which means that incoming version was > nextTable.version, which likely that + // it is using a table that was built for previous incarnation of this system + log.warning( + "Inbound message from originUid [{}] is using unknown compression table version. " + + "It may have been sent with compression table built for previous incarnation of this system. 
" + + "Versions activeTable: {}, nextTable: {}, incomingTable: {}", + originUid, activeVersion, current.nextTable.version, incomingTableVersion) + OptionVal.None + } } } final def confirmAdvertisement(tableVersion: Byte): Unit = { - state.advertisementInProgress match { + tables.advertisementInProgress match { case Some(inProgress) if tableVersion == inProgress.version ⇒ - state = state.startUsingNextTable() + tables = tables.startUsingNextTable() log.debug("Confirmed compression table version [{}] for originUid [{}]", tableVersion, originUid) case Some(inProgress) if tableVersion != inProgress.version ⇒ log.debug( @@ -347,16 +390,16 @@ private[remote] abstract class InboundCompression[T >: Null]( * Triggers compression table advertisement. May be triggered by schedule or manually, i.e. for testing. */ private[remote] def runNextTableAdvertisement(): Unit = { - if (ArterySettings.Compression.Debug) println(s"[compress] runNextTableAdvertisement, state = ${state}") - state.advertisementInProgress match { + if (ArterySettings.Compression.Debug) println(s"[compress] runNextTableAdvertisement, tables = $tables") + tables.advertisementInProgress match { case None ⇒ inboundContext.association(originUid) match { case OptionVal.Some(association) ⇒ if (alive) { - val table = prepareCompressionAdvertisement(state.nextTable.version) + val table = prepareCompressionAdvertisement(tables.nextTable.version) // TODO expensive, check if building the other way wouldn't be faster? - val nextState = state.copy(nextTable = table.invert, advertisementInProgress = Some(table)) - state = nextState + val nextState = tables.copy(nextTable = table.invert, advertisementInProgress = Some(table)) + tables = nextState alive = false // will be set to true on first incoming message resendCount = 0 advertiseCompressionTable(association, table) diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala index 12232279f6..20d03ac94d 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala @@ -3,6 +3,8 @@ */ package akka.remote.serialization +import java.io.NotSerializableException + import akka.actor.{ ActorRef, Address, ExtendedActorSystem } import akka.protobuf.MessageLite import akka.remote.RemoteWatcher.ArteryHeartbeatRsp @@ -91,7 +93,7 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste case ClassManifestCompressionAdvertisementAckManifest ⇒ deserializeCompressionTableAdvertisementAck(bytes, ClassManifestCompressionAdvertisementAck) case ArteryHeartbeatManifest ⇒ RemoteWatcher.ArteryHeartbeat case ArteryHeartbeatRspManifest ⇒ deserializeArteryHeartbeatRsp(bytes, ArteryHeartbeatRsp) - case _ ⇒ throw new IllegalArgumentException(s"Manifest '$manifest' not defined for ArteryControlMessageSerializer (serializer id $identifier)") + case _ ⇒ throw new NotSerializableException(s"Manifest '$manifest' not defined for ArteryControlMessageSerializer (serializer id $identifier)") } import scala.collection.JavaConverters._ diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala index ecc19c6241..421db500d3 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala +++ 
b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala @@ -468,10 +468,10 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA handle ← if (isDatagram) Future { readyChannel.getRemoteAddress match { - case addr: InetSocketAddress ⇒ + case address: InetSocketAddress ⇒ val handle = new UdpAssociationHandle(localAddress, remoteAddress, readyChannel, NettyTransport.this) handle.readHandlerPromise.future.foreach { - listener ⇒ udpConnectionTable.put(addr, listener) + listener ⇒ udpConnectionTable.put(address, listener) } handle case unknown ⇒ throw new NettyTransportException(s"Unknown outbound remote address type [${unknown.getClass.getName}]") diff --git a/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala b/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala index 584e37870e..093748ff39 100644 --- a/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala @@ -13,7 +13,7 @@ import scala.collection.JavaConverters._ class DaemonicSpec extends AkkaSpec { - def addr(sys: ActorSystem, proto: String) = + def getOtherAddress(sys: ActorSystem, proto: String) = sys.asInstanceOf[ExtendedActorSystem].provider.getExternalAddressFor(Address(s"akka.$proto", "", "", 0)).get def unusedPort = { @@ -38,7 +38,7 @@ class DaemonicSpec extends AkkaSpec { akka.log-dead-letters-during-shutdown = off """)) - val unusedAddress = addr(daemonicSystem, "tcp").copy(port = Some(unusedPort)) + val unusedAddress = getOtherAddress(daemonicSystem, "tcp").copy(port = Some(unusedPort)) val selection = daemonicSystem.actorSelection(s"${unusedAddress}/user/SomeActor") selection ! "whatever" Thread.sleep(2.seconds.dilated.toMillis) diff --git a/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala b/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala index 2d45af4cf6..3534051896 100644 --- a/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala @@ -144,11 +144,11 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D "/gonk" → "tcp", "/zagzag" → "udp", "/roghtaar" → "ssl.tcp") - ) deploy(system, Deploy(name, scope = RemoteScope(addr(remoteSystem, proto)))) + ) deploy(system, Deploy(name, scope = RemoteScope(getOtherAddress(remoteSystem, proto)))) - def addr(sys: ActorSystem, proto: String) = + def getOtherAddress(sys: ActorSystem, proto: String) = sys.asInstanceOf[ExtendedActorSystem].provider.getExternalAddressFor(Address(s"akka.$proto", "", "", 0)).get - def port(sys: ActorSystem, proto: String) = addr(sys, proto).port.get + def port(sys: ActorSystem, proto: String) = getOtherAddress(sys, proto).port.get def deploy(sys: ActorSystem, d: Deploy) { sys.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].deployer.deploy(d) } @@ -239,8 +239,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D EventFilter.warning(pattern = "received dead letter.*"))) sys.actorOf(Props[Echo2], name = "echo") } - val moreRefs = moreSystems map (sys ⇒ system.actorSelection(RootActorPath(addr(sys, "tcp")) / "user" / "echo")) - val aliveEcho = system.actorSelection(RootActorPath(addr(remoteSystem, "tcp")) / "user" / "echo") + val moreRefs = moreSystems map (sys ⇒ system.actorSelection(RootActorPath(getOtherAddress(sys, "tcp")) / "user" / "echo")) + val aliveEcho = system.actorSelection(RootActorPath(getOtherAddress(remoteSystem, "tcp")) / "user" / "echo") val n = 
100 // first everything is up and running @@ -549,13 +549,13 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D try { val otherGuy = otherSystem.actorOf(Props[Echo2], "other-guy") // check that we use the specified transport address instead of the default - val otherGuyRemoteTcp = otherGuy.path.toSerializationFormatWithAddress(addr(otherSystem, "tcp")) + val otherGuyRemoteTcp = otherGuy.path.toSerializationFormatWithAddress(getOtherAddress(otherSystem, "tcp")) val remoteEchoHereTcp = system.actorFor(s"akka.tcp://remote-sys@localhost:${port(remoteSystem, "tcp")}/user/echo") val proxyTcp = system.actorOf(Props(classOf[Proxy], remoteEchoHereTcp, testActor), "proxy-tcp") proxyTcp ! otherGuy expectMsg(3.seconds, ("pong", otherGuyRemoteTcp)) // now check that we fall back to default when we haven't got a corresponding transport - val otherGuyRemoteTest = otherGuy.path.toSerializationFormatWithAddress(addr(otherSystem, "test")) + val otherGuyRemoteTest = otherGuy.path.toSerializationFormatWithAddress(getOtherAddress(otherSystem, "test")) val remoteEchoHereSsl = system.actorFor(s"akka.ssl.tcp://remote-sys@localhost:${port(remoteSystem, "ssl.tcp")}/user/echo") val proxySsl = system.actorOf(Props(classOf[Proxy], remoteEchoHereSsl, testActor), "proxy-ssl") EventFilter.warning(start = "Error while resolving ActorRef", occurrences = 1).intercept { diff --git a/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala b/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala index ebc68c90fa..89c1186128 100644 --- a/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala @@ -73,21 +73,21 @@ akka.loglevel = DEBUG akka.actor.provider = remote akka.remote.netty.tcp.port = 0 """)) - val addr = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress + val address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress val receptionist = system.actorOf(Props(classOf[Receptionist], testActor), "receptionist") lazy val remoteDaemon = { { val p = TestProbe()(client) - client.actorSelection(RootActorPath(addr) / receptionist.path.elements).tell(IdentifyReq("/remote"), p.ref) + client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell(IdentifyReq("/remote"), p.ref) p.expectMsgType[ActorIdentity].ref.get } } lazy val target2 = { val p = TestProbe()(client) - client.actorSelection(RootActorPath(addr) / receptionist.path.elements).tell( + client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell( IdentifyReq("child2"), p.ref) p.expectMsgType[ActorIdentity].ref.get } @@ -102,7 +102,7 @@ akka.loglevel = DEBUG "UntrustedMode" must { "allow actor selection to configured white list" in { - val sel = client.actorSelection(RootActorPath(addr) / receptionist.path.elements) + val sel = client.actorSelection(RootActorPath(address) / receptionist.path.elements) sel ! "hello" expectMsg("hello") } @@ -144,14 +144,14 @@ akka.loglevel = DEBUG } "discard actor selection" in { - val sel = client.actorSelection(RootActorPath(addr) / testActor.path.elements) + val sel = client.actorSelection(RootActorPath(address) / testActor.path.elements) sel ! 
"hello" expectNoMsg(1.second) } "discard actor selection with non root anchor" in { val p = TestProbe()(client) - client.actorSelection(RootActorPath(addr) / receptionist.path.elements).tell( + client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell( Identify(None), p.ref) val clientReceptionistRef = p.expectMsgType[ActorIdentity].ref.get @@ -161,19 +161,19 @@ akka.loglevel = DEBUG } "discard actor selection to child of matching white list" in { - val sel = client.actorSelection(RootActorPath(addr) / receptionist.path.elements / "child1") + val sel = client.actorSelection(RootActorPath(address) / receptionist.path.elements / "child1") sel ! "hello" expectNoMsg(1.second) } "discard actor selection with wildcard" in { - val sel = client.actorSelection(RootActorPath(addr) / receptionist.path.elements / "*") + val sel = client.actorSelection(RootActorPath(address) / receptionist.path.elements / "*") sel ! "hello" expectNoMsg(1.second) } "discard actor selection containing harmful message" in { - val sel = client.actorSelection(RootActorPath(addr) / receptionist.path.elements) + val sel = client.actorSelection(RootActorPath(address) / receptionist.path.elements) sel ! PoisonPill expectNoMsg(1.second) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala index 9eac28541e..1b3fdc5b43 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala @@ -9,6 +9,7 @@ import akka.testkit.{ AkkaSpec, ImplicitSender, TestActors, TestProbe } import com.typesafe.config.{ Config, ConfigFactory } import scala.concurrent.duration._ +import akka.actor.ActorSelection class RemoteSendConsistencySpec extends AbstractRemoteSendConsistencySpec(ArterySpecSupport.defaultConfig) @@ -33,18 +34,26 @@ abstract class AbstractRemoteSendConsistencySpec(config: Config) extends ArteryM } }), "echo") - val remoteRef = { + val echoSel = system.actorSelection(rootB / "user" / "echo") + val echoRef = { system.actorSelection(rootB / "user" / "echo") ! Identify(None) expectMsgType[ActorIdentity](5.seconds).ref.get } - remoteRef ! "ping" + echoRef ! "ping" expectMsg("pong") - remoteRef ! "ping" + echoRef ! "ping" expectMsg("pong") - remoteRef ! "ping" + echoRef ! "ping" + expectMsg("pong") + + // and actorSelection + echoSel ! "ping" + expectMsg("pong") + + echoSel ! "ping" expectMsg("pong") } @@ -116,6 +125,45 @@ abstract class AbstractRemoteSendConsistencySpec(config: Config) extends ArteryM } } + "be able to send messages with actorSelection concurrently preserving order" in { + systemB.actorOf(TestActors.echoActorProps, "echoA2") + systemB.actorOf(TestActors.echoActorProps, "echoB2") + systemB.actorOf(TestActors.echoActorProps, "echoC2") + + val selA = system.actorSelection(rootB / "user" / "echoA2") + val selB = system.actorSelection(rootB / "user" / "echoB2") + val selC = system.actorSelection(rootB / "user" / "echoC2") + + def senderProps(sel: ActorSelection) = Props(new Actor { + var counter = 1000 + sel ! counter + + override def receive: Receive = { + case i: Int ⇒ + if (i != counter) testActor ! s"Failed, expected $counter got $i" + else if (counter == 0) { + testActor ! "success2" + context.stop(self) + } else { + counter -= 1 + sel ! 
counter + } + } + }).withDeploy(Deploy.local) + + system.actorOf(senderProps(selA)) + system.actorOf(senderProps(selB)) + system.actorOf(senderProps(selC)) + system.actorOf(senderProps(selA)) + + within(10.seconds) { + expectMsg("success2") + expectMsg("success2") + expectMsg("success2") + expectMsg("success2") + } + } + } } diff --git a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala index bc4f42a4aa..733e75d3d7 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala @@ -129,7 +129,7 @@ class SystemMessageDeliverySpec extends ArteryMultiNodeSpec(ArterySpecSupport.de expectMsg("hello") systemC.terminate() // DeathWatchNotification is sent from systemC, failure detection takes longer than 3 seconds - expectTerminated(remoteRef, 5.seconds) + expectTerminated(remoteRef, 10.seconds) } finally { shutdown(systemC) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala index f7213fd53d..b88ff55912 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala @@ -74,21 +74,21 @@ class UntrustedSpec extends ArteryMultiNodeSpec(UntrustedSpec.config) with Impli import UntrustedSpec._ val client = newRemoteSystem(name = Some("UntrustedSpec-client")) - val addr = RARP(system).provider.getDefaultAddress + val address = RARP(system).provider.getDefaultAddress val receptionist = system.actorOf(Props(classOf[Receptionist], testActor), "receptionist") lazy val remoteDaemon = { { val p = TestProbe()(client) - client.actorSelection(RootActorPath(addr) / receptionist.path.elements).tell(IdentifyReq("/remote"), p.ref) + client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell(IdentifyReq("/remote"), p.ref) p.expectMsgType[ActorIdentity].ref.get } } lazy val target2 = { val p = TestProbe()(client) - client.actorSelection(RootActorPath(addr) / receptionist.path.elements).tell( + client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell( IdentifyReq("child2"), p.ref) p.expectMsgType[ActorIdentity].ref.get } @@ -99,7 +99,7 @@ class UntrustedSpec extends ArteryMultiNodeSpec(UntrustedSpec.config) with Impli "UntrustedMode" must { "allow actor selection to configured white list" in { - val sel = client.actorSelection(RootActorPath(addr) / receptionist.path.elements) + val sel = client.actorSelection(RootActorPath(address) / receptionist.path.elements) sel ! "hello" expectMsg("hello") } @@ -141,14 +141,14 @@ class UntrustedSpec extends ArteryMultiNodeSpec(UntrustedSpec.config) with Impli } "discard actor selection" in { - val sel = client.actorSelection(RootActorPath(addr) / testActor.path.elements) + val sel = client.actorSelection(RootActorPath(address) / testActor.path.elements) sel ! 
"hello" expectNoMsg(1.second) } "discard actor selection with non root anchor" in { val p = TestProbe()(client) - client.actorSelection(RootActorPath(addr) / receptionist.path.elements).tell( + client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell( Identify(None), p.ref) val clientReceptionistRef = p.expectMsgType[ActorIdentity].ref.get @@ -158,19 +158,19 @@ class UntrustedSpec extends ArteryMultiNodeSpec(UntrustedSpec.config) with Impli } "discard actor selection to child of matching white list" in { - val sel = client.actorSelection(RootActorPath(addr) / receptionist.path.elements / "child1") + val sel = client.actorSelection(RootActorPath(address) / receptionist.path.elements / "child1") sel ! "hello" expectNoMsg(1.second) } "discard actor selection with wildcard" in { - val sel = client.actorSelection(RootActorPath(addr) / receptionist.path.elements / "*") + val sel = client.actorSelection(RootActorPath(address) / receptionist.path.elements / "*") sel ! "hello" expectNoMsg(1.second) } "discard actor selection containing harmful message" in { - val sel = client.actorSelection(RootActorPath(addr) / receptionist.path.elements) + val sel = client.actorSelection(RootActorPath(address) / receptionist.path.elements) sel ! PoisonPill expectNoMsg(1.second) } diff --git a/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala index e20aabca6f..3114182229 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala @@ -4,6 +4,8 @@ package akka.remote.serialization +import java.io.NotSerializableException + import akka.actor._ import akka.remote.{ RemoteWatcher, UniqueAddress } import akka.remote.artery.OutboundHandshake.{ HandshakeReq, HandshakeRsp } @@ -55,7 +57,7 @@ class ArteryMessageSerializerSpec extends AkkaSpec { } "reject deserialization with invalid manifest" in { - intercept[IllegalArgumentException] { + intercept[NotSerializableException] { val serializer = new ArteryMessageSerializer(system.asInstanceOf[ExtendedActorSystem]) serializer.fromBinary(Array.empty[Byte], "INVALID") } diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala index 9bb62ca3f8..8573e54765 100644 --- a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala +++ b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala @@ -7,12 +7,9 @@ package akka.event.slf4j import org.slf4j.{ MDC, Marker, MarkerFactory, Logger ⇒ SLFLogger, LoggerFactory ⇒ SLFLoggerFactory } import akka.event.Logging._ import akka.actor._ -import akka.event.DummyClassForStringSources +import akka.event.{ LogMarker, _ } import akka.util.Helpers -import akka.event.LoggingFilter -import akka.event.EventStream import akka.dispatch.RequiresMessageQueue -import akka.event.LoggerMessageQueueSemantics /** * Base trait for all classes that wants to be able use the SLF4J logging infrastructure. 
@@ -111,8 +108,12 @@ class Slf4jLogger extends Actor with SLF4JLogging with RequiresMessageQueue[Logg private final def markerIfPresent(event: LogEvent): Marker = event match { - case m: LogEventWithMarker ⇒ MarkerFactory.getMarker(m.marker.name) - case _ ⇒ null + case m: LogEventWithMarker ⇒ + m.marker match { + case slf4jMarker: Slf4jLogMarker ⇒ slf4jMarker.marker + case marker ⇒ MarkerFactory.getMarker(marker.name) + } + case _ ⇒ null } /** @@ -141,3 +142,14 @@ class Slf4jLoggingFilter(settings: ActorSystem.Settings, eventStream: EventStrea def isDebugEnabled(logClass: Class[_], logSource: String) = (eventStream.logLevel >= DebugLevel) && Logger(logClass, logSource).isDebugEnabled } + +/** Wraps [[org.slf4j.Marker]] */ +final class Slf4jLogMarker(val marker: org.slf4j.Marker) extends LogMarker(name = marker.getName) + +/** Factory for creating [[LogMarker]] that wraps [[org.slf4j.Marker]] */ +object Slf4jLogMarker { + def apply(marker: org.slf4j.Marker): Slf4jLogMarker = new Slf4jLogMarker(marker) + + /** Java API */ + def create(marker: org.slf4j.Marker): Slf4jLogMarker = apply(marker) +} diff --git a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala index 415dd8d781..88dee54b45 100644 --- a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala +++ b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala @@ -13,6 +13,7 @@ import ch.qos.logback.core.OutputStreamAppender import java.io.ByteArrayOutputStream import org.scalatest.BeforeAndAfterEach +import org.slf4j.{ Marker, MarkerFactory } object Slf4jLoggerSpec { @@ -28,6 +29,8 @@ object Slf4jLoggerSpec { final case class StringWithMDC(s: String, mdc: Map[String, Any]) final case class StringWithMarker(s: String, marker: LogMarker) + final case class StringWithSlf4jMarker(s: String, marker: Marker) + final case class StringWithSlf4jMarkerMDC(s: String, marker: Marker, mdc: Map[String, Any]) final class LogProducer extends Actor with DiagnosticActorLogging { @@ -38,6 +41,12 @@ object Slf4jLoggerSpec { log.error(e, e.getMessage) case (s: String, x: Int, y: Int) ⇒ log.info(s, x, y) + case StringWithSlf4jMarker(s, m) ⇒ + markLog.info(Slf4jLogMarker(m), s) + case StringWithSlf4jMarkerMDC(s, mark, mdc) ⇒ + markLog.mdc(mdc) + markLog.info(Slf4jLogMarker(mark), s) + markLog.clearMDC() case StringWithMDC(s, mdc) ⇒ log.mdc(mdc) log.info(s) @@ -110,6 +119,28 @@ class Slf4jLoggerSpec extends AkkaSpec(Slf4jLoggerSpec.config) with BeforeAndAft s should include("msg=[security-wise interesting message]") } + "log info with slf4j marker" in { + val slf4jMarker = MarkerFactory.getMarker("SLF") + slf4jMarker.add(MarkerFactory.getMarker("ADDED")) // slf4j markers can have children + producer ! StringWithSlf4jMarker("security-wise interesting message", slf4jMarker) + + awaitCond(outputString.contains("----"), 5 seconds) + val s = outputString + s should include("marker=[SLF [ ADDED ]]") + s should include("msg=[security-wise interesting message]") + } + "log info with slf4j marker and MDC" in { + val slf4jMarker = MarkerFactory.getMarker("SLF") + slf4jMarker.add(MarkerFactory.getMarker("ADDED")) // slf4j markers can have children + producer ! 
StringWithSlf4jMarkerMDC("security-wise interesting message", slf4jMarker, Map("ticketNumber" → 3671, "ticketDesc" → "Custom MDC Values")) + + awaitCond(outputString.contains("----"), 5 seconds) + val s = outputString + s should include("marker=[SLF [ ADDED ]]") + s should include("mdc=[ticket-#3671: Custom MDC Values]") + s should include("msg=[security-wise interesting message]") + } + "put custom MDC values when specified" in { producer ! StringWithMDC("Message with custom MDC values", Map("ticketNumber" → 3671, "ticketDesc" → "Custom MDC Values")) diff --git a/akka-stream-testkit/src/main/mima-filters/2.4.14.backwards.excludes b/akka-stream-testkit/src/main/mima-filters/2.4.14.backwards.excludes new file mode 100644 index 0000000000..05e1bab0b1 --- /dev/null +++ b/akka-stream-testkit/src/main/mima-filters/2.4.14.backwards.excludes @@ -0,0 +1,2 @@ +# #20737 aligned test sink and test source stage factory methods types +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.testkit.TestSinkStage.apply") diff --git a/akka-stream-testkit/src/main/mima-filters/2.4.x.backwards.excludes b/akka-stream-testkit/src/main/mima-filters/2.4.x.backwards.excludes new file mode 100644 index 0000000000..2084161936 --- /dev/null +++ b/akka-stream-testkit/src/main/mima-filters/2.4.x.backwards.excludes @@ -0,0 +1,3 @@ +# small changes in attributes +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.testkit.StreamTestKit#ProbeSource.withAttributes") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.testkit.StreamTestKit#ProbeSink.withAttributes") diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala index 69804a0728..bb075af767 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala @@ -3,17 +3,21 @@ */ package akka.stream.testkit -import akka.actor.{ ActorSystem, DeadLetterSuppression, NoSerializationVerificationNeeded } +import akka.actor.{ ActorRef, ActorSystem, DeadLetterSuppression, NoSerializationVerificationNeeded } import akka.stream._ import akka.stream.impl._ -import akka.testkit.TestProbe +import akka.testkit.{ TestActor, TestProbe } import org.reactivestreams.{ Publisher, Subscriber, Subscription } + import scala.annotation.tailrec import scala.collection.immutable import scala.concurrent.duration._ import scala.language.existentials import java.io.StringWriter import java.io.PrintWriter +import java.util.concurrent.CountDownLatch + +import akka.testkit.TestActor.{ AutoPilot, NoAutoPilot } /** * Provides factory methods for various Publishers. @@ -27,6 +31,8 @@ object TestPublisher { final case class CancelSubscription(subscription: Subscription) extends PublisherEvent final case class RequestMore(subscription: Subscription, elements: Long) extends PublisherEvent + final object SubscriptionDone extends NoSerializationVerificationNeeded + /** * Publisher that signals complete to subscribers, after handing a void subscription. 
*/ @@ -74,6 +80,15 @@ object TestPublisher { private val probe: TestProbe = TestProbe() + //this is a way to pause receiving message from probe until subscription is done + private val subscribed = new CountDownLatch(1) + probe.ignoreMsg { case SubscriptionDone ⇒ true } + probe.setAutoPilot(new TestActor.AutoPilot() { + override def run(sender: ActorRef, msg: Any): AutoPilot = { + if (msg == SubscriptionDone) subscribed.countDown() + this + } + }) private val self = this.asInstanceOf[Self] /** @@ -83,18 +98,26 @@ object TestPublisher { val subscription: PublisherProbeSubscription[I] = new PublisherProbeSubscription[I](subscriber, probe) probe.ref ! Subscribe(subscription) if (autoOnSubscribe) subscriber.onSubscribe(subscription) + probe.ref ! SubscriptionDone + } + + def executeAfterSubscription[T](f: ⇒ T): T = { + subscribed.await( + probe.testKitSettings.DefaultTimeout.duration.length, + probe.testKitSettings.DefaultTimeout.duration.unit) + f } /** * Expect a subscription. */ def expectSubscription(): PublisherProbeSubscription[I] = - probe.expectMsgType[Subscribe].subscription.asInstanceOf[PublisherProbeSubscription[I]] + executeAfterSubscription { probe.expectMsgType[Subscribe].subscription.asInstanceOf[PublisherProbeSubscription[I]] } /** * Expect demand from a given subscription. */ - def expectRequest(subscription: Subscription, n: Int): Self = { + def expectRequest(subscription: Subscription, n: Int): Self = executeAfterSubscription { probe.expectMsg(RequestMore(subscription, n)) self } @@ -102,7 +125,7 @@ object TestPublisher { /** * Expect no messages. */ - def expectNoMsg(): Self = { + def expectNoMsg(): Self = executeAfterSubscription { probe.expectNoMsg() self } @@ -110,7 +133,7 @@ object TestPublisher { /** * Expect no messages for a given duration. */ - def expectNoMsg(max: FiniteDuration): Self = { + def expectNoMsg(max: FiniteDuration): Self = executeAfterSubscription { probe.expectNoMsg(max) self } @@ -119,10 +142,10 @@ object TestPublisher { * Receive messages for a given duration or until one does not match a given partial function. */ def receiveWhile[T](max: Duration = Duration.Undefined, idle: Duration = Duration.Inf, messages: Int = Int.MaxValue)(f: PartialFunction[PublisherEvent, T]): immutable.Seq[T] = - probe.receiveWhile(max, idle, messages)(f.asInstanceOf[PartialFunction[AnyRef, T]]) + executeAfterSubscription { probe.receiveWhile(max, idle, messages)(f.asInstanceOf[PartialFunction[AnyRef, T]]) } def expectEventPF[T](f: PartialFunction[PublisherEvent, T]): T = - probe.expectMsgPF[T]()(f.asInstanceOf[PartialFunction[Any, T]]) + executeAfterSubscription { probe.expectMsgPF[T]()(f.asInstanceOf[PartialFunction[Any, T]]) } def getPublisher: Publisher[I] = this @@ -142,12 +165,12 @@ object TestPublisher { * } * }}} */ - def within[T](min: FiniteDuration, max: FiniteDuration)(f: ⇒ T): T = probe.within(min, max)(f) + def within[T](min: FiniteDuration, max: FiniteDuration)(f: ⇒ T): T = executeAfterSubscription { probe.within(min, max)(f) } /** * Same as calling `within(0 seconds, max)(f)`. 
*/ - def within[T](max: FiniteDuration)(f: ⇒ T): T = probe.within(max)(f) + def within[T](max: FiniteDuration)(f: ⇒ T): T = executeAfterSubscription { probe.within(max)(f) } } /** diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala index 6482135c7d..d67c4f16af 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala @@ -18,7 +18,7 @@ abstract class AkkaIdentityProcessorVerification[T](env: TestEnvironment, publis with TestNGSuiteLike with ActorSystemLifecycle { def this(printlnDebug: Boolean) = - this(new TestEnvironment(Timeouts.defaultTimeoutMillis, printlnDebug), Timeouts.publisherShutdownTimeoutMillis) + this(new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug), Timeouts.publisherShutdownTimeoutMillis) def this() = this(false) diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala index 5c5d3a6edd..da43e483f5 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala @@ -16,7 +16,7 @@ abstract class AkkaPublisherVerification[T](val env: TestEnvironment, publisherS with TestNGSuiteLike with ActorSystemLifecycle { def this(printlnDebug: Boolean) = - this(new TestEnvironment(Timeouts.defaultTimeoutMillis, printlnDebug), Timeouts.publisherShutdownTimeoutMillis) + this(new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug), Timeouts.publisherShutdownTimeoutMillis) def this() = this(false) diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaSubscriberVerification.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaSubscriberVerification.scala index ea541236bd..d7345362d0 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaSubscriberVerification.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaSubscriberVerification.scala @@ -16,7 +16,7 @@ abstract class AkkaSubscriberBlackboxVerification[T](env: TestEnvironment) with AkkaSubscriberVerificationLike with ActorSystemLifecycle { def this(printlnDebug: Boolean) = - this(new TestEnvironment(Timeouts.defaultTimeoutMillis, printlnDebug)) + this(new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug)) def this() = this(false) } @@ -26,7 +26,7 @@ abstract class AkkaSubscriberWhiteboxVerification[T](env: TestEnvironment) with AkkaSubscriberVerificationLike { def this(printlnDebug: Boolean) = - this(new TestEnvironment(Timeouts.defaultTimeoutMillis, printlnDebug)) + this(new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug)) def this() = this(false) } diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/Timeouts.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/Timeouts.scala index c9582c8354..a5baa7d393 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/Timeouts.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/Timeouts.scala @@ -12,4 +12,6 @@ object Timeouts { def defaultTimeoutMillis: 
Int = 800 + def defaultNoSignalsTimeoutMillis: Int = 200 + } diff --git a/akka-stream-tests/src/test/java/akka/stream/javadsl/FlowTest.java b/akka-stream-tests/src/test/java/akka/stream/javadsl/FlowTest.java index a1b7c8d26b..b3e172a4d0 100644 --- a/akka-stream-tests/src/test/java/akka/stream/javadsl/FlowTest.java +++ b/akka-stream-tests/src/test/java/akka/stream/javadsl/FlowTest.java @@ -635,9 +635,9 @@ public class FlowTest extends StreamTest { public void mustBeAbleToUseBatchWeighted() throws Exception { final TestKit probe = new TestKit(system); final List input = Arrays.asList("A", "B", "C"); - final Flow flow = Flow.of(String.class).batchWeighted(3L, new Function() { + final Flow flow = Flow.of(String.class).batchWeighted(3L, new Function() { @Override - public Object apply(String s) throws Exception { + public java.lang.Long apply(String s) throws Exception { return 1L; } }, new Function() { diff --git a/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala index de64c4793d..0e329774de 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala @@ -3,7 +3,7 @@ package akka.stream import akka.actor.{ ActorSystem, Props } import akka.stream.impl.{ PhasedFusingActorMaterializer, StreamSupervisor } import akka.stream.scaladsl.{ Sink, Source } -import akka.stream.testkit.StreamSpec +import akka.stream.testkit.{ StreamSpec, TestPublisher } import akka.testkit.{ ImplicitSender, TestActor } import scala.concurrent.Await @@ -24,7 +24,7 @@ class ActorMaterializerSpec extends StreamSpec with ImplicitSender { "properly shut down actors associated with it" in { val m = ActorMaterializer.create(system) - val f = Source.maybe[Int].runFold(0)(_ + _)(m) + val f = Source.fromPublisher(TestPublisher.probe[Int]()(system)).runFold(0)(_ + _)(m) m.shutdown() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FailedSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FailedSourceSpec.scala new file mode 100644 index 0000000000..ddf3ac082a --- /dev/null +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FailedSourceSpec.scala @@ -0,0 +1,35 @@ +/** + * Copyright (C) 2014-2017 Lightbend Inc. 
+ */ +package akka.stream.scaladsl + +import akka.stream.ActorMaterializer +import akka.stream.testkit.{ StreamSpec, TestSubscriber, Utils } +import akka.testkit.DefaultTimeout +import org.scalatest.time.{ Millis, Span } + +import scala.concurrent.Await +import scala.concurrent.duration._ +import scala.util.Failure +import scala.util.control.NoStackTrace + +class FailedSourceSpec extends StreamSpec with DefaultTimeout { + + implicit val materializer = ActorMaterializer() + + "The Failed Source" must { + "emit error immediately" in { + val ex = new RuntimeException with NoStackTrace + val p = Source.failed(ex).runWith(Sink.asPublisher(false)) + val c = TestSubscriber.manualProbe[Int]() + p.subscribe(c) + c.expectSubscriptionAndError(ex) + + // reject additional subscriber + val c2 = TestSubscriber.manualProbe[Int]() + p.subscribe(c2) + c2.expectSubscriptionAndError() + } + } + +} \ No newline at end of file diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala index 238f7b4805..7af9f02afb 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala @@ -6,12 +6,14 @@ package akka.stream.scaladsl import scala.collection.immutable import scala.concurrent.duration._ import java.util.concurrent.ThreadLocalRandom.{ current ⇒ random } + import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, ThrottleMode } import akka.stream.testkit._ import akka.stream.testkit.Utils._ import scala.concurrent.Await import akka.testkit.TimingTest +import akka.util.ConstantFun class FlowGroupedWithinSpec extends StreamSpec with ScriptedTest { @@ -252,5 +254,26 @@ class FlowGroupedWithinSpec extends StreamSpec with ScriptedTest { וupstream.sendComplete() downstream.expectComplete() } + + "handle zero cost function to get only timed based grouping without limit" taggedAs TimingTest in { + val upstream = TestPublisher.probe[String]() + val downstream = TestSubscriber.probe[immutable.Seq[String]]() + Source + .fromPublisher(upstream) + .groupedWeightedWithin(1, 100.millis)(ConstantFun.zeroLong) + .to(Sink.fromSubscriber(downstream)) + .run() + + downstream.ensureSubscription() + downstream.request(1) + upstream.sendNext("333") + upstream.sendNext("22") + upstream.sendNext("333") + upstream.sendNext("22") + downstream.expectNoMsg(50.millis) + downstream.expectNext(Vector("333", "22", "333", "22"): immutable.Seq[String]) + upstream.sendComplete() + downstream.expectComplete() + } } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala index 914ce4c91a..065cf92ede 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala @@ -153,6 +153,16 @@ class FramingSpec extends StreamSpec { val fieldOffsets = List(0, 1, 2, 3, 15, 16, 31, 32, 44, 107) def encode(payload: ByteString, fieldOffset: Int, fieldLength: Int, byteOrder: ByteOrder): ByteString = { + encodeComplexFrame(payload, fieldOffset, fieldLength, byteOrder, ByteString(new Array[Byte](fieldOffset)), ByteString.empty) + } + + def encodeComplexFrame( + payload: ByteString, + fieldOffset: Int, + fieldLength: Int, + byteOrder: ByteOrder, + offset: ByteString, + tail: ByteString): ByteString = { val header = { val h 
= (new ByteStringBuilder).putInt(payload.size)(byteOrder).result() byteOrder match { @@ -160,8 +170,7 @@ class FramingSpec extends StreamSpec { case ByteOrder.BIG_ENDIAN ⇒ h.drop(4 - fieldLength) } } - - ByteString(new Array[Byte](fieldOffset)) ++ header ++ payload + offset ++ header ++ payload ++ tail } "work with various byte orders, frame lengths and offsets" taggedAs LongRunningTest in { @@ -186,6 +195,41 @@ class FramingSpec extends StreamSpec { } + "work with various byte orders, frame lengths and offsets using computeFrameSize" taggedAs LongRunningTest in { + for { + byteOrder ← byteOrders + fieldOffset ← fieldOffsets + fieldLength ← fieldLengths + } { + + def computeFrameSize(offset: Array[Byte], length: Int): Int = { + val sizeWithoutTail = offset.length + fieldLength + length + if (offset.length > 0) offset(0) + sizeWithoutTail else sizeWithoutTail + } + + def offset(): Array[Byte] = { + val arr = new Array[Byte](fieldOffset) + if (arr.length > 0) arr(0) = Random.nextInt(128).toByte + arr + } + + val encodedFrames = frameLengths.filter(_ < (1L << (fieldLength * 8))).map { length ⇒ + val payload = referenceChunk.take(length) + val offsetBytes = offset() + val tailBytes = if (offsetBytes.length > 0) new Array[Byte](offsetBytes(0)) else Array.empty[Byte] + encodeComplexFrame(payload, fieldOffset, fieldLength, byteOrder, ByteString(offsetBytes), ByteString(tailBytes)) + } + + Source(encodedFrames) + .via(rechunk) + .via(Framing.lengthField(fieldLength, fieldOffset, Int.MaxValue, byteOrder, computeFrameSize)) + .grouped(10000) + .runWith(Sink.head) + .futureValue(Timeout(5.seconds)) should ===(encodedFrames) + } + + } + "work with empty streams" in { Source.empty.via(Framing.lengthField(4, 0, Int.MaxValue, ByteOrder.BIG_ENDIAN)) .runFold(Vector.empty[ByteString])(_ :+ _) @@ -284,6 +328,25 @@ class FramingSpec extends StreamSpec { ex.getMessage should ===("Decoded frame header reported negative size -4") } + "fail the stage on computeFrameSize values less than minimum chunk size" in { + implicit val bo = java.nio.ByteOrder.LITTLE_ENDIAN + + def computeFrameSize(arr: Array[Byte], l: Int): Int = 3 + + // A 4-byte message containing only an Int specifying the length of the payload + val bs = ByteString.newBuilder.putInt(4).result() + + val res = + Source + .single(bs) + .via(Flow[ByteString].via(Framing.lengthField(4, 0, 1000, bo, computeFrameSize))) + .runWith(Sink.seq) + + val ex = res.failed.futureValue + ex shouldBe a[FramingException] + ex.getMessage should ===("Computed frame size 3 is less than minimum chunk size 4") + } + "let zero length field values pass through (#22367)" in { implicit val bo = java.nio.ByteOrder.LITTLE_ENDIAN diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala index dece30419a..9897e3a2a4 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala @@ -327,7 +327,7 @@ class HubSpec extends StreamSpec { downstream2.expectError(TE("Failed")) } - "properly singal completion to consumers arriving after producer finished" in assertAllStagesStopped { + "properly signal completion to consumers arriving after producer finished" in assertAllStagesStopped { val source = Source.empty[Int].runWith(BroadcastHub.sink(8)) // Wait enough so the Hub gets the completion. 
This is racy, but this is fine because both // cases should work in the end @@ -354,7 +354,7 @@ class HubSpec extends StreamSpec { sink2Probe.expectComplete() } - "properly singal error to consumers arriving after producer finished" in assertAllStagesStopped { + "properly signal error to consumers arriving after producer finished" in assertAllStagesStopped { val source = Source.failed(TE("Fail!")).runWith(BroadcastHub.sink(8)) // Wait enough so the Hub gets the completion. This is racy, but this is fine because both // cases should work in the end @@ -367,4 +367,241 @@ class HubSpec extends StreamSpec { } + "PartitionHub" must { + + "work in the happy case with one stream" in assertAllStagesStopped { + val source = Source(1 to 10).runWith(PartitionHub.sink((size, elem) ⇒ 0, startAfterNrOfConsumers = 0, bufferSize = 8)) + source.runWith(Sink.seq).futureValue should ===(1 to 10) + } + + "work in the happy case with two streams" in assertAllStagesStopped { + val source = Source(0 until 10).runWith(PartitionHub.sink((size, elem) ⇒ elem % size, startAfterNrOfConsumers = 2, bufferSize = 8)) + val result1 = source.runWith(Sink.seq) + // it should not start publishing until startAfterNrOfConsumers = 2 + Thread.sleep(20) + val result2 = source.runWith(Sink.seq) + result1.futureValue should ===(0 to 8 by 2) + result2.futureValue should ===(1 to 9 by 2) + } + + "be able to use as round-robin router" in assertAllStagesStopped { + val source = Source(0 until 10).runWith(PartitionHub.statefulSink(() ⇒ { + var n = 0L + + (info, elem) ⇒ { + n += 1 + info.consumerIdByIdx((n % info.size).toInt) + } + }, startAfterNrOfConsumers = 2, bufferSize = 8)) + val result1 = source.runWith(Sink.seq) + val result2 = source.runWith(Sink.seq) + result1.futureValue should ===(1 to 9 by 2) + result2.futureValue should ===(0 to 8 by 2) + } + + "be able to use as sticky session router" in assertAllStagesStopped { + val source = Source(List("usr-1", "usr-2", "usr-1", "usr-3")).runWith(PartitionHub.statefulSink(() ⇒ { + var sessions = Map.empty[String, Long] + var n = 0L + + (info, elem) ⇒ { + sessions.get(elem) match { + case Some(id) if info.consumerIds.exists(_ == id) ⇒ id + case _ ⇒ + n += 1 + val id = info.consumerIdByIdx((n % info.size).toInt) + sessions = sessions.updated(elem, id) + id + } + } + }, startAfterNrOfConsumers = 2, bufferSize = 8)) + val result1 = source.runWith(Sink.seq) + val result2 = source.runWith(Sink.seq) + result1.futureValue should ===(List("usr-2")) + result2.futureValue should ===(List("usr-1", "usr-1", "usr-3")) + } + + "be able to use as fastest consumer router" in assertAllStagesStopped { + val source = Source(0 until 1000).runWith(PartitionHub.statefulSink( + () ⇒ (info, elem) ⇒ info.consumerIds.toVector.minBy(id ⇒ info.queueSize(id)), + startAfterNrOfConsumers = 2, bufferSize = 4)) + val result1 = source.runWith(Sink.seq) + val result2 = source.throttle(10, 100.millis, 10, ThrottleMode.Shaping).runWith(Sink.seq) + + result1.futureValue.size should be > (result2.futureValue.size) + } + + "route evenly" in assertAllStagesStopped { + val (testSource, hub) = TestSource.probe[Int].toMat( + PartitionHub.sink((size, elem) ⇒ elem % size, startAfterNrOfConsumers = 2, bufferSize = 8))(Keep.both).run() + val probe0 = hub.runWith(TestSink.probe[Int]) + val probe1 = hub.runWith(TestSink.probe[Int]) + probe0.request(3) + probe1.request(10) + testSource.sendNext(0) + probe0.expectNext(0) + testSource.sendNext(1) + probe1.expectNext(1) + + testSource.sendNext(2) + testSource.sendNext(3) + 
testSource.sendNext(4) + probe0.expectNext(2) + probe1.expectNext(3) + probe0.expectNext(4) + + // probe1 has not requested more + testSource.sendNext(5) + testSource.sendNext(6) + testSource.sendNext(7) + probe1.expectNext(5) + probe1.expectNext(7) + probe0.expectNoMsg(10.millis) + probe0.request(10) + probe0.expectNext(6) + + testSource.sendComplete() + probe0.expectComplete() + probe1.expectComplete() + } + + "route unevenly" in assertAllStagesStopped { + val (testSource, hub) = TestSource.probe[Int].toMat( + PartitionHub.sink((size, elem) ⇒ (elem % 3) % 2, startAfterNrOfConsumers = 2, bufferSize = 8))(Keep.both).run() + val probe0 = hub.runWith(TestSink.probe[Int]) + val probe1 = hub.runWith(TestSink.probe[Int]) + + // (_ % 3) % 2 + // 0 => 0 + // 1 => 1 + // 2 => 0 + // 3 => 0 + // 4 => 1 + + probe0.request(10) + probe1.request(10) + testSource.sendNext(0) + probe0.expectNext(0) + testSource.sendNext(1) + probe1.expectNext(1) + testSource.sendNext(2) + probe0.expectNext(2) + testSource.sendNext(3) + probe0.expectNext(3) + testSource.sendNext(4) + probe1.expectNext(4) + + testSource.sendComplete() + probe0.expectComplete() + probe1.expectComplete() + } + + "backpressure" in assertAllStagesStopped { + val (testSource, hub) = TestSource.probe[Int].toMat( + PartitionHub.sink((size, elem) ⇒ 0, startAfterNrOfConsumers = 2, bufferSize = 4))(Keep.both).run() + val probe0 = hub.runWith(TestSink.probe[Int]) + val probe1 = hub.runWith(TestSink.probe[Int]) + probe0.request(10) + probe1.request(10) + testSource.sendNext(0) + probe0.expectNext(0) + testSource.sendNext(1) + probe0.expectNext(1) + testSource.sendNext(2) + probe0.expectNext(2) + testSource.sendNext(3) + probe0.expectNext(3) + testSource.sendNext(4) + probe0.expectNext(4) + + testSource.sendComplete() + probe0.expectComplete() + probe1.expectComplete() + } + + "ensure that from two different speed consumers the slower controls the rate" in assertAllStagesStopped { + val (firstElem, source) = Source.maybe[Int].concat(Source(1 until 20)).toMat( + PartitionHub.sink((size, elem) ⇒ elem % size, startAfterNrOfConsumers = 2, bufferSize = 1))(Keep.both).run() + + val f1 = source.throttle(1, 10.millis, 1, ThrottleMode.shaping).runWith(Sink.seq) + // Second cannot be overwhelmed since the first one throttles the overall rate, and second allows a higher rate + val f2 = source.throttle(10, 10.millis, 8, ThrottleMode.enforcing).runWith(Sink.seq) + + // Ensure subscription of Sinks. This is racy but there is no event we can hook into here. 
+ Thread.sleep(100) + firstElem.success(Some(0)) + f1.futureValue should ===(0 to 18 by 2) + f2.futureValue should ===(1 to 19 by 2) + + } + + "properly signal error to consumers" in assertAllStagesStopped { + val upstream = TestPublisher.probe[Int]() + val source = Source.fromPublisher(upstream).runWith( + PartitionHub.sink((size, elem) ⇒ elem % size, startAfterNrOfConsumers = 2, bufferSize = 8)) + + val downstream1 = TestSubscriber.probe[Int]() + source.runWith(Sink.fromSubscriber(downstream1)) + val downstream2 = TestSubscriber.probe[Int]() + source.runWith(Sink.fromSubscriber(downstream2)) + + downstream1.request(4) + downstream2.request(8) + + (0 until 16) foreach (upstream.sendNext(_)) + + downstream1.expectNext(0, 2, 4, 6) + downstream2.expectNext(1, 3, 5, 7, 9, 11, 13, 15) + + downstream1.expectNoMsg(100.millis) + downstream2.expectNoMsg(100.millis) + + upstream.sendError(TE("Failed")) + + downstream1.expectError(TE("Failed")) + downstream2.expectError(TE("Failed")) + } + + "properly signal completion to consumers arriving after producer finished" in assertAllStagesStopped { + val source = Source.empty[Int].runWith(PartitionHub.sink((size, elem) ⇒ elem % size, startAfterNrOfConsumers = 0)) + // Wait enough so the Hub gets the completion. This is racy, but this is fine because both + // cases should work in the end + Thread.sleep(10) + + source.runWith(Sink.seq).futureValue should ===(Nil) + } + + "remember completion for materialisations after completion" in { + + val (sourceProbe, source) = TestSource.probe[Unit].toMat( + PartitionHub.sink((size, elem) ⇒ 0, startAfterNrOfConsumers = 0))(Keep.both).run() + val sinkProbe = source.runWith(TestSink.probe[Unit]) + + sourceProbe.sendComplete() + + sinkProbe.request(1) + sinkProbe.expectComplete() + + // Materialize a second time. There was a race here, where we managed to enqueue our Source registration just + // immediately before the Hub shut down. + val sink2Probe = source.runWith(TestSink.probe[Unit]) + + sink2Probe.request(1) + sink2Probe.expectComplete() + } + + "properly signal error to consumers arriving after producer finished" in assertAllStagesStopped { + val source = Source.failed[Int](TE("Fail!")).runWith( + PartitionHub.sink((size, elem) ⇒ 0, startAfterNrOfConsumers = 0)) + // Wait enough so the Hub gets the failure. 
This is racy, but this is fine because both + // cases should work in the end + Thread.sleep(10) + + a[TE] shouldBe thrownBy { + Await.result(source.runWith(Sink.seq), 3.seconds) + } + } + + } + } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala index 74ae72586b..70f80a1dd3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala @@ -297,6 +297,21 @@ class JsonFramingSpec extends AkkaSpec { | "key": "\"" | }""".stripMargin } + + "successfully parse a string that contains escape sequence" in { + val buffer = new JsonObjectParser() + buffer.offer(ByteString( + """ + |{ + | "key": "\\\"" + | } + | """.stripMargin + )) + + buffer.poll().get.utf8String shouldBe """{ + | "key": "\\\"" + | }""".stripMargin + } } "has nested array" should { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySourceSpec.scala index c91d7d9fd0..3dcfe4309c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySourceSpec.scala @@ -91,31 +91,6 @@ class LazySourceSpec extends StreamSpec with DefaultTimeout with ScalaFutures { outProbe.expectError() shouldEqual TE("OMG Who set that on fire!?!") } - val attributesSource = Source.fromGraph( - new GraphStage[SourceShape[Attributes]] { - val out = Outlet[Attributes]("AttributesSource.out") - override val shape: SourceShape[Attributes] = SourceShape(out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { - override def onPull(): Unit = { - push(out, inheritedAttributes) - completeStage() - } - setHandler(out, this) - } - }) - - "propagate attributes to inner streams" in assertAllStagesStopped { - val f = Source.single(attributesSource.addAttributes(Attributes.name("inner"))) - .flatMapMerge(1, identity) - .addAttributes(Attributes.name("outer")) - .runWith(Sink.head) - - val attributes = f.futureValue.attributeList - attributes should contain(Attributes.Name("inner")) - attributes should contain(Attributes.Name("outer")) - attributes.indexOf(Attributes.Name("inner")) < attributes.indexOf(Attributes.Name("outer")) should be(true) - } - "fail correctly when materialization of inner source fails" in assertAllStagesStopped { val matFail = TE("fail!") object FailingInnerMat extends GraphStage[SourceShape[String]] { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/MaybeSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/MaybeSourceSpec.scala new file mode 100644 index 0000000000..6d9cd2b927 --- /dev/null +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/MaybeSourceSpec.scala @@ -0,0 +1,101 @@ +/** + * Copyright (C) 2014-2017 Lightbend Inc. 
+ */ +package akka.stream.scaladsl + +import akka.stream.{ AbruptStageTerminationException, ActorMaterializer } +import akka.stream.testkit.{ StreamSpec, TestSubscriber, Utils } +import akka.testkit.DefaultTimeout +import org.scalatest.time.{ Millis, Span } + +import scala.concurrent.duration._ +import scala.concurrent.Await +import scala.util.Failure +import scala.util.control.NoStackTrace + +class MaybeSourceSpec extends StreamSpec with DefaultTimeout { + + implicit val materializer = ActorMaterializer() + + "The Maybe Source" must { + + "complete materialized future with None when stream cancels" in Utils.assertAllStagesStopped { + val neverSource = Source.maybe[Int] + val pubSink = Sink.asPublisher[Int](false) + + val (f, neverPub) = neverSource.toMat(pubSink)(Keep.both).run() + + val c = TestSubscriber.manualProbe[Int]() + neverPub.subscribe(c) + val subs = c.expectSubscription() + + subs.request(1000) + c.expectNoMsg(300.millis) + + subs.cancel() + f.future.futureValue shouldEqual None + } + + "allow external triggering of empty completion" in Utils.assertAllStagesStopped { + val neverSource = Source.maybe[Int].filter(_ ⇒ false) + val counterSink = Sink.fold[Int, Int](0) { (acc, _) ⇒ acc + 1 } + + val (neverPromise, counterFuture) = neverSource.toMat(counterSink)(Keep.both).run() + + // external cancellation + neverPromise.trySuccess(None) shouldEqual true + + counterFuture.futureValue shouldEqual 0 + } + + "allow external triggering of empty completion when there was no demand" in Utils.assertAllStagesStopped { + val probe = TestSubscriber.probe[Int]() + val promise = Source.maybe[Int].to(Sink.fromSubscriber(probe)).run() + + // external cancellation + probe.ensureSubscription() + promise.trySuccess(None) shouldEqual true + probe.expectComplete() + } + + "allow external triggering of non-empty completion" in Utils.assertAllStagesStopped { + val neverSource = Source.maybe[Int] + val counterSink = Sink.head[Int] + + val (neverPromise, counterFuture) = neverSource.toMat(counterSink)(Keep.both).run() + + // external cancellation + neverPromise.trySuccess(Some(6)) shouldEqual true + + counterFuture.futureValue shouldEqual 6 + } + + "allow external triggering of onError" in Utils.assertAllStagesStopped { + val neverSource = Source.maybe[Int] + val counterSink = Sink.fold[Int, Int](0) { (acc, _) ⇒ acc + 1 } + + val (neverPromise, counterFuture) = neverSource.toMat(counterSink)(Keep.both).run() + + // external cancellation + neverPromise.tryFailure(new Exception("Boom") with NoStackTrace) shouldEqual true + + counterFuture.failed.futureValue.getMessage should include("Boom") + } + + "complete materialized future when materializer is shutdown" in Utils.assertAllStagesStopped { + val mat = ActorMaterializer() + val neverSource = Source.maybe[Int] + val pubSink = Sink.asPublisher[Int](false) + + val (f, neverPub) = neverSource.toMat(pubSink)(Keep.both).run()(mat) + + val c = TestSubscriber.manualProbe[Int]() + neverPub.subscribe(c) + val subs = c.expectSubscription() + + mat.shutdown() + f.future.failed.futureValue shouldBe an[AbruptStageTerminationException] + } + + } +} \ No newline at end of file diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala index 2687ef82cc..5eea065e34 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala @@ -3,17 +3,18 @@ */ package 
akka.stream.scaladsl +import akka.Done import akka.actor.Status import akka.pattern.pipe import akka.stream._ import akka.stream.impl.QueueSource +import akka.stream.testkit.{ GraphStageMessages, StreamSpec, TestSourceStage, TestSubscriber } +import akka.stream.testkit.scaladsl.TestSink import akka.stream.testkit.Utils._ import akka.testkit.TestProbe + import scala.concurrent.duration._ import scala.concurrent._ -import akka.Done -import akka.stream.testkit.{ StreamSpec, TestSubscriber, TestSourceStage, GraphStageMessages } -import akka.stream.testkit.scaladsl.TestSink import org.scalatest.time.Span class QueueSourceSpec extends StreamSpec { @@ -174,6 +175,15 @@ class QueueSourceSpec extends StreamSpec { expectMsgClass(classOf[Status.Failure]) } + "complete watching future with failure if materializer shut down" in assertAllStagesStopped { + val tempMap = ActorMaterializer() + val s = TestSubscriber.manualProbe[Int]() + val queue = Source.queue(1, OverflowStrategy.fail).to(Sink.fromSubscriber(s)).run()(tempMap) + queue.watchCompletion().pipeTo(testActor) + tempMap.shutdown() + expectMsgClass(classOf[Status.Failure]) + } + "return false when elemen was not added to buffer" in assertAllStagesStopped { val s = TestSubscriber.manualProbe[Int]() val queue = Source.queue(1, OverflowStrategy.dropNew).to(Sink.fromSubscriber(s)).run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala new file mode 100644 index 0000000000..ccbfddc9b2 --- /dev/null +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala @@ -0,0 +1,491 @@ +/** + * Copyright (C) 2015-2017 Lightbend Inc. + */ +package akka.stream.scaladsl + +import java.util.concurrent.atomic.AtomicInteger + +import akka.Done +import akka.stream.ActorMaterializer +import akka.stream.testkit.StreamSpec +import akka.stream.testkit.Utils.{ TE, assertAllStagesStopped } +import akka.stream.testkit.scaladsl.{ TestSink, TestSource } +import akka.testkit.DefaultTimeout + +import scala.concurrent.Promise +import scala.concurrent.duration._ +import scala.util.{ Failure, Success } + +class RestartSpec extends StreamSpec with DefaultTimeout { + + implicit val mat = ActorMaterializer() + import system.dispatcher + + "A restart with backoff source" should { + "run normally" in assertAllStagesStopped { + val created = new AtomicInteger() + val probe = RestartSource.withBackoff(10.millis, 20.millis, 0) { () ⇒ + created.incrementAndGet() + Source.repeat("a") + }.runWith(TestSink.probe) + + probe.requestNext("a") + probe.requestNext("a") + probe.requestNext("a") + probe.requestNext("a") + probe.requestNext("a") + + created.get() should ===(1) + + probe.cancel() + } + + "restart on completion" in assertAllStagesStopped { + val created = new AtomicInteger() + val probe = RestartSource.withBackoff(10.millis, 20.millis, 0) { () ⇒ + created.incrementAndGet() + Source(List("a", "b")) + }.runWith(TestSink.probe) + + probe.requestNext("a") + probe.requestNext("b") + probe.requestNext("a") + probe.requestNext("b") + probe.requestNext("a") + + created.get() should ===(3) + + probe.cancel() + } + + "restart on failure" in assertAllStagesStopped { + val created = new AtomicInteger() + val probe = RestartSource.withBackoff(10.millis, 20.millis, 0) { () ⇒ + created.incrementAndGet() + Source(List("a", "b", "c")) + .map { + case "c" ⇒ throw TE("failed") + case other ⇒ other + } + }.runWith(TestSink.probe) + + probe.requestNext("a") + probe.requestNext("b") 
+ probe.requestNext("a") + probe.requestNext("b") + probe.requestNext("a") + + created.get() should ===(3) + + probe.cancel() + } + + "backoff before restart" in assertAllStagesStopped { + val created = new AtomicInteger() + val probe = RestartSource.withBackoff(200.millis, 1.second, 0) { () ⇒ + created.incrementAndGet() + Source(List("a", "b")) + }.runWith(TestSink.probe) + + probe.requestNext("a") + probe.requestNext("b") + probe.request(1) + // There should be a delay of at least 200ms before we receive the element, wait for 100ms. + val deadline = 100.millis.fromNow + // But the delay shouldn't be more than 300ms. + probe.expectNext(300.milliseconds, "a") + deadline.isOverdue() should be(true) + + created.get() should ===(2) + + probe.cancel() + } + + "reset exponential backoff back to minimum when source runs for at least minimum backoff without completing" in assertAllStagesStopped { + val created = new AtomicInteger() + val probe = RestartSource.withBackoff(200.millis, 2.seconds, 0) { () ⇒ + created.incrementAndGet() + Source(List("a", "b")) + }.runWith(TestSink.probe) + + probe.requestNext("a") + probe.requestNext("b") + // There should be a 200ms delay + probe.requestNext("a") + probe.requestNext("b") + probe.request(1) + // The probe should now be backing off for 400ms + + // Now wait for the 400ms delay to pass, then it will start the new source, we also want to wait for the + // subsequent 200ms min backoff to pass, so it resets the restart count + Thread.sleep(700) + + probe.expectNext("a") + probe.requestNext("b") + + // We should have reset, so the restart delay should be back to 200ms, ie we should definitely receive the + // next element within 300ms + probe.requestNext(300.milliseconds) should ===("a") + + created.get() should ===(4) + + probe.cancel() + } + + "cancel the currently running source when cancelled" in assertAllStagesStopped { + val created = new AtomicInteger() + val promise = Promise[Done]() + val probe = RestartSource.withBackoff(10.millis, 2.seconds, 0) { () ⇒ + created.incrementAndGet() + Source.repeat("a").watchTermination() { (_, term) ⇒ + promise.completeWith(term) + } + }.runWith(TestSink.probe) + + probe.requestNext("a") + probe.cancel() + + promise.future.futureValue should ===(Done) + + // Wait to ensure it isn't restarted + Thread.sleep(200) + created.get() should ===(1) + } + + "not restart the source when cancelled while backing off" in assertAllStagesStopped { + val created = new AtomicInteger() + val probe = RestartSource.withBackoff(200.millis, 2.seconds, 0) { () ⇒ + created.incrementAndGet() + Source.single("a") + }.runWith(TestSink.probe) + + probe.requestNext("a") + probe.request(1) + // Should be backing off now + probe.cancel() + + // Wait to ensure it isn't restarted + Thread.sleep(300) + created.get() should ===(1) + } + } + + "A restart with backoff sink" should { + "run normally" in assertAllStagesStopped { + val created = new AtomicInteger() + val result = Promise[Seq[String]]() + val probe = TestSource.probe[String].toMat(RestartSink.withBackoff(10.millis, 20.millis, 0) { () ⇒ + created.incrementAndGet() + Sink.seq.mapMaterializedValue(result.completeWith) + })(Keep.left).run() + + probe.sendNext("a") + probe.sendNext("b") + probe.sendNext("c") + probe.sendComplete() + + result.future.futureValue should contain inOrderOnly ("a", "b", "c") + created.get() should ===(1) + } + + "restart on cancellation" in assertAllStagesStopped { + val created = new AtomicInteger() + val (queue, sinkProbe) = 
TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run() + val probe = TestSource.probe[String].toMat(RestartSink.withBackoff(10.millis, 20.millis, 0) { () ⇒ + created.incrementAndGet() + Flow[String].takeWhile(_ != "cancel", inclusive = true) + .to(Sink.foreach(queue.sendNext)) + })(Keep.left).run() + + probe.sendNext("a") + sinkProbe.requestNext("a") + probe.sendNext("b") + sinkProbe.requestNext("b") + probe.sendNext("cancel") + sinkProbe.requestNext("cancel") + probe.sendNext("c") + sinkProbe.requestNext("c") + + created.get() should ===(2) + + sinkProbe.cancel() + probe.sendComplete() + } + + "backoff before restart" in assertAllStagesStopped { + val created = new AtomicInteger() + val (queue, sinkProbe) = TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run() + val probe = TestSource.probe[String].toMat(RestartSink.withBackoff(200.millis, 2.seconds, 0) { () ⇒ + created.incrementAndGet() + Flow[String].takeWhile(_ != "cancel", inclusive = true) + .to(Sink.foreach(queue.sendNext)) + })(Keep.left).run() + + probe.sendNext("a") + sinkProbe.requestNext("a") + probe.sendNext("cancel") + sinkProbe.requestNext("cancel") + probe.sendNext("b") + sinkProbe.request(1) + val deadline = 100.millis.fromNow + sinkProbe.expectNext(300.millis, "b") + deadline.isOverdue() should be(true) + + created.get() should ===(2) + + sinkProbe.cancel() + probe.sendComplete() + } + + "reset exponential backoff back to minimum when sink runs for at least minimum backoff without completing" in assertAllStagesStopped { + val created = new AtomicInteger() + val (queue, sinkProbe) = TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run() + val probe = TestSource.probe[String].toMat(RestartSink.withBackoff(200.millis, 2.seconds, 0) { () ⇒ + created.incrementAndGet() + Flow[String].takeWhile(_ != "cancel", inclusive = true) + .to(Sink.foreach(queue.sendNext)) + })(Keep.left).run() + + probe.sendNext("a") + sinkProbe.requestNext("a") + probe.sendNext("cancel") + sinkProbe.requestNext("cancel") + // There should be a 200ms delay + probe.sendNext("b") + sinkProbe.requestNext("b") + probe.sendNext("cancel") + sinkProbe.requestNext("cancel") + sinkProbe.request(1) + // The probe should now be backing off for 400ms + + // Now wait for the 400ms delay to pass, then it will start the new source, we also want to wait for the + // subsequent 200ms min backoff to pass, so it resets the restart count + Thread.sleep(700) + + probe.sendNext("cancel") + sinkProbe.requestNext("cancel") + + // We should have reset, so the restart delay should be back to 200ms, ie we should definitely receive the + // next element within 300ms + probe.sendNext("c") + sinkProbe.request(1) + sinkProbe.expectNext(300.milliseconds, "c") + + created.get() should ===(4) + + sinkProbe.cancel() + probe.sendComplete() + } + + "not restart the sink when completed while backing off" in assertAllStagesStopped { + val created = new AtomicInteger() + val (queue, sinkProbe) = TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run() + val probe = TestSource.probe[String].toMat(RestartSink.withBackoff(200.millis, 2.seconds, 0) { () ⇒ + created.incrementAndGet() + Flow[String].takeWhile(_ != "cancel", inclusive = true) + .to(Sink.foreach(queue.sendNext)) + })(Keep.left).run() + + probe.sendNext("a") + sinkProbe.requestNext("a") + probe.sendNext("cancel") + sinkProbe.requestNext("cancel") + // Should be backing off now + probe.sendComplete() + + // Wait to ensure it isn't restarted + Thread.sleep(300) + created.get() should ===(1) + + 
sinkProbe.cancel() + } + } + + "A restart with backoff flow" should { + + def setupFlow(minBackoff: FiniteDuration, maxBackoff: FiniteDuration) = { + val created = new AtomicInteger() + val (flowInSource, flowInProbe) = TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run() + val (flowOutProbe, flowOutSource) = TestSource.probe[String].toMat(BroadcastHub.sink)(Keep.both).run() + + // We can't just use ordinary probes here because we're expecting them to get started/restarted. Instead, we + // simply use the probes as a message bus for feeding and capturing events. + val (source, sink) = TestSource.probe[String].viaMat(RestartFlow.withBackoff(minBackoff, maxBackoff, 0) { () ⇒ + created.incrementAndGet() + Flow.fromSinkAndSource( + Flow[String].takeWhile(_ != "cancel").to(Sink.foreach(flowInSource.sendNext).mapMaterializedValue(_.onComplete { + case Success(_) ⇒ flowInSource.sendNext("in complete") + case Failure(_) ⇒ flowInSource.sendNext("in error") + })), + flowOutSource.takeWhile(_ != "complete").map { + case "error" ⇒ throw TE("error") + case other ⇒ other + }.watchTermination()((_, term) ⇒ + term.foreach(_ ⇒ { + flowInSource.sendNext("out complete") + }) + ) + ) + })(Keep.left).toMat(TestSink.probe[String])(Keep.both).run() + + (created, source, flowInProbe, flowOutProbe, sink) + } + + "run normally" in assertAllStagesStopped { + val created = new AtomicInteger() + val (source, sink) = TestSource.probe[String].viaMat(RestartFlow.withBackoff(10.millis, 20.millis, 0) { () ⇒ + created.incrementAndGet() + Flow[String] + })(Keep.left).toMat(TestSink.probe[String])(Keep.both).run() + + source.sendNext("a") + sink.requestNext("a") + source.sendNext("b") + sink.requestNext("b") + + created.get() should ===(1) + + source.sendComplete() + } + + "restart on cancellation" in { + val (created, source, flowInProbe, flowOutProbe, sink) = setupFlow(10.millis, 20.millis) + + source.sendNext("a") + flowInProbe.requestNext("a") + flowOutProbe.sendNext("b") + sink.requestNext("b") + + source.sendNext("cancel") + // This will complete the flow in probe and cancel the flow out probe + flowInProbe.request(2) + Seq(flowInProbe.expectNext(), flowInProbe.expectNext()) should contain only ("in complete", "out complete") + + // and it should restart + source.sendNext("c") + flowInProbe.requestNext("c") + flowOutProbe.sendNext("d") + sink.requestNext("d") + + created.get() should ===(2) + } + + "restart on completion" in { + val (created, source, flowInProbe, flowOutProbe, sink) = setupFlow(10.millis, 20.millis) + + source.sendNext("a") + flowInProbe.requestNext("a") + flowOutProbe.sendNext("b") + sink.requestNext("b") + + sink.request(1) + flowOutProbe.sendNext("complete") + + // This will complete the flow in probe and cancel the flow out probe + flowInProbe.request(2) + Seq(flowInProbe.expectNext(), flowInProbe.expectNext()) should contain only ("in complete", "out complete") + + // and it should restart + source.sendNext("c") + flowInProbe.requestNext("c") + flowOutProbe.sendNext("d") + sink.requestNext("d") + + created.get() should ===(2) + } + + "restart on failure" in { + val (created, source, flowInProbe, flowOutProbe, sink) = setupFlow(10.millis, 20.millis) + + source.sendNext("a") + flowInProbe.requestNext("a") + flowOutProbe.sendNext("b") + sink.requestNext("b") + + sink.request(1) + flowOutProbe.sendNext("error") + + // This should complete the in probe + flowInProbe.requestNext("in complete") + + // and it should restart + source.sendNext("c") + flowInProbe.requestNext("c") + 
flowOutProbe.sendNext("d") + sink.requestNext("d") + + created.get() should ===(2) + } + + "backoff before restart" in { + val (created, source, flowInProbe, flowOutProbe, sink) = setupFlow(200.millis, 2.seconds) + + source.sendNext("a") + flowInProbe.requestNext("a") + flowOutProbe.sendNext("b") + sink.requestNext("b") + + source.sendNext("cancel") + // This will complete the flow in probe and cancel the flow out probe + flowInProbe.request(2) + Seq(flowInProbe.expectNext(), flowInProbe.expectNext()) should contain only ("in complete", "out complete") + + source.sendNext("c") + flowInProbe.request(1) + val deadline = 100.millis.fromNow + flowInProbe.expectNext(300.millis, "c") + deadline.isOverdue() should be(true) + + created.get() should ===(2) + } + + "continue running flow out port after in has been sent completion" in { + val (created, source, flowInProbe, flowOutProbe, sink) = setupFlow(20.millis, 40.seconds) + + source.sendNext("a") + flowInProbe.requestNext("a") + flowOutProbe.sendNext("b") + sink.requestNext("b") + + source.sendComplete() + flowInProbe.requestNext("in complete") + + flowOutProbe.sendNext("c") + sink.requestNext("c") + flowOutProbe.sendNext("d") + sink.requestNext("d") + + sink.request(1) + flowOutProbe.sendComplete() + flowInProbe.requestNext("out complete") + sink.expectComplete() + + created.get() should ===(1) + } + + "continue running flow in port after out has been cancelled" in { + val (created, source, flowInProbe, flowOutProbe, sink) = setupFlow(20.millis, 40.seconds) + + source.sendNext("a") + flowInProbe.requestNext("a") + flowOutProbe.sendNext("b") + sink.requestNext("b") + + sink.cancel() + flowInProbe.requestNext("out complete") + + source.sendNext("c") + flowInProbe.requestNext("c") + source.sendNext("d") + flowInProbe.requestNext("d") + + source.sendNext("cancel") + flowInProbe.requestNext("in complete") + source.expectCancellation() + + created.get() should ===(1) + } + + } + +} diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala index 82985a91b5..c354a908eb 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala @@ -64,79 +64,6 @@ class SourceSpec extends StreamSpec with DefaultTimeout { } } - "Failed Source" must { - "emit error immediately" in { - val ex = new RuntimeException with NoStackTrace - val p = Source.failed(ex).runWith(Sink.asPublisher(false)) - val c = TestSubscriber.manualProbe[Int]() - p.subscribe(c) - c.expectSubscriptionAndError(ex) - - // reject additional subscriber - val c2 = TestSubscriber.manualProbe[Int]() - p.subscribe(c2) - c2.expectSubscriptionAndError() - } - } - - "Maybe Source" must { - "complete materialized future with None when stream cancels" in Utils.assertAllStagesStopped { - val neverSource = Source.maybe[Int] - val pubSink = Sink.asPublisher[Int](false) - - val (f, neverPub) = neverSource.toMat(pubSink)(Keep.both).run() - - val c = TestSubscriber.manualProbe[Int]() - neverPub.subscribe(c) - val subs = c.expectSubscription() - - subs.request(1000) - c.expectNoMsg(300.millis) - - subs.cancel() - Await.result(f.future, 3.seconds) shouldEqual None - } - - "allow external triggering of empty completion" in Utils.assertAllStagesStopped { - val neverSource = Source.maybe[Int].filter(_ ⇒ false) - val counterSink = Sink.fold[Int, Int](0) { (acc, _) ⇒ acc + 1 } - - val (neverPromise, counterFuture) = 
neverSource.toMat(counterSink)(Keep.both).run() - - // external cancellation - neverPromise.trySuccess(None) shouldEqual true - - Await.result(counterFuture, 3.seconds) shouldEqual 0 - } - - "allow external triggering of non-empty completion" in Utils.assertAllStagesStopped { - val neverSource = Source.maybe[Int] - val counterSink = Sink.head[Int] - - val (neverPromise, counterFuture) = neverSource.toMat(counterSink)(Keep.both).run() - - // external cancellation - neverPromise.trySuccess(Some(6)) shouldEqual true - - Await.result(counterFuture, 3.seconds) shouldEqual 6 - } - - "allow external triggering of onError" in Utils.assertAllStagesStopped { - val neverSource = Source.maybe[Int] - val counterSink = Sink.fold[Int, Int](0) { (acc, _) ⇒ acc + 1 } - - val (neverPromise, counterFuture) = neverSource.toMat(counterSink)(Keep.both).run() - - // external cancellation - neverPromise.failure(new Exception("Boom") with NoStackTrace) - - val ready = Await.ready(counterFuture, 3.seconds) - val Failure(ex) = ready.value.get - ex.getMessage should include("Boom") - } - - } - "Composite Source" must { "merge from many inputs" in { val probes = immutable.Seq.fill(5)(TestPublisher.manualProbe[Int]()) diff --git a/akka-stream/src/main/mima-filters/2.4.10.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.10.backwards.excludes new file mode 100644 index 0000000000..06eab81c14 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.10.backwards.excludes @@ -0,0 +1,5 @@ +# #21290 new zipWithIndex flow op +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.zipWithIndex") + +# class akka.stream.impl.fusing.Map is declared final in current version +ProblemFilters.exclude[FinalClassProblem]("akka.stream.impl.fusing.Map") diff --git a/akka-stream/src/main/mima-filters/2.4.11.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.11.backwards.excludes new file mode 100644 index 0000000000..fc214eaee2 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.11.backwards.excludes @@ -0,0 +1,15 @@ +# #20795 IOResult construction exposed +ProblemFilters.exclude[MissingTypesProblem]("akka.stream.IOResult$") + +# #21727 moved all of Unfold.scala in package akka.stream.impl +ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.UnfoldAsync") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.Unfold") + +# abstract method makeLogger(java.lang.Class)akka.event.LoggingAdapter in interface akka.stream.MaterializerLoggingProvider is inherited by class ActorMaterializer in current version. 
+ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.stream.MaterializerLoggingProvider.makeLogger") + +# #21330 takeWhile inclusive flag +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.takeWhile") + +# #21541 new ScanAsync flow op +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.scanAsync") diff --git a/akka-stream/src/main/mima-filters/2.4.12.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.12.backwards.excludes new file mode 100644 index 0000000000..3c058fb527 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.12.backwards.excludes @@ -0,0 +1,5 @@ +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.Materializer.materialize") + +# #20553 Tree flattening should be separate from Fusing +ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$StructuralInfo") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$StructuralInfo$") diff --git a/akka-stream/src/main/mima-filters/2.4.14.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.14.backwards.excludes new file mode 100644 index 0000000000..e7a9b4dd5a --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.14.backwards.excludes @@ -0,0 +1,6 @@ +# #21688 +ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$StructuralInfo$") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$StructuralInfo") + +# #21989 - add more information in tcp connection shutdown logs (add mapError) +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.mapError") diff --git a/akka-stream/src/main/mima-filters/2.4.17.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.17.backwards.excludes new file mode 100644 index 0000000000..d040ab0471 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.17.backwards.excludes @@ -0,0 +1,10 @@ +# #22711 changes to groupedWithin internal classes +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.groupedWeightedWithin") + +# #22657 changes to internal classes +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FilePublisher.props") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FilePublisher.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSink.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSource.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSubscriber.props") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSubscriber.this") diff --git a/akka-stream/src/main/mima-filters/2.4.2.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.2.backwards.excludes new file mode 100644 index 0000000000..29e3f53402 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.2.backwards.excludes @@ -0,0 +1,30 @@ +# this class is private +ProblemFilters.exclude[FinalClassProblem]("akka.stream.stage.GraphStageLogic$Reading") + +# lifting this method to the type where it belongs +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOpsMat.mapMaterializedValue") + +# #20009 internal and shouldn't have been public +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.QueueSource.completion") + +# #20015 simplify materialized value computation tree +ProblemFilters.exclude[FinalMethodProblem]("akka.stream.impl.StreamLayout#AtomicModule.subModules") 
+ProblemFilters.exclude[FinalMethodProblem]("akka.stream.impl.StreamLayout#AtomicModule.downstreams") +ProblemFilters.exclude[FinalMethodProblem]("akka.stream.impl.StreamLayout#AtomicModule.upstreams") +ProblemFilters.exclude[FinalMethodProblem]("akka.stream.impl.Stages#DirectProcessor.toString") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.impl.MaterializerSession.materializeAtomic") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.impl.MaterializerSession.materializeAtomic") +ProblemFilters.exclude[MissingTypesProblem]("akka.stream.impl.Stages$StageModule") +ProblemFilters.exclude[FinalMethodProblem]("akka.stream.impl.Stages#GroupBy.toString") +ProblemFilters.exclude[MissingTypesProblem]("akka.stream.impl.FlowModule") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.FlowModule.subModules") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.impl.FlowModule.label") +ProblemFilters.exclude[FinalClassProblem]("akka.stream.impl.fusing.GraphModule") + +# #19877 Source.queue termination support +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.impl.SourceQueueAdapter.this") + +# #19390 Add flow monitor +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOpsMat.monitor") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.fusing.GraphStages$TickSource$") + diff --git a/akka-stream/src/main/mima-filters/2.4.4.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.4.backwards.excludes new file mode 100644 index 0000000000..c2911f7d36 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.4.backwards.excludes @@ -0,0 +1,18 @@ +# #20123 +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.recoverWithRetries") + +# internal api +ProblemFilters.exclude[Problem]("akka.stream.impl.*") + +# #20131 - flow combinator +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.backpressureTimeout") + +# #19834 +ProblemFilters.exclude[MissingTypesProblem]("akka.stream.extra.Timed$StartTimed") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.extra.Timed#StartTimed.onPush") +ProblemFilters.exclude[MissingTypesProblem]("akka.stream.extra.Timed$TimedInterval") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.extra.Timed#TimedInterval.onPush") +ProblemFilters.exclude[MissingTypesProblem]("akka.stream.extra.Timed$StopTimed") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.extra.Timed#StopTimed.onPush") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.extra.Timed#StopTimed.onUpstreamFinish") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.extra.Timed#StopTimed.onUpstreamFailure") diff --git a/akka-stream/src/main/mima-filters/2.4.6.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.6.backwards.excludes new file mode 100644 index 0000000000..e751892ead --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.6.backwards.excludes @@ -0,0 +1,27 @@ +# #20229 migrate GroupBy to GraphStage +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.GraphDSL#Builder.deprecatedAndThen") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.deprecatedAndThen") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.deprecatedAndThenMat") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.deprecatedAndThen") 
+ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.FlowOps.deprecatedAndThen") + +# #20367 Converts DelimiterFramingStage from PushPullStage to GraphStage +ProblemFilters.exclude[MissingTypesProblem]("akka.stream.scaladsl.Framing$DelimiterFramingStage") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#DelimiterFramingStage.onPush") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#DelimiterFramingStage.onUpstreamFinish") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#DelimiterFramingStage.onPull") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#DelimiterFramingStage.postStop") + +# #20345 converts LengthFieldFramingStage to GraphStage +ProblemFilters.exclude[MissingTypesProblem]("akka.stream.scaladsl.Framing$LengthFieldFramingStage") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#LengthFieldFramingStage.onPush") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#LengthFieldFramingStage.onUpstreamFinish") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#LengthFieldFramingStage.onPull") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#LengthFieldFramingStage.postStop") + +# #20414 Allow different ActorMaterializer subtypes +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.ActorMaterializer.downcast") + +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.scaladsl.TLS.apply$default$5") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.TLS.apply$default$4") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.GraphDSL#Implicits#PortOpsImpl.deprecatedAndThen") diff --git a/akka-stream/src/main/mima-filters/2.4.7.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.7.backwards.excludes new file mode 100644 index 0000000000..791d4db9e6 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.7.backwards.excludes @@ -0,0 +1 @@ +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.ActorMaterializer.downcast") diff --git a/akka-stream/src/main/mima-filters/2.4.8.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.8.backwards.excludes new file mode 100644 index 0000000000..904a06e541 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.8.backwards.excludes @@ -0,0 +1,11 @@ +# #20543 GraphStage subtypes should not be private to akka +ProblemFilters.exclude[DirectAbstractMethodProblem]("akka.stream.ActorMaterializer.actorOf") + +# Interpreter internals change +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.stage.GraphStageLogic.portToConn") + +ProblemFilters.exclude[DirectAbstractMethodProblem]("akka.stream.ActorMaterializer.actorOf") + +# #20630 corrected return types of java methods +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.javadsl.RunnableGraph#RunnableGraphAdapter.named") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.javadsl.RunnableGraph.withAttributes") diff --git a/akka-stream/src/main/mima-filters/2.4.9.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.9.backwards.excludes new file mode 100644 index 0000000000..d433a6fba7 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.9.backwards.excludes @@ -0,0 +1,11 @@ +# #21025 new orElse flow op 
+ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.orElseGraph") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.orElse") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOpsMat.orElseMat") + +# #20888 new FoldAsync op for Flow +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.foldAsync") + +# method ChaseLimit()Int in object akka.stream.impl.fusing.GraphInterpreter does not have a correspondent in current version +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.fusing.GraphInterpreter.ChaseLimit") + diff --git a/akka-stream/src/main/mima-filters/2.4.x.backwards.excludes b/akka-stream/src/main/mima-filters/2.4.x.backwards.excludes new file mode 100644 index 0000000000..7a2930e9d1 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.4.x.backwards.excludes @@ -0,0 +1,101 @@ +# #21423 removal of deprecated stages (in 2.5.x) +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.Source.transform") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.SubSource.transform") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.Flow.transform") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.SubFlow.transform") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.FlowOpsMat.transformMaterializing") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.transform") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.transformMaterializing") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.andThen") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.transform") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.transformMaterializing") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.andThen") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.FlowOps.transform") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.FlowOps.andThen") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.Directive") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AsyncDirective") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.TerminationDirective") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AbstractStage$") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$Become$") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AbstractStage$PushPullGraphStage") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$EmittingState$") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AbstractStage") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AbstractStage$PushPullGraphLogic") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.Context") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.Stage") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.DetachedStage") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$Become") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StageState") 
+ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AbstractStage$PushPullGraphStageWithMaterializedValue") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.DownstreamDirective") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.PushPullStage") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.LifecycleContext") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$EmittingState") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.PushStage") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.DetachedContext") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$State") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.UpstreamDirective") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.FreeDirective") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$AndThen") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.SyncDirective") + +# deprecated method transform(scala.Function0)akka.stream.scaladsl.FlowOps in class akka.stream.scaladsl.GraphDSL#Implicits#PortOpsImpl does not have a correspondent in current version +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.GraphDSL#Implicits#PortOpsImpl.transform") +# method andThen(akka.stream.impl.Stages#SymbolicStage)akka.stream.scaladsl.FlowOps in class akka.stream.scaladsl.GraphDSL#Implicits#PortOpsImpl does not have a correspondent in current version +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.GraphDSL#Implicits#PortOpsImpl.andThen") +# object akka.stream.stage.StatefulStage#Stay does not have a correspondent in current version +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$Stay$") +# object akka.stream.stage.StatefulStage#Finish does not have a correspondent in current version +ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$Finish$") + +# implementation classes +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.SubFlowImpl.transform") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.SubFlowImpl.andThen") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.Stages$SymbolicGraphStage$") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.Stages$SymbolicStage") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.Stages$SymbolicGraphStage") + +# new materializer changes relating to old module structure +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.BidiShape.copyFromPorts") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.BidiShape.reversed") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.MaterializationContext.stageName") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.SinkShape.copyFromPorts") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.Shape.copyFromPorts") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.ClosedShape.copyFromPorts") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$FusedGraph$") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.Attributes.extractName") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.AmorphousShape.copyFromPorts") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.SourceShape.copyFromPorts") 
+ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$FusedGraph") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.FlowShape.copyFromPorts") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.Graph.module") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.Graph.traversalBuilder") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.Source.module") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.RunnableGraph#RunnableGraphAdapter.module") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.BidiFlow.module") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.Sink.module") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.Flow.module") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Sink.module") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Sink.this") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.scaladsl.RunnableGraph.apply") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.GraphApply$GraphImpl") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.RunnableGraph.module") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.scaladsl.RunnableGraph.copy") +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.scaladsl.RunnableGraph.copy$default$1") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.scaladsl.RunnableGraph.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.BidiFlow.module") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.BidiFlow.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.GraphDSL#Builder.module") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.module") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.this") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.GraphApply$") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.module") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.stage.GraphStageWithMaterializedValue.module") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.ModuleExtractor") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.ModuleExtractor$") +ProblemFilters.exclude[Problem]("akka.stream.impl.*") diff --git a/akka-stream/src/main/mima-filters/2.5.0.backwards.excludes b/akka-stream/src/main/mima-filters/2.5.0.backwards.excludes new file mode 100644 index 0000000000..e1a355bf70 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.5.0.backwards.excludes @@ -0,0 +1,13 @@ +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.groupedWeightedWithin") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSubscriber.props") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSource.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSink.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FilePublisher.props") 
+ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSubscriber.this") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FilePublisher.this") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.fusing.GroupedWithin") + +ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.stream.Graph.traversalBuilder") +ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.stream.Graph.named") +ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.stream.Graph.addAttributes") +ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.stream.Graph.async") diff --git a/akka-stream/src/main/mima-filters/2.5.2.backwards.excludes b/akka-stream/src/main/mima-filters/2.5.2.backwards.excludes new file mode 100644 index 0000000000..5fb0fb549d --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.5.2.backwards.excludes @@ -0,0 +1,2 @@ +# #23144 recoverWithRetries cleanup +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.fusing.RecoverWith.InfiniteRetries") diff --git a/akka-stream/src/main/mima-filters/2.5.3.backwards.excludes b/akka-stream/src/main/mima-filters/2.5.3.backwards.excludes new file mode 100644 index 0000000000..9e77d2e963 --- /dev/null +++ b/akka-stream/src/main/mima-filters/2.5.3.backwards.excludes @@ -0,0 +1,10 @@ +# #22789 Source.maybe rewritten as a graph stage +ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.MaybePublisher") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.MaybePublisher$MaybeSubscription") +ProblemFilters.exclude[MissingTypesProblem]("akka.stream.impl.MaybeSource") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.MaybeSource.newInstance") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.MaybeSource.withAttributes") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.MaybeSource.attributes") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.MaybeSource.create") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.MaybeSource.this") +ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.MaybePublisher$") diff --git a/akka-stream/src/main/resources/reference.conf b/akka-stream/src/main/resources/reference.conf index 896ead6861..efa1753195 100644 --- a/akka-stream/src/main/resources/reference.conf +++ b/akka-stream/src/main/resources/reference.conf @@ -5,7 +5,7 @@ akka { stream { - # Default flow materializer settings + # Default materializer settings materializer { # Initial size of buffers used in stream elements @@ -14,7 +14,7 @@ akka { max-input-buffer-size = 16 # Fully qualified config path which holds the dispatcher configuration - # to be used by FlowMaterialiser when creating Actors. + # to be used by ActorMaterializer when creating Actors. # When this value is left empty, the default-dispatcher will be used. dispatcher = "" @@ -75,7 +75,7 @@ akka { } # Fully qualified config path which holds the dispatcher configuration - # to be used by FlowMaterialiser when creating Actors for IO operations, + # to be used by ActorMaterializer when creating Actors for IO operations, # such as FileSource, FileSink and others. 
blocking-io-dispatcher = "akka.stream.default-blocking-io-dispatcher" diff --git a/akka-stream/src/main/scala/akka/stream/Attributes.scala b/akka-stream/src/main/scala/akka/stream/Attributes.scala index 09ad298760..4e85aeec37 100644 --- a/akka-stream/src/main/scala/akka/stream/Attributes.scala +++ b/akka-stream/src/main/scala/akka/stream/Attributes.scala @@ -297,15 +297,21 @@ object ActorAttributes { /** * Scala API: Decides how exceptions from user are to be handled. + * + * Stages supporting supervision strategies explicitly document that they do so. If a stage does not document + * support for these, it should be assumed it does not support supervision. */ def supervisionStrategy(decider: Supervision.Decider): Attributes = Attributes(SupervisionStrategy(decider)) /** * Java API: Decides how exceptions from application code are to be handled. + * + * Stages supporting supervision strategies explicitly document that they do so. If a stage does not document + * support for these, it should be assumed it does not support supervision. */ def withSupervisionStrategy(decider: function.Function[Throwable, Supervision.Directive]): Attributes = - ActorAttributes.supervisionStrategy(decider.apply _) + ActorAttributes.supervisionStrategy(decider.apply) /** * Java API diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala index ab446e9c62..340fe809f3 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala @@ -17,7 +17,7 @@ import scala.concurrent.duration.FiniteDuration import scala.concurrent.{ Await, ExecutionContextExecutor } /** - * ExtendedActorMaterializer used by subtypes which materializer using GraphInterpreterShell + * ExtendedActorMaterializer used by subtypes which delegates in-island wiring to [[akka.stream.impl.PhaseIsland]]s */ @DoNotInherit private[akka] abstract class ExtendedActorMaterializer extends ActorMaterializer { diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala index 93ad580c59..a03ded1458 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala @@ -32,12 +32,12 @@ import akka.annotation.InternalApi def receive = { case OnNext(elem) ⇒ - ref ! elem + ref.tell(elem, ActorRef.noSender) case OnError(cause) ⇒ - ref ! Status.Failure(cause) + ref.tell(Status.Failure(cause), ActorRef.noSender) context.stop(self) case OnComplete ⇒ - ref ! 
onCompleteMessage + ref.tell(onCompleteMessage, ActorRef.noSender) context.stop(self) case Terminated(`ref`) ⇒ context.stop(self) // will cancel upstream diff --git a/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala b/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala index ef9410378c..c01b7b4425 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala @@ -44,51 +44,6 @@ import scala.concurrent.{ ExecutionContext, Promise } override def toString: String = name } -/** - * INTERNAL API - */ -@InternalApi private[akka] final case class MaybePublisher[T]( - promise: Promise[Option[T]], - name: String)(implicit ec: ExecutionContext) extends Publisher[T] { - import ReactiveStreamsCompliance._ - - private[this] class MaybeSubscription(subscriber: Subscriber[_ >: T]) extends Subscription { - private[this] var done: Boolean = false - override def cancel(): Unit = { - done = true - promise.trySuccess(None) - } - - override def request(elements: Long): Unit = { - if (elements < 1) rejectDueToNonPositiveDemand(subscriber) - if (!done) { - done = true - promise.future foreach { - // We consciously do not catch SpecViolation here, it will be reported to the ExecutionContext - case Some(v) ⇒ - tryOnNext(subscriber, v) - tryOnComplete(subscriber) - case None ⇒ - tryOnComplete(subscriber) - } - } - } - } - - override def subscribe(subscriber: Subscriber[_ >: T]): Unit = - try { - requireNonNullSubscriber(subscriber) - tryOnSubscribe(subscriber, new MaybeSubscription(subscriber)) - promise.future.failed.foreach { - error ⇒ tryOnError(subscriber, error) - } - } catch { - case sv: SpecViolation ⇒ ec.reportFailure(sv) - } - - override def toString: String = name -} - /** * INTERNAL API * This is only a legal subscription when it is immediately followed by diff --git a/akka-stream/src/main/scala/akka/stream/impl/FailedSource.scala b/akka-stream/src/main/scala/akka/stream/impl/FailedSource.scala new file mode 100644 index 0000000000..8872d6c22a --- /dev/null +++ b/akka-stream/src/main/scala/akka/stream/impl/FailedSource.scala @@ -0,0 +1,31 @@ +/** + * Copyright (C) 2009-2017 Lightbend Inc. 
+ */ +package akka.stream.impl + +import akka.annotation.InternalApi +import akka.stream.{ Attributes, Outlet, SourceShape } +import akka.stream.impl.Stages.DefaultAttributes +import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler } + +/** + * INTERNAL API + */ +@InternalApi private[akka] final class FailedSource[T](failure: Throwable) extends GraphStage[SourceShape[T]] { + val out = Outlet[T]("FailedSource.out") + override val shape = SourceShape(out) + + override protected def initialAttributes: Attributes = DefaultAttributes.failedSource + + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { + + override def onPull(): Unit = () + + override def preStart(): Unit = { + failStage(failure) + } + setHandler(out, this) + } + + override def toString = s"FailedSource(${failure.getClass.getName})" +} \ No newline at end of file diff --git a/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala b/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala index c8320c57d3..530f1120db 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala @@ -120,7 +120,7 @@ import scala.annotation.switch pos += 1 trimFront += 1 } else if (input == Backslash) { - if (lastInput == Backslash) isStartOfEscapeSequence = false + if (lastInput == Backslash & isStartOfEscapeSequence) isStartOfEscapeSequence = false else isStartOfEscapeSequence = true pos += 1 } else if (input == DoubleQuote) { diff --git a/akka-stream/src/main/scala/akka/stream/impl/MaybeSource.scala b/akka-stream/src/main/scala/akka/stream/impl/MaybeSource.scala new file mode 100644 index 0000000000..fe4fdb4a23 --- /dev/null +++ b/akka-stream/src/main/scala/akka/stream/impl/MaybeSource.scala @@ -0,0 +1,84 @@ +/** + * Copyright (C) 2009-2017 Lightbend Inc. 
+ */ +package akka.stream.impl + +import akka.annotation.InternalApi +import akka.dispatch.ExecutionContexts +import akka.stream.{ AbruptStageTerminationException, Attributes, Outlet, SourceShape } +import akka.stream.impl.Stages.DefaultAttributes +import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue, OutHandler } +import akka.util.OptionVal + +import scala.concurrent.Promise +import scala.util.Try + +/** + * INTERNAL API + */ +@InternalApi private[akka] object MaybeSource extends GraphStageWithMaterializedValue[SourceShape[AnyRef], Promise[Option[AnyRef]]] { + val out = Outlet[AnyRef]("MaybeSource.out") + override val shape = SourceShape(out) + + override protected def initialAttributes = DefaultAttributes.maybeSource + + override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Promise[Option[AnyRef]]) = { + import scala.util.{ Success ⇒ ScalaSuccess, Failure ⇒ ScalaFailure } + val promise = Promise[Option[AnyRef]]() + val logic = new GraphStageLogic(shape) with OutHandler { + + private var arrivedEarly: OptionVal[AnyRef] = OptionVal.None + + override def preStart(): Unit = { + promise.future.value match { + case Some(value) ⇒ + // already completed, shortcut + handleCompletion(value) + case None ⇒ + // callback on future completion + promise.future.onComplete( + getAsyncCallback(handleCompletion).invoke + )(ExecutionContexts.sameThreadExecutionContext) + } + } + + override def onPull(): Unit = arrivedEarly match { + case OptionVal.Some(value) ⇒ + push(out, value) + completeStage() + case OptionVal.None ⇒ + } + + private def handleCompletion(elem: Try[Option[AnyRef]]): Unit = { + elem match { + case ScalaSuccess(None) ⇒ + completeStage() + case ScalaSuccess(Some(value)) ⇒ + if (isAvailable(out)) { + push(out, value) + completeStage() + } else { + arrivedEarly = OptionVal.Some(value) + } + case ScalaFailure(ex) ⇒ + failStage(ex) + } + } + + override def onDownstreamFinish(): Unit = { + promise.tryComplete(ScalaSuccess(None)) + } + + override def postStop(): Unit = { + if (!promise.isCompleted) + promise.tryFailure(new AbruptStageTerminationException(this)) + } + + setHandler(out, this) + + } + (logic, promise) + } + + override def toString = "MaybeSource" +} \ No newline at end of file diff --git a/akka-stream/src/main/scala/akka/stream/impl/Modules.scala b/akka-stream/src/main/scala/akka/stream/impl/Modules.scala index bcef0cf7f9..1e8e5aaf07 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Modules.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Modules.scala @@ -77,19 +77,6 @@ import akka.util.OptionVal override def withAttributes(attr: Attributes): SourceModule[Out, NotUsed] = new PublisherSource[Out](p, attr, amendShape(attr)) } -/** - * INTERNAL API - */ -@InternalApi private[akka] final class MaybeSource[Out](val attributes: Attributes, shape: SourceShape[Out]) extends SourceModule[Out, Promise[Option[Out]]](shape) { - - override def create(context: MaterializationContext) = { - val p = Promise[Option[Out]]() - new MaybePublisher[Out](p, attributes.nameOrDefault("MaybeSource"))(context.materializer.executionContext) → p - } - override protected def newInstance(shape: SourceShape[Out]): SourceModule[Out, Promise[Option[Out]]] = new MaybeSource[Out](attributes, shape) - override def withAttributes(attr: Attributes): SourceModule[Out, Promise[Option[Out]]] = new MaybeSource(attr, amendShape(attr)) -} - /** * INTERNAL API * Creates and wraps an actor into [[org.reactivestreams.Publisher]] from the given 
`props`, diff --git a/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala b/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala index db6e986d45..25be1c8295 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala @@ -3,25 +3,17 @@ */ package akka.stream.impl -import akka.dispatch.ExecutionContexts -import akka.stream.ActorAttributes.SupervisionStrategy import akka.stream.OverflowStrategies._ import akka.stream._ -import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage._ -import akka.stream.scaladsl.{ Keep, Source, SourceQueueWithComplete } +import akka.stream.scaladsl.SourceQueueWithComplete -import scala.annotation.tailrec -import scala.concurrent.{ Future, Promise } import akka.Done import java.util.concurrent.CompletionStage - import akka.annotation.InternalApi -import akka.util.OptionVal +import scala.concurrent.{ Future, Promise } import scala.compat.java8.FutureConverters._ -import scala.util.Try -import scala.util.control.NonFatal /** * INTERNAL API @@ -53,9 +45,13 @@ import scala.util.control.NonFatal if (maxBuffer > 0) buffer = Buffer(maxBuffer, materializer) initCallback(callback.invoke) } - override def postStop(): Unit = stopCallback { - case Offer(elem, promise) ⇒ promise.failure(new IllegalStateException("Stream is terminated. SourceQueue is detached")) - case _ ⇒ // ignore + override def postStop(): Unit = { + val exception = new AbruptStageTerminationException(this) + completion.tryFailure(exception) + stopCallback { + case Offer(elem, promise) ⇒ promise.failure(exception) + case _ ⇒ // ignore + } } private def enqueueAndSuccess(offer: Offer[T]): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala b/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala index e3f3946579..aee943d796 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala @@ -109,12 +109,14 @@ import org.reactivestreams.{ Subscriber, Subscription } } final def tryRequest(subscription: Subscription, demand: Long): Unit = { + if (subscription eq null) throw new IllegalStateException("Subscription must be not null on request() call, rule 1.3") try subscription.request(demand) catch { case NonFatal(t) ⇒ throw new SignalThrewException("It is illegal to throw exceptions from request(), rule 3.16", t) } } final def tryCancel(subscription: Subscription): Unit = { + if (subscription eq null) throw new IllegalStateException("Subscription must be not null on cancel() call, rule 1.3") try subscription.cancel() catch { case NonFatal(t) ⇒ throw new SignalThrewException("It is illegal to throw exceptions from cancel(), rule 3.15", t) } diff --git a/akka-stream/src/main/scala/akka/stream/impl/Stages.scala b/akka-stream/src/main/scala/akka/stream/impl/Stages.scala index 46811687fd..a63ec46ae4 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Stages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Stages.scala @@ -4,9 +4,7 @@ package akka.stream.impl import akka.annotation.InternalApi -import akka.stream.ActorAttributes.SupervisionStrategy import akka.stream.Attributes._ -import akka.stream.Supervision.Decider import akka.stream._ /** diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala index 
cfb812754c..dd279a3da5 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala @@ -635,7 +635,7 @@ import scala.util.control.NonFatal interpreter.connections.foreach { connection ⇒ builder .append(" ") - .append(connection.toString) + .append(if (connection == null) "null" else connection.toString) .append(",\n") } builder.setLength(builder.length - 2) diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala index 0216e90cc1..442219591a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala @@ -641,7 +641,7 @@ import scala.util.control.NonFatal } val logicIndexes = logics.zipWithIndex.map { case (stage, idx) ⇒ stage → idx }.toMap - for (connection ← connections) { + for (connection ← connections if connection != null) { val inName = "N" + logicIndexes(connection.inOwner) val outName = "N" + logicIndexes(connection.outOwner) diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala index 00d65a1c9b..938d66016a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala @@ -1435,6 +1435,7 @@ private[stream] object Collect { private var groupEmitted = true private var finished = false private var totalWeight = 0L + private var hasElements = false override def preStart() = { schedulePeriodically(GroupedWeightedWithin.groupedWeightedWithinTimer, interval) @@ -1444,8 +1445,9 @@ private[stream] object Collect { private def nextElement(elem: T): Unit = { groupEmitted = false val cost = costFn(elem) - if (cost < 0) failStage(new IllegalArgumentException(s"Negative weight [$cost] for element [$elem] is not allowed")) + if (cost < 0L) failStage(new IllegalArgumentException(s"Negative weight [$cost] for element [$elem] is not allowed")) else { + hasElements = true if (totalWeight + cost <= maxWeight) { buf += elem totalWeight += cost @@ -1466,6 +1468,7 @@ private[stream] object Collect { } } } else { + //we have a single heavy element that weighs more than the limit if (totalWeight == 0L) { buf += elem totalWeight += cost @@ -1502,7 +1505,8 @@ private[stream] object Collect { pending = null.asInstanceOf[T] groupEmitted = false } else { - totalWeight = 0 + totalWeight = 0L + hasElements = false } pushEagerly = false if (isAvailable(in)) nextElement(grab(in)) @@ -1521,7 +1525,7 @@ private[stream] object Collect { else tryCloseGroup() } - override protected def onTimer(timerKey: Any) = if (totalWeight > 0) { + override protected def onTimer(timerKey: Any) = if (hasElements) { if (isAvailable(out)) emitGroup() else pushEagerly = true } diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala index f3347e3f61..88a40815c4 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala @@ -114,7 +114,7 @@ private[stream] object InputStreamSinkStage { var isInitialized = false var isActive = true var isStageAlive = true - val subscriberClosedException = new IOException("Reactive stream is terminated, no reads are 
possible") + def subscriberClosedException = new IOException("Reactive stream is terminated, no reads are possible") var detachedChunk: Option[ByteString] = None @scala.throws(classOf[IOException]) diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala index 977f56b3c0..42d653d4c9 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala @@ -161,7 +161,7 @@ private[akka] class OutputStreamAdapter( var isActive = true var isPublisherAlive = true - val publisherClosedException = new IOException("Reactive stream is terminated, no writes are possible") + def publisherClosedException = new IOException("Reactive stream is terminated, no writes are possible") @scala.throws(classOf[IOException]) private[this] def send(sendAction: () ⇒ Unit): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala index b58d3429de..8551f8f691 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala @@ -378,8 +378,8 @@ private[stream] object ConnectionSourceStage { @InternalApi private[akka] object TcpIdleTimeout { def apply(idleTimeout: FiniteDuration, remoteAddress: Option[InetSocketAddress]): BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = { val connectionToString = remoteAddress match { - case Some(addr) ⇒ s" on connection to [$addr]" - case _ ⇒ "" + case Some(address) ⇒ s" on connection to [$address]" + case _ ⇒ "" } val toNetTimeout: BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala index aa8440eff2..48693e662a 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala @@ -21,8 +21,6 @@ import scala.compat.java8.FutureConverters._ object Flow { - private[this] val _identity = new javadsl.Flow(scaladsl.Flow[Any]) - /** Create a `Flow` which can process elements of type `T`. */ def create[T](): javadsl.Flow[T, T, NotUsed] = fromGraph(scaladsl.Flow[T]) @@ -58,10 +56,25 @@ object Flow { * Creates a `Flow` from a `Sink` and a `Source` where the Flow's input * will be sent to the Sink and the Flow's output will come from the Source. * + * The resulting flow can be visualized as: + * {{{ + * +----------------------------------------------+ + * | Resulting Flow[I, O, NotUsed] | + * | | + * | +---------+ +-----------+ | + * | | | | | | + * I ~~> | Sink[I] | [no-connection!] | Source[O] | ~~> O + * | | | | | | + * | +---------+ +-----------+ | + * +----------------------------------------------+ + * }}} + * * The completion of the Sink and Source sides of a Flow constructed using * this method are independent. So if the Sink receives a completion signal, * the Source side will remain unaware of that. If you are looking to couple * the termination signals of the two sides use `Flow.fromSinkAndSourceCoupled` instead. + * + * See also [[fromSinkAndSourceMat]] when access to materialized values of the parameters is needed. 
*/ def fromSinkAndSource[I, O](sink: Graph[SinkShape[I], _], source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] = new Flow(scaladsl.Flow.fromSinkAndSourceMat(sink, source)(scaladsl.Keep.none)) @@ -70,6 +83,19 @@ object Flow { * Creates a `Flow` from a `Sink` and a `Source` where the Flow's input * will be sent to the Sink and the Flow's output will come from the Source. * + * The resulting flow can be visualized as: + * {{{ + * +-------------------------------------------------------+ + * | Resulting Flow[I, O, M] | + * | | + * | +-------------+ +---------------+ | + * | | | | | | + * I ~~> | Sink[I, M1] | [no-connection!] | Source[O, M2] | ~~> O + * | | | | | | + * | +-------------+ +---------------+ | + * +------------------------------------------------------+ + * }}} + * * The completion of the Sink and Source sides of a Flow constructed using * this method are independent. So if the Sink receives a completion signal, * the Source side will remain unaware of that. If you are looking to couple @@ -87,6 +113,19 @@ object Flow { * Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them. * Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two stages. * + * The resulting flow can be visualized as: + * {{{ + * +---------------------------------------------+ + * | Resulting Flow[I, O, NotUsed] | + * | | + * | +---------+ +-----------+ | + * | | | | | | + * I ~~> | Sink[I] | ~~~(coupled)~~~ | Source[O] | ~~> O + * | | | | | | + * | +---------+ +-----------+ | + * +---------------------------------------------+ + * }}} + * * E.g. if the emitted [[Flow]] gets a cancellation, the [[Source]] of course is cancelled, * however the Sink will also be completed. The table below illustrates the effects in detail: * @@ -128,6 +167,7 @@ object Flow { * * * + * See also [[fromSinkAndSourceCoupledMat]] when access to materialized values of the parameters is needed. */ def fromSinkAndSourceCoupled[I, O](sink: Graph[SinkShape[I], _], source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] = new Flow(scaladsl.Flow.fromSinkAndSourceCoupled(sink, source)) @@ -136,6 +176,19 @@ object Flow { * Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them. * Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two stages. * + * The resulting flow can be visualized as: + * {{{ + * +-----------------------------------------------------+ + * | Resulting Flow[I, O, M] | + * | | + * | +-------------+ +---------------+ | + * | | | | | | + * I ~~> | Sink[I, M1] | ~~~(coupled)~~~ | Source[O, M2] | ~~> O + * | | | | | | + * | +-------------+ +---------------+ | + * +-----------------------------------------------------+ + * }}} + * * E.g. if the emitted [[Flow]] gets a cancellation, the [[Source]] of course is cancelled, * however the Sink will also be completed. The table on [[Flow.fromSinkAndSourceCoupled]] * illustrates the effects in detail. @@ -170,19 +223,21 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends /** * Transform this [[Flow]] by appending the given processing steps. 
* {{{ - * +----------------------------+ - * | Resulting Flow | - * | | - * | +------+ +------+ | - * | | | | | | - * In ~~> | this | ~Out~> | flow | ~~> T - * | | | | | | - * | +------+ +------+ | - * +----------------------------+ + * +---------------------------------+ + * | Resulting Flow[In, T, Mat] | + * | | + * | +------+ +------+ | + * | | | | | | + * In ~~> | this | ~~Out~~> | flow | ~~> T + * | | Mat| | M| | + * | +------+ +------+ | + * +---------------------------------+ * }}} * The materialized value of the combined [[Flow]] will be the materialized * value of the current flow (ignoring the other Flow’s value), use * `viaMat` if a different strategy is needed. + * + * See also [[viaMat]] when access to materialized values of the parameter is needed. */ def via[T, M](flow: Graph[FlowShape[Out, T], M]): javadsl.Flow[In, T, Mat] = new Flow(delegate.via(flow)) @@ -190,15 +245,15 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends /** * Transform this [[Flow]] by appending the given processing steps. * {{{ - * +----------------------------+ - * | Resulting Flow | - * | | - * | +------+ +------+ | - * | | | | | | - * In ~~> | this | ~Out~> | flow | ~~> T - * | | | | | | - * | +------+ +------+ | - * +----------------------------+ + * +---------------------------------+ + * | Resulting Flow[In, T, M2] | + * | | + * | +------+ +------+ | + * | | | | | | + * In ~~> | this | ~~Out~~> | flow | ~~> T + * | | Mat| | M| | + * | +------+ +------+ | + * +---------------------------------+ * }}} * The `combine` function is used to compose the materialized values of this flow and that * flow into the materialized value of the resulting Flow. @@ -212,19 +267,21 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends /** * Connect this [[Flow]] to a [[Sink]], concatenating the processing steps of both. * {{{ - * +----------------------------+ - * | Resulting Sink | - * | | - * | +------+ +------+ | - * | | | | | | - * In ~~> | flow | ~Out~> | sink | | - * | | | | | | - * | +------+ +------+ | - * +----------------------------+ + * +------------------------------+ + * | Resulting Sink[In, Mat] | + * | | + * | +------+ +------+ | + * | | | | | | + * In ~~> | flow | ~~Out~~> | sink | | + * | | Mat| | M| | + * | +------+ +------+ | + * +------------------------------+ * }}} * The materialized value of the combined [[Sink]] will be the materialized * value of the current flow (ignoring the given Sink’s value), use * `toMat` if a different strategy is needed. + * + * See also [[toMat]] when access to materialized values of the parameter is needed. */ def to(sink: Graph[SinkShape[Out], _]): javadsl.Sink[In, Mat] = new Sink(delegate.to(sink)) @@ -233,12 +290,12 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * Connect this [[Flow]] to a [[Sink]], concatenating the processing steps of both. * {{{ * +----------------------------+ - * | Resulting Sink | + * | Resulting Sink[In, M2] | * | | * | +------+ +------+ | * | | | | | | * In ~~> | flow | ~Out~> | sink | | - * | | | | | | + * | | Mat| | M| | * | +------+ +------+ | * +----------------------------+ * }}} @@ -263,6 +320,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * The materialized value of the combined [[Flow]] will be the materialized * value of the current flow (ignoring the other Flow’s value), use * `joinMat` if a different strategy is needed. 
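
The reworked diagrams above also spell out which materialized value a composition keeps. A small sketch in the Scala DSL showing the default (`via`/`to` keep the left-hand value) versus an explicit `Keep.right` with `toMat`; the stream contents are made up for illustration:

```scala
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Keep, Sink, Source }
import scala.concurrent.Future

object ComposeMatExample extends App {
  implicit val system = ActorSystem("demo")
  implicit val materializer = ActorMaterializer()

  val double: Flow[Int, Int, NotUsed] = Flow[Int].map(_ * 2)

  // `via`/`to` keep the materialized value of the left-hand side (here NotUsed);
  // `viaMat`/`toMat` let you pick with a combiner such as Keep.right.
  val sum: Future[Int] =
    Source(1 to 10)
      .via(double)                                   // keeps the Source's NotUsed
      .toMat(Sink.fold[Int, Int](0)(_ + _))(Keep.right) // keep the Sink's Future[Int]
      .run()
}
```
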
+ * + * See also [[joinMat]] when access to materialized values of the parameter is needed. */ def join[M](flow: Graph[FlowShape[Out, In], M]): javadsl.RunnableGraph[Mat] = RunnableGraph.fromGraph(delegate.join(flow)) @@ -323,6 +382,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. + * + * See also [[viaMat]] when access to materialized values of the parameter is needed. */ def joinMat[I2, O2, Mat2, M](bidi: Graph[BidiShape[Out, O2, I2, In], Mat2], combine: function.Function2[Mat, Mat2, M]): Flow[I2, O2, M] = new Flow(delegate.joinMat(bidi)(combinerToScala(combine))) @@ -345,6 +406,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * Transform this stream by applying the given function to each of the elements * as they pass through this processing step. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element * * '''Backpressures when''' downstream backpressures @@ -394,6 +457,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * The returned `Iterable` MUST NOT contain `null` values, * as they are illegal as stream elements - according to the Reactive Streams specification. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element or there are still remaining elements * from the previously calculated collection * @@ -461,6 +526,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * The function `f` is always invoked on the elements in the order they arrive (even though the result of the futures * returned by `f` might be emitted in a different order). * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' any of the CompletionStages returned by the provided function complete * * '''Backpressures when''' the number of futures reaches the configured parallelism and the downstream backpressures @@ -477,6 +544,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends /** * Only pass on those elements that satisfy the given predicate. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the given predicate returns true for the element * * '''Backpressures when''' the given predicate returns true for the element and downstream backpressures @@ -492,6 +561,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends /** * Only pass on those elements that NOT satisfy the given predicate. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the given predicate returns false for the element * * '''Backpressures when''' the given predicate returns false for the element and downstream backpressures @@ -508,6 +579,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * on which the function is defined as they pass through this processing step. * Non-matching elements are filtered out. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
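
Several of the operators above now explicitly note that they adhere to the `ActorAttributes.SupervisionStrategy` attribute. A hedged sketch of attaching a resuming decider to a `map` stage in the Scala DSL; the failing division is purely illustrative:

```scala
import akka.actor.ActorSystem
import akka.stream.{ ActorAttributes, ActorMaterializer, Supervision }
import akka.stream.scaladsl.{ Sink, Source }

object SupervisionExample extends App {
  implicit val system = ActorSystem("demo")
  implicit val materializer = ActorMaterializer()

  // Drop the failing element and keep going; only stages that document
  // supervision support (such as map) honor this attribute.
  val resumeOnArithmeticError: Supervision.Decider = {
    case _: ArithmeticException ⇒ Supervision.Resume
    case _                      ⇒ Supervision.Stop
  }

  Source(-2 to 2)
    .map(100 / _) // throws for 0; resumed instead of failing the stream
    .withAttributes(ActorAttributes.supervisionStrategy(resumeOnArithmeticError))
    .runWith(Sink.foreach(println)) // prints -50, -100, 100, 50
}
```
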
+ * * '''Emits when''' the provided partial function is defined for the element * * '''Backpressures when''' the partial function is defined for the element and downstream backpressures @@ -575,6 +648,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * The stream will be completed without producing any elements if `n` is zero * or negative. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the specified number of elements to take has not yet been reached * * '''Backpressures when''' downstream backpressures @@ -587,7 +662,7 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * * See also [[Flow.take]], [[Flow.takeWithin]], [[Flow.takeWhile]] */ - def limitWeighted(n: Long)(costFn: function.Function[Out, Long]): javadsl.Flow[In, Out, Mat] = { + def limitWeighted(n: Long)(costFn: function.Function[Out, java.lang.Long]): javadsl.Flow[In, Out, Mat] = { new Flow(delegate.limitWeighted(n)(costFn.apply)) } @@ -619,6 +694,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * [[akka.stream.Supervision#restart]] current value starts at `zero` again * the stream will continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the function scanning the element returns a new element * * '''Backpressures when''' downstream backpressures @@ -644,6 +721,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * [[akka.stream.Supervision.Resume]] current value starts at the previous * current value, or zero when it doesn't have one, and the stream will continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the future returned by f` completes * * '''Backpressures when''' downstream backpressures @@ -662,6 +741,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * after which it also completes. Applies the given function `f` towards its current and next value, * yielding the next current value. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * If the function `f` throws an exception and the supervision decision is * [[akka.stream.Supervision#restart]] current value starts at `zero` again * the stream will continue. @@ -682,6 +763,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * Applies the given function towards its current and next value, * yielding the next current value. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * If the function `f` returns a failure and the supervision decision is * [[akka.stream.Supervision.Restart]] current value starts at `zero` again * the stream will continue. @@ -706,6 +789,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * which is semantically in-line with that Scala's standard library collections * do in such situations. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' upstream completes * * '''Backpressures when''' downstream backpressures @@ -815,7 +900,7 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise * IllegalArgumentException is thrown. 
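
`groupedWeightedWithin`, whose Java signature is adjusted below to take a `java.lang.Long` cost function, batches elements by accumulated weight or elapsed time, whichever limit is hit first. A sketch of the Scala DSL form (its `(maxWeight, d)(costFn)` shape matches the delegation shown in this diff), with made-up chunk sizes:

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import scala.concurrent.duration._

object GroupedWeightedWithinExample extends App {
  implicit val system = ActorSystem("demo")
  implicit val materializer = ActorMaterializer()

  // Batch chunks until roughly 1000 bytes have accumulated or 100 millis have
  // passed, whichever comes first; the cost function must never be negative.
  Source(List(ByteString("a" * 300), ByteString("b" * 500), ByteString("c" * 400)))
    .groupedWeightedWithin(1000L, 100.millis)(_.size.toLong)
    .runForeach(group ⇒ println(group.map(_.size))) // e.g. Vector(300, 500) then Vector(400)
}
```
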
*/ - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, Long], d: FiniteDuration): javadsl.Flow[In, java.util.List[Out @uncheckedVariance], Mat] = + def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: FiniteDuration): javadsl.Flow[In, java.util.List[Out @uncheckedVariance], Mat] = new Flow(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava)) /** @@ -884,6 +969,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * The stream will be completed without producing any elements if predicate is false for * the first stream element. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the predicate is true * * '''Backpressures when''' downstream backpressures @@ -921,6 +1008,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * Discard elements at the beginning of the stream while predicate is true. * All elements will be taken after predicate returns false first time. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' predicate returned false and for all following stream elements * * '''Backpressures when''' predicate returned false and downstream backpressures @@ -1079,6 +1168,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' downstream stops backpressuring and there is a conflated element available * * '''Backpressures when''' never @@ -1107,6 +1198,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' downstream stops backpressuring and there is a conflated element available * * '''Backpressures when''' never @@ -1131,6 +1224,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
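
The `conflate`/`conflateWithSeed`/`batch` family documented above only aggregates when the downstream is slower than the upstream. A small illustrative sketch in the Scala DSL, with a throttled consumer standing in for a slow downstream; it runs until the application is stopped:

```scala
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ThrottleMode }
import akka.stream.scaladsl.Source
import scala.concurrent.duration._

object ConflateExample extends App {
  implicit val system = ActorSystem("demo")
  implicit val materializer = ActorMaterializer()

  // The downstream consumes one element per second while the upstream ticks
  // every 10 ms; conflate sums the ticks that arrive in between instead of
  // backpressuring the upstream.
  Source.tick(0.millis, 10.millis, 1)
    .conflate(_ + _)
    .throttle(1, 1.second, 1, ThrottleMode.Shaping)
    .runForeach(println)
}
```
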
+ * * '''Emits when''' downstream stops backpressuring and there is an aggregated element available * * '''Backpressures when''' there are `max` batched elements and 1 pending element and downstream backpressures @@ -1176,7 +1271,7 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new batch */ - def batchWeighted[S](max: Long, costFn: function.Function[Out, Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] = + def batchWeighted[S](max: Long, costFn: function.Function[Out, java.lang.Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] = new Flow(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply)) /** @@ -2056,6 +2151,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * * Uses the given [[LoggingAdapter]] for logging. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element * * '''Backpressures when''' downstream backpressures diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala index eac492f461..b5bfdcaac1 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala @@ -55,7 +55,7 @@ object Framing { * Creates a Flow that decodes an incoming stream of unstructured byte chunks into a stream of frames, assuming that * incoming frames have a field that encodes their length. * - * If the input stream finishes before the last frame has been fully decoded this Flow will fail the stream reporting + * If the input stream finishes before the last frame has been fully decoded, this Flow will fail the stream reporting * a truncated frame. * * The byte order used for when decoding the field defaults to little-endian. @@ -76,7 +76,7 @@ object Framing { * Creates a Flow that decodes an incoming stream of unstructured byte chunks into a stream of frames, assuming that * incoming frames have a field that encodes their length. * - * If the input stream finishes before the last frame has been fully decoded this Flow will fail the stream reporting + * If the input stream finishes before the last frame has been fully decoded, this Flow will fail the stream reporting * a truncated frame. * * @param fieldLength The length of the "size" field in bytes @@ -93,6 +93,39 @@ object Framing { byteOrder: ByteOrder): Flow[ByteString, ByteString, NotUsed] = scaladsl.Framing.lengthField(fieldLength, fieldOffset, maximumFrameLength, byteOrder).asJava + /** + * Creates a Flow that decodes an incoming stream of unstructured byte chunks into a stream of frames, assuming that + * incoming frames have a field that encodes their length. + * + * If the input stream finishes before the last frame has been fully decoded, this Flow will fail the stream reporting + * a truncated frame. + * + * @param fieldLength The length of the "size" field in bytes + * @param fieldOffset The offset of the field from the beginning of the frame in bytes + * @param maximumFrameLength The maximum length of allowed frames while decoding. If the maximum length is exceeded + * this Flow will fail the stream. 
This length *includes* the header (i.e the offset and + * the length of the size field) + * @param byteOrder The ''ByteOrder'' to be used when decoding the field + * @param computeFrameSize This function can be supplied if frame size is varied or needs to be computed in a special fashion. + * For example, frame can have a shape like this: `[offset bytes][body size bytes][body bytes][footer bytes]`. + * Then computeFrameSize can be used to compute the frame size: `(offset bytes, computed size) => (actual frame size)`. + * ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the stage fails otherwise. + * + */ + def lengthField( + fieldLength: Int, + fieldOffset: Int, + maximumFrameLength: Int, + byteOrder: ByteOrder, + computeFrameSize: akka.japi.function.Function2[Array[Byte], Integer, Integer]): Flow[ByteString, ByteString, NotUsed] = + scaladsl.Framing.lengthField( + fieldLength, + fieldOffset, + maximumFrameLength, + byteOrder, + (a: Array[Byte], s: Int) ⇒ computeFrameSize.apply(a, s) + ).asJava + /** * Returns a BidiFlow that implements a simple framing protocol. This is a convenience wrapper over [[Framing#lengthField]] * and simply attaches a length field header of four bytes (using big endian encoding) to outgoing messages, and decodes diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala index d1bbb06aeb..19c1cc3856 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala @@ -206,7 +206,7 @@ object Partition { * @param outputCount number of output ports * @param partitioner function deciding which output each element will be targeted */ - def create[T](outputCount: Int, partitioner: function.Function[T, Int]): Graph[UniformFanOutShape[T, T], NotUsed] = + def create[T](outputCount: Int, partitioner: function.Function[T, Integer]): Graph[UniformFanOutShape[T, T], NotUsed] = scaladsl.Partition(outputCount, partitioner = (t: T) ⇒ partitioner.apply(t)) /** @@ -215,7 +215,7 @@ object Partition { * @param outputCount number of output ports * @param partitioner function deciding which output each element will be targeted */ - def create[T](clazz: Class[T], outputCount: Int, partitioner: function.Function[T, Int]): Graph[UniformFanOutShape[T, T], NotUsed] = + def create[T](clazz: Class[T], outputCount: Int, partitioner: function.Function[T, Integer]): Graph[UniformFanOutShape[T, T], NotUsed] = create(outputCount, partitioner) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala index 12be1fae17..c183381156 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala @@ -4,6 +4,10 @@ package akka.stream.javadsl import akka.NotUsed +import java.util.function.{ BiFunction, Supplier, ToLongBiFunction } + +import akka.annotation.DoNotInherit +import akka.annotation.ApiMayChange /** * A MergeHub is a special streaming hub that is able to collect streamed elements from a dynamic set of @@ -91,3 +95,129 @@ object BroadcastHub { def of[T](clazz: Class[T]): Sink[T, Source[T, NotUsed]] = of(clazz, 256) } + +/** + * A `PartitionHub` is a special streaming hub that is able to route streamed elements to a dynamic set of consumers. + * It consists of two parts, a [[Sink]] and a [[Source]]. 
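
The new `lengthField` overload above takes a `computeFrameSize` function for wire formats where the decoded length field is not the total frame size. A sketch of the equivalent Scala DSL call (the five-argument form this Java API delegates to), assuming a made-up `[2-byte big-endian body length][body]` framing:

```scala
import java.nio.ByteOrder
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Framing, Source }
import akka.util.ByteString

object LengthFieldFramingExample extends App {
  implicit val system = ActorSystem("demo")
  implicit val materializer = ActorMaterializer()

  // Assumed frame layout: [2-byte big-endian body length][body bytes].
  // computeFrameSize turns the decoded body length into the total frame size.
  val framing = Framing.lengthField(
    2,                                             // fieldLength
    0,                                             // fieldOffset
    1024,                                          // maximumFrameLength
    ByteOrder.BIG_ENDIAN,                          // byteOrder
    (_: Array[Byte], bodyLength: Int) ⇒ bodyLength + 2) // computeFrameSize

  val frame = ByteString(Array[Byte](0, 5)) ++ ByteString("hello")
  Source.single(frame ++ frame)
    .via(framing)
    .runForeach(bs ⇒ println(bs.drop(2).utf8String)) // prints "hello" twice
}
```
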
The [[Sink]] e elements from a producer to the + * actually live consumers it has. The selection of consumer is done with a function. Each element can be routed to + * only one consumer. Once the producer has been materialized, the [[Sink]] it feeds into returns a + * materialized value which is the corresponding [[Source]]. This [[Source]] can be materialized an arbitrary number + * of times, where each of the new materializations will receive their elements from the original [[Sink]]. + */ +object PartitionHub { + + /** + * Creates a [[Sink]] that receives elements from its upstream producer and routes them to a dynamic set + * of consumers. After the [[Sink]] returned by this method is materialized, it returns a [[Source]] as materialized + * value. This [[Source]] can be materialized an arbitrary number of times and each materialization will receive the + * elements from the original [[Sink]]. + * + * Every new materialization of the [[Sink]] results in a new, independent hub, which materializes to its own + * [[Source]] for consuming the [[Sink]] of that materialization. + * + * If the original [[Sink]] is failed, then the failure is immediately propagated to all of its materialized + * [[Source]]s (possibly jumping over already buffered elements). If the original [[Sink]] is completed, then + * all corresponding [[Source]]s are completed. Both failure and normal completion is "remembered" and later + * materializations of the [[Source]] will see the same (failure or completion) state. [[Source]]s that are + * cancelled are simply removed from the dynamic set of consumers. + * + * This `statefulSink` should be used when there is a need to keep mutable state in the partition function, + * e.g. for implemening round-robin or sticky session kind of routing. If state is not needed the [[#of]] can + * be more convenient to use. + * + * @param partitioner Function that decides where to route an element. It is a factory of a function to + * to be able to hold stateful variables that are unique for each materialization. The function + * takes two parameters; the first is information about active consumers, including an array of consumer + * identifiers and the second is the stream element. The function should return the selected consumer + * identifier for the given element. The function will never be called when there are no active consumers, + * i.e. there is always at least one element in the array of identifiers. + * @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected. + * This is only used initially when the stage is starting up, i.e. it is not honored when consumers have + * been removed (canceled). + * @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer + * is backpressured. 
+ */ + @ApiMayChange def ofStateful[T](clazz: Class[T], partitioner: Supplier[ToLongBiFunction[ConsumerInfo, T]], + startAfterNrOfConsumers: Int, bufferSize: Int): Sink[T, Source[T, NotUsed]] = { + val p: () ⇒ (akka.stream.scaladsl.PartitionHub.ConsumerInfo, T) ⇒ Long = () ⇒ { + val f = partitioner.get() + (info, elem) ⇒ f.applyAsLong(info, elem) + } + akka.stream.scaladsl.PartitionHub.statefulSink[T](p, startAfterNrOfConsumers, bufferSize) + .mapMaterializedValue(_.asJava) + .asJava + } + + @ApiMayChange def ofStateful[T](clazz: Class[T], partitioner: Supplier[ToLongBiFunction[ConsumerInfo, T]], + startAfterNrOfConsumers: Int): Sink[T, Source[T, NotUsed]] = + ofStateful(clazz, partitioner, startAfterNrOfConsumers, akka.stream.scaladsl.PartitionHub.defaultBufferSize) + + /** + * Creates a [[Sink]] that receives elements from its upstream producer and routes them to a dynamic set + * of consumers. After the [[Sink]] returned by this method is materialized, it returns a [[Source]] as materialized + * value. This [[Source]] can be materialized an arbitrary number of times and each materialization will receive the + * elements from the original [[Sink]]. + * + * Every new materialization of the [[Sink]] results in a new, independent hub, which materializes to its own + * [[Source]] for consuming the [[Sink]] of that materialization. + * + * If the original [[Sink]] is failed, then the failure is immediately propagated to all of its materialized + * [[Source]]s (possibly jumping over already buffered elements). If the original [[Sink]] is completed, then + * all corresponding [[Source]]s are completed. Both failure and normal completion is "remembered" and later + * materializations of the [[Source]] will see the same (failure or completion) state. [[Source]]s that are + * cancelled are simply removed from the dynamic set of consumers. + * + * This `sink` should be used when the routing function is stateless, e.g. based on a hashed value of the + * elements. Otherwise the [[#ofStateful]] can be used to implement more advanced routing logic. + * + * @param partitioner Function that decides where to route an element. The function takes two parameters; + * the first is the number of active consumers and the second is the stream element. The function should + * return the index of the selected consumer for the given element, i.e. int greater than or equal to 0 + * and less than number of consumers. E.g. `(size, elem) -> Math.abs(elem.hashCode()) % size`. + * @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected. + * This is only used initially when the stage is starting up, i.e. it is not honored when consumers have + * been removed (canceled). + * @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer + * is backpressured. 
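
For the stateless variant described above, a function of the consumer count and the element picks the target consumer. A brief sketch using the Scala DSL counterpart (`PartitionHub.sink`, which the Java `of` above delegates to); element values and the two console-printing consumers are illustrative only:

```scala
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ PartitionHub, Source }

object PartitionHubExample extends App {
  implicit val system = ActorSystem("demo")
  implicit val materializer = ActorMaterializer()

  // Route each element to one of the attached consumers based on its hash;
  // elements are buffered until two consumers have attached.
  val fromProducer: Source[Int, NotUsed] =
    Source(1 to 20).runWith(PartitionHub.sink[Int](
      (size, elem) ⇒ math.abs(elem.hashCode) % size,
      startAfterNrOfConsumers = 2,
      bufferSize = 256))

  fromProducer.runForeach(e ⇒ println(s"consumer1: $e"))
  fromProducer.runForeach(e ⇒ println(s"consumer2: $e"))
}
```
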
+ */ + @ApiMayChange def of[T](clazz: Class[T], partitioner: BiFunction[Integer, T, Integer], startAfterNrOfConsumers: Int, + bufferSize: Int): Sink[T, Source[T, NotUsed]] = + akka.stream.scaladsl.PartitionHub.sink[T]( + (size, elem) ⇒ partitioner.apply(size, elem), + startAfterNrOfConsumers, bufferSize) + .mapMaterializedValue(_.asJava) + .asJava + + @ApiMayChange def of[T](clazz: Class[T], partitioner: BiFunction[Integer, T, Integer], startAfterNrOfConsumers: Int): Sink[T, Source[T, NotUsed]] = + of(clazz, partitioner, startAfterNrOfConsumers, akka.stream.scaladsl.PartitionHub.defaultBufferSize) + + @DoNotInherit @ApiMayChange trait ConsumerInfo { + + /** + * Sequence of all identifiers of current consumers. + * + * Use this method only if you need to enumerate consumer existing ids. + * When selecting a specific consumerId by its index, prefer using the dedicated [[#consumerIdByIdx]] method instead, + * which is optimised for this use case. + */ + def getConsumerIds: java.util.List[Long] + + /** Obtain consumer identifier by index */ + def consumerIdByIdx(idx: Int): Long + + /** + * Approximate number of buffered elements for a consumer. + * Larger value than other consumers could be an indication of + * that the consumer is slow. + * + * Note that this is a moving target since the elements are + * consumed concurrently. + */ + def queueSize(consumerId: Long): Int + + /** + * Number of attached consumers. + */ + def size: Int + } +} diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Restart.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Restart.scala new file mode 100644 index 0000000000..84ca1daf8c --- /dev/null +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Restart.scala @@ -0,0 +1,126 @@ +/** + * Copyright (C) 2015-2017 Lightbend Inc. + */ +package akka.stream.javadsl + +import akka.NotUsed +import akka.japi.function.Creator +import akka.stream.KillSwitch +import akka.stream.scaladsl.{ Sink, Source } + +import scala.concurrent.duration.FiniteDuration + +/** + * A RestartSource wraps a [[Source]] that gets restarted when it completes or fails. + * + * They are useful for graphs that need to run for longer than the [[Source]] can necessarily guarantee it will, for + * example, for [[Source]] streams that depend on a remote server that may crash or become partitioned. The + * RestartSource ensures that the graph can continue running while the [[Source]] restarts. + */ +object RestartSource { + + /** + * Wrap the given [[Source]] with a [[Source]] that will restart it when it fails or complete using an exponential + * backoff. + * + * This [[Source]] will never emit a complete or failure, since the completion or failure of the wrapped [[Source]] + * is always handled by restarting it. The wrapped [[Source]] can however be cancelled by cancelling this [[Source]]. + * When that happens, the wrapped [[Source]], if currently running will be cancelled, and it will not be restarted. + * This can be triggered simply by the downstream cancelling, or externally by introducing a [[KillSwitch]] right + * after this [[Source]] in the graph. + * + * This uses the same exponential backoff algorithm as [[akka.pattern.Backoff]]. + * + * @param minBackoff minimum (initial) duration until the child actor will + * started again, if it is terminated + * @param maxBackoff the exponential back-off is capped to this duration + * @param randomFactor after calculation of the exponential back-off an additional + * random delay based on this factor is added, e.g. 
`0.2` adds up to `20%` delay. + * In order to skip this additional delay pass in `0`. + * @param sourceFactory A factory for producing the [[Source]] to wrap. + */ + def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { + akka.stream.scaladsl.RestartSource.withBackoff(minBackoff, maxBackoff, randomFactor) { () ⇒ + sourceFactory.create().asScala + }.asJava + } +} + +/** + * A RestartSink wraps a [[Sink]] that gets restarted when it completes or fails. + * + * They are useful for graphs that need to run for longer than the [[Sink]] can necessarily guarantee it will, for + * example, for [[Sink]] streams that depend on a remote server that may crash or become partitioned. The + * RestartSink ensures that the graph can continue running while the [[Sink]] restarts. + */ +object RestartSink { + + /** + * Wrap the given [[Sink]] with a [[Sink]] that will restart it when it fails or complete using an exponential + * backoff. + * + * This [[Sink]] will never cancel, since cancellation by the wrapped [[Sink]] is always handled by restarting it. + * The wrapped [[Sink]] can however be completed by feeding a completion or error into this [[Sink]]. When that + * happens, the [[Sink]], if currently running, will terminate and will not be restarted. This can be triggered + * simply by the upstream completing, or externally by introducing a [[KillSwitch]] right before this [[Sink]] in the + * graph. + * + * The restart process is inherently lossy, since there is no coordination between cancelling and the sending of + * messages. When the wrapped [[Sink]] does cancel, this [[Sink]] will backpressure, however any elements already + * sent may have been lost. + * + * This uses the same exponential backoff algorithm as [[akka.pattern.Backoff]]. + * + * @param minBackoff minimum (initial) duration until the child actor will + * started again, if it is terminated + * @param maxBackoff the exponential back-off is capped to this duration + * @param randomFactor after calculation of the exponential back-off an additional + * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. + * In order to skip this additional delay pass in `0`. + * @param sinkFactory A factory for producing the [[Sink]] to wrap. + */ + def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = { + akka.stream.scaladsl.RestartSink.withBackoff(minBackoff, maxBackoff, randomFactor) { () ⇒ + sinkFactory.create().asScala + }.asJava + } +} + +/** + * A RestartFlow wraps a [[Flow]] that gets restarted when it completes or fails. + * + * They are useful for graphs that need to run for longer than the [[Flow]] can necessarily guarantee it will, for + * example, for [[Flow]] streams that depend on a remote server that may crash or become partitioned. The + * RestartFlow ensures that the graph can continue running while the [[Flow]] restarts. + */ +object RestartFlow { + + /** + * Wrap the given [[Flow]] with a [[Flow]] that will restart it when it fails or complete using an exponential + * backoff. + * + * This [[Flow]] will not cancel, complete or emit a failure, until the opposite end of it has been cancelled or + * completed. Any termination by the [[Flow]] before that time will be handled by restarting it. 
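
The `RestartSource`/`RestartSink` wrappers introduced above re-materialize the wrapped stage with exponential backoff. A sketch of `RestartSource.withBackoff` in the Scala DSL, which the Java API above delegates to; `flakySource` is a stand-in for e.g. a connection to an unreliable remote service, and the sketch keeps restarting indefinitely:

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ RestartSource, Sink, Source }
import scala.concurrent.duration._

object RestartSourceExample extends App {
  implicit val system = ActorSystem("demo")
  implicit val materializer = ActorMaterializer()

  // A source that always fails after two elements, standing in for a remote
  // connection that may crash; the wrapper re-materializes it with a backoff
  // growing from 1 to 30 seconds (plus 20% jitter).
  def flakySource: Source[Int, _] =
    Source(1 to 3).map { n ⇒ if (n == 3) throw new RuntimeException("boom") else n }

  RestartSource.withBackoff(
    minBackoff = 1.second,
    maxBackoff = 30.seconds,
    randomFactor = 0.2) { () ⇒ flakySource }
    .runWith(Sink.foreach(println))
}
```
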
Any termination + * signals sent to this [[Flow]] however will terminate the wrapped [[Flow]], if it's running, and then the [[Flow]] + * will be allowed to terminate without being restarted. + * + * The restart process is inherently lossy, since there is no coordination between cancelling and the sending of + * messages. A termination signal from either end of the wrapped [[Flow]] will cause the other end to be terminated, + * and any in transit messages will be lost. During backoff, this [[Flow]] will backpressure. + * + * This uses the same exponential backoff algorithm as [[akka.pattern.Backoff]]. + * + * @param minBackoff minimum (initial) duration until the child actor will + * started again, if it is terminated + * @param maxBackoff the exponential back-off is capped to this duration + * @param randomFactor after calculation of the exponential back-off an additional + * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. + * In order to skip this additional delay pass in `0`. + * @param flowFactory A factory for producing the [[Flow]] to wrap. + */ + def withBackoff[In, Out](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = { + akka.stream.scaladsl.RestartFlow.withBackoff(minBackoff, maxBackoff, randomFactor) { () ⇒ + flowFactory.create().asScala + }.asJava + } +} \ No newline at end of file diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala index 88cb76446e..81be1a2e31 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala @@ -271,6 +271,8 @@ object Sink { * try to create sink with next element * * `fallback` will be executed when there was no elements and completed is received from upstream. + * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. */ def lazyInit[T, M](sinkFactory: function.Function[T, CompletionStage[Sink[T, M]]], fallback: function.Creator[M]): Sink[T, CompletionStage[M]] = new Sink(scaladsl.Sink.lazyInit[T, M]( diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala index 48cabd578b..dbaef24b21 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala @@ -184,13 +184,14 @@ object Source { * Streams the elements of the given future source once it successfully completes. * If the future fails the stream is failed. */ - def fromFutureSource[T, M](future: Future[Graph[SourceShape[T], M]]): javadsl.Source[T, Future[M]] = new Source(scaladsl.Source.fromFutureSource(future)) + def fromFutureSource[T, M](future: Future[_ <: Graph[SourceShape[T], M]]): javadsl.Source[T, Future[M]] = new Source(scaladsl.Source.fromFutureSource(future)) /** * Streams the elements of an asynchronous source once its given `completion` stage completes. * If the `completion` fails the stream is failed with that exception. 
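As a quick illustration of the restart combinators added in the new `Restart.scala` above, here is a hedged Scala DSL sketch; the Java DSL methods in the diff delegate to exactly this `akka.stream.scaladsl.RestartSource.withBackoff` call. The `connect()` source and the chosen backoff values are hypothetical placeholders:

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ RestartSource, Source }
import scala.concurrent.duration._

object RestartExample extends App {
  implicit val system = ActorSystem("restart-example")
  implicit val materializer = ActorMaterializer()

  // Hypothetical source that may fail or complete, e.g. a connection to a flaky server.
  def connect(): Source[String, _] = Source(List("a", "b", "c"))

  // Restart on failure or completion with exponential backoff:
  // first retry after 1 second, capped at 30 seconds, with 20% random jitter.
  val restarting =
    RestartSource.withBackoff(minBackoff = 1.second, maxBackoff = 30.seconds, randomFactor = 0.2) { () ⇒
      connect()
    }

  // Only cancellation (e.g. a downstream KillSwitch) stops the restarts.
  restarting.runForeach(println)
}
```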
*/ - def fromSourceCompletionStage[T, M](completion: CompletionStage[Graph[SourceShape[T], M]]): javadsl.Source[T, CompletionStage[M]] = new Source(scaladsl.Source.fromSourceCompletionStage(completion)) + def fromSourceCompletionStage[T, M](completion: CompletionStage[_ <: Graph[SourceShape[T], M]]): javadsl.Source[T, CompletionStage[M]] = + new Source(scaladsl.Source.fromSourceCompletionStage(completion)) /** * Elements are emitted periodically with the specified interval. @@ -386,6 +387,8 @@ object Source { * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or * set it for a given Source by using [[ActorAttributes]]. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * @param create - function that is called on stream start and creates/opens resource. * @param read - function that reads data from opened resource. It is called each time backpressure signal * is received. Stream calls close and completes when `read` returns None. @@ -412,6 +415,8 @@ object Source { * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or * set it for a given Source by using [[ActorAttributes]]. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * @param create - function that is called on stream start and creates/opens resource. * @param read - function that reads data from opened resource. It is called each time backpressure signal * is received. Stream calls close and completes when `CompletionStage` from read function returns None. @@ -1068,6 +1073,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * The returned `Iterable` MUST NOT contain `null` values, * as they are illegal as stream elements - according to the Reactive Streams specification. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element or there are still remaining elements * from the previously calculated collection * @@ -1102,6 +1109,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * * The function `f` is always invoked on the elements in the order they arrive. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the CompletionStage returned by the provided function finishes for the next element in sequence * * '''Backpressures when''' the number of CompletionStages reaches the configured parallelism and the downstream @@ -1135,6 +1144,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * The function `f` is always invoked on the elements in the order they arrive (even though the result of the CompletionStages * returned by `f` might be emitted in a different order). * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' any of the CompletionStages returned by the provided function complete * * '''Backpressures when''' the number of CompletionStages reaches the configured parallelism and the downstream backpressures @@ -1151,6 +1162,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap /** * Only pass on those elements that satisfy the given predicate. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
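Many of the hunks above only add the note that an operator "Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute". A minimal sketch of how that attribute is supplied in user code, assuming a deliberately failing `map` stage purely for illustration:

```scala
import akka.actor.ActorSystem
import akka.stream.{ ActorAttributes, ActorMaterializer, Supervision }
import akka.stream.scaladsl.Source

object SupervisionAttributeExample extends App {
  implicit val system = ActorSystem("supervision-example")
  implicit val materializer = ActorMaterializer()

  // Resume (drop the failing element) instead of failing the whole stream.
  val decider: Supervision.Decider = {
    case _: ArithmeticException ⇒ Supervision.Resume
    case _                      ⇒ Supervision.Stop
  }

  Source(List(1, 2, 0, 4))
    .map(100 / _) // map adheres to the supervision attribute
    .withAttributes(ActorAttributes.supervisionStrategy(decider))
    .runForeach(println) // prints 100, 50, 25: the failing element is dropped
}
```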
+ * * '''Emits when''' the given predicate returns true for the element * * '''Backpressures when''' the given predicate returns true for the element and downstream backpressures @@ -1166,6 +1179,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap /** * Only pass on those elements that NOT satisfy the given predicate. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the given predicate returns false for the element * * '''Backpressures when''' the given predicate returns false for the element and downstream backpressures @@ -1182,6 +1197,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * on which the function is defined as they pass through this processing step. * Non-matching elements are filtered out. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the provided partial function is defined for the element * * '''Backpressures when''' the partial function is defined for the element and downstream backpressures @@ -1247,6 +1264,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * The stream will be completed without producing any elements if `n` is zero * or negative. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the specified number of elements to take has not yet been reached * * '''Backpressures when''' downstream backpressures @@ -1257,7 +1276,7 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * * See also [[Flow.take]], [[Flow.takeWithin]], [[Flow.takeWhile]] */ - def limitWeighted(n: Long)(costFn: function.Function[Out, Long]): javadsl.Source[Out, Mat] = { + def limitWeighted(n: Long)(costFn: function.Function[Out, java.lang.Long]): javadsl.Source[Out, Mat] = { new Source(delegate.limitWeighted(n)(costFn.apply)) } @@ -1289,6 +1308,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * [[akka.stream.Supervision#restart]] current value starts at `zero` again * the stream will continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the function scanning the element returns a new element * * '''Backpressures when''' downstream backpressures @@ -1314,6 +1335,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * [[akka.stream.Supervision.Resume]] current value starts at the previous * current value, or zero when it doesn't have one, and the stream will continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the future returned by f` completes * * '''Backpressures when''' downstream backpressures @@ -1331,6 +1354,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * after which it also completes. Applies the given function `f` towards its current and next value, * yielding the next current value. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * If the function `f` throws an exception and the supervision decision is * [[akka.stream.Supervision#restart]] current value starts at `zero` again * the stream will continue. @@ -1351,6 +1376,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * Applies the given function towards its current and next value, * yielding the next current value. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
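The `limitWeighted` signature change above only switches the Java DSL cost function to `java.lang.Long`; the behaviour is unchanged. A rough Scala DSL sketch of the operator those methods delegate to, using a made-up 1024-byte budget:

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import akka.util.ByteString
import scala.concurrent.Future

object LimitWeightedExample extends App {
  implicit val system = ActorSystem("limit-weighted-example")
  implicit val materializer = ActorMaterializer()

  val frames = Source(List(ByteString("abc"), ByteString("defg"), ByteString("hi")))

  // Collect into a strict Seq, but fail the stream (StreamLimitReachedException)
  // if the accumulated cost - here the byte size of the elements - exceeds 1024.
  val limited: Future[Seq[ByteString]] =
    frames
      .limitWeighted(1024L)(_.size.toLong) // the cost of an element is its size in bytes
      .runWith(Sink.seq)
}
```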
+ * * If the function `f` returns a failure and the supervision decision is * [[akka.stream.Supervision.Restart]] current value starts at `zero` again * the stream will continue. @@ -1370,6 +1397,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * Applies the given function towards its current and next value, * yielding the next current value. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' upstream completes * * '''Backpressures when''' downstream backpressures @@ -1478,7 +1507,7 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise * IllegalArgumentException is thrown. */ - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, Long], d: FiniteDuration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] = + def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: FiniteDuration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] = new Source(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava)) /** @@ -1561,6 +1590,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * Discard elements at the beginning of the stream while predicate is true. * No elements will be dropped after predicate first time returned false. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' predicate returned false and for all following stream elements * * '''Backpressures when''' predicate returned false and downstream backpressures @@ -1624,6 +1655,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' downstream stops backpressuring and there is a conflated element available * * '''Backpressures when''' never @@ -1650,6 +1683,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' downstream stops backpressuring and there is a conflated element available * * '''Backpressures when''' never @@ -1673,6 +1708,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
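For `conflateWithSeed`/`conflate`, which gain the supervision note above, a small sketch of conflation against a slow consumer; the throttle rates are arbitrary and only simulate backpressure:

```scala
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ThrottleMode }
import akka.stream.scaladsl.Source
import scala.concurrent.duration._

object ConflateExample extends App {
  implicit val system = ActorSystem("conflate-example")
  implicit val materializer = ActorMaterializer()

  Source(1 to 1000)
    .conflateWithSeed(elem ⇒ List(elem))((acc, elem) ⇒ elem :: acc) // collapse elements while downstream is busy
    .throttle(1, 100.millis, 1, ThrottleMode.Shaping)               // simulate a slow consumer
    .runForeach(batch ⇒ println(s"got ${batch.size} conflated elements"))
}
```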
+ * * '''Emits when''' downstream stops backpressuring and there is an aggregated element available * * '''Backpressures when''' there are `max` batched elements and 1 pending element and downstream backpressures @@ -1718,7 +1755,7 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new batch */ - def batchWeighted[S](max: Long, costFn: function.Function[Out, Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] = + def batchWeighted[S](max: Long, costFn: function.Function[Out, java.lang.Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] = new Source(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply)) /** @@ -1822,6 +1859,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * is [[akka.stream.Supervision#resume]] or [[akka.stream.Supervision#restart]] * the element is dropped and the stream and substreams continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' an element for which the grouping function returns a group that has not yet been created. * Emits the new group * @@ -2203,6 +2242,8 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * * Uses the given [[LoggingAdapter]] for logging. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element * * '''Backpressures when''' downstream backpressures diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala b/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala index 71cba30fdf..db71fbc078 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala @@ -89,8 +89,10 @@ object StreamConverters { /** * Creates a Source from an [[java.io.InputStream]] created by the given function. - * Emitted elements are `chunkSize` sized [[akka.util.ByteString]] elements, - * except the final element, which will be up to `chunkSize` in size. + * Emitted elements are up to `chunkSize` sized [[akka.util.ByteString]] elements. + * The actual size of emitted elements depends how much data the underlying + * [[java.io.InputStream]] returns on each read invocation. Such chunks will + * never be larger than chunkSize though. * * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or * set it for a given Source by using [[akka.stream.ActorAttributes]]. @@ -104,8 +106,10 @@ object StreamConverters { /** * Creates a Source from an [[java.io.InputStream]] created by the given function. - * Emitted elements are [[ByteString]] elements, chunked by default by 8192 bytes, - * except the last element, which will be up to 8192 in size. + * Emitted elements are up to 8192 bytes sized [[akka.util.ByteString]] elements. + * The actual size of emitted elements depends how much data the underlying + * [[java.io.InputStream]] returns on each read invocation. Such chunks will + * never be larger than chunkSize though. 
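The reworded chunk-size documentation for `StreamConverters.fromInputStream` can be seen in action with a short sketch; the in-memory `ByteArrayInputStream` merely stands in for a real blocking stream:

```scala
import java.io.ByteArrayInputStream
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.stream.scaladsl.{ Sink, StreamConverters }
import akka.util.ByteString
import scala.concurrent.Future

object InputStreamSourceExample extends App {
  implicit val system = ActorSystem("input-stream-example")
  implicit val materializer = ActorMaterializer()

  val bytes = ("x" * 10000).getBytes("UTF-8")

  // Each emitted ByteString is *up to* chunkSize bytes: a read that returns less
  // data than chunkSize yields a smaller element, never a larger one.
  val ioResult: Future[IOResult] =
    StreamConverters
      .fromInputStream(() ⇒ new ByteArrayInputStream(bytes), chunkSize = 4096)
      .to(Sink.foreach[ByteString](chunk ⇒ println(s"chunk of ${chunk.size} bytes")))
      .run() // keeps the source's materialized Future[IOResult]
}
```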
* * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or * set it for a given Source by using [[akka.stream.ActorAttributes]]. diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala index f545fb98c6..c4dac6041b 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala @@ -106,6 +106,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * Transform this stream by applying the given function to each of the elements * as they pass through this processing step. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element * * '''Backpressures when''' downstream backpressures @@ -155,6 +157,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * The returned `Iterable` MUST NOT contain `null` values, * as they are illegal as stream elements - according to the Reactive Streams specification. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element or there are still remaining elements * from the previously calculated collection * @@ -189,6 +193,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * * The function `f` is always invoked on the elements in the order they arrive. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the CompletionStage returned by the provided function finishes for the next element in sequence * * '''Backpressures when''' the number of CompletionStages reaches the configured parallelism and the downstream @@ -222,6 +228,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * The function `f` is always invoked on the elements in the order they arrive (even though the result of the CompletionStages * returned by `f` might be emitted in a different order). * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' any of the CompletionStages returned by the provided function complete * * '''Backpressures when''' the number of CompletionStages reaches the configured parallelism and the downstream backpressures @@ -238,6 +246,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo /** * Only pass on those elements that satisfy the given predicate. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the given predicate returns true for the element * * '''Backpressures when''' the given predicate returns true for the element and downstream backpressures @@ -253,6 +263,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo /** * Only pass on those elements that NOT satisfy the given predicate. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the given predicate returns false for the element * * '''Backpressures when''' the given predicate returns false for the element and downstream backpressures @@ -269,6 +281,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * on which the function is defined as they pass through this processing step. * Non-matching elements are filtered out. 
* + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the provided partial function is defined for the element * * '''Backpressures when''' the partial function is defined for the element and downstream backpressures @@ -334,6 +348,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * The stream will be completed without producing any elements if `n` is zero * or negative. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the specified number of elements to take has not yet been reached * * '''Backpressures when''' downstream backpressures @@ -344,7 +360,7 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * * See also [[Flow.take]], [[Flow.takeWithin]], [[Flow.takeWhile]] */ - def limitWeighted(n: Long)(costFn: function.Function[Out, Long]): javadsl.SubFlow[In, Out, Mat] = { + def limitWeighted(n: Long)(costFn: function.Function[Out, java.lang.Long]): javadsl.SubFlow[In, Out, Mat] = { new SubFlow(delegate.limitWeighted(n)(costFn.apply)) } @@ -377,6 +393,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * [[akka.stream.Supervision#restart]] current value starts at `zero` again * the stream will continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the function scanning the element returns a new element * * '''Backpressures when''' downstream backpressures @@ -402,6 +420,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * [[akka.stream.Supervision.Resume]] current value starts at the previous * current value, or zero when it doesn't have one, and the stream will continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the future returned by f` completes * * '''Backpressures when''' downstream backpressures @@ -420,6 +440,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * after which it also completes. Applies the given function `f` towards its current and next value, * yielding the next current value. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * If the function `f` throws an exception and the supervision decision is * [[akka.stream.Supervision#restart]] current value starts at `zero` again * the stream will continue. @@ -440,6 +462,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * Applies the given function towards its current and next value, * yielding the next current value. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * If the function `f` returns a failure and the supervision decision is * [[akka.stream.Supervision.Restart]] current value starts at `zero` again * the stream will continue. @@ -459,6 +483,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * Applies the given function towards its current and next value, * yielding the next current value. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' upstream completes * * '''Backpressures when''' downstream backpressures @@ -568,7 +594,7 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise * IllegalArgumentException is thrown. 
*/ - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, Long], d: FiniteDuration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] = + def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: FiniteDuration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] = new SubFlow(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava)) /** @@ -656,6 +682,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * The stream will be completed without producing any elements if predicate is false for * the first stream element. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the predicate is true * * '''Backpressures when''' downstream backpressures @@ -671,6 +699,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * Discard elements at the beginning of the stream while predicate is true. * All elements will be taken after predicate returns false first time. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' predicate returned false and for all following stream elements * * '''Backpressures when''' predicate returned false and downstream backpressures @@ -824,6 +854,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' downstream stops backpressuring and there is a conflated element available * * '''Backpressures when''' never @@ -852,6 +884,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' downstream stops backpressuring and there is a conflated element available * * '''Backpressures when''' never @@ -876,6 +910,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
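`groupedWeightedWithin` likewise now takes a `java.lang.Long` cost function on the Java side; a hedged Scala DSL sketch of the underlying operator, grouping by accumulated byte size or elapsed time (both limits are invented for the example):

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import scala.concurrent.duration._

object GroupedWeightedWithinExample extends App {
  implicit val system = ActorSystem("grouped-weighted-within-example")
  implicit val materializer = ActorMaterializer()

  Source.repeat(ByteString("0123456789"))
    .take(1000)
    // Emit a group as soon as roughly 4 kB has accumulated or 100 ms has passed,
    // whichever comes first; the cost of each element is its size in bytes.
    .groupedWeightedWithin(4096L, 100.millis)(_.size.toLong)
    .runForeach(group ⇒ println(s"group of ${group.size} elements"))
}
```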
+ * * '''Emits when''' downstream stops backpressuring and there is an aggregated element available * * '''Backpressures when''' there are `max` batched elements and 1 pending element and downstream backpressures @@ -921,7 +957,7 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new batch */ - def batchWeighted[S](max: Long, costFn: function.Function[Out, Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] = + def batchWeighted[S](max: Long, costFn: function.Function[Out, java.lang.Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] = new SubFlow(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply)) /** @@ -1434,6 +1470,8 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * * Uses the given [[LoggingAdapter]] for logging. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element * * '''Backpressures when''' downstream backpressures diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala index 9d99235db9..fd9ebae68e 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala @@ -106,6 +106,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * Transform this stream by applying the given function to each of the elements * as they pass through this processing step. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element * * '''Backpressures when''' downstream backpressures @@ -155,6 +157,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * The returned `Iterable` MUST NOT contain `null` values, * as they are illegal as stream elements - according to the Reactive Streams specification. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element or there are still remaining elements * from the previously calculated collection * @@ -189,6 +193,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * * The function `f` is always invoked on the elements in the order they arrive. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the CompletionStage returned by the provided function finishes for the next element in sequence * * '''Backpressures when''' the number of CompletionStages reaches the configured parallelism and the downstream @@ -222,6 +228,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * The function `f` is always invoked on the elements in the order they arrive (even though the result of the futures * returned by `f` might be emitted in a different order). * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
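And the same for `batchWeighted`: a sketch of the Scala DSL operator the Java DSL method delegates to, coalescing `ByteString`s while an artificially throttled downstream is busy, up to an assumed 16 kB weight:

```scala
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ThrottleMode }
import akka.stream.scaladsl.Source
import akka.util.ByteString
import scala.concurrent.duration._

object BatchWeightedExample extends App {
  implicit val system = ActorSystem("batch-weighted-example")
  implicit val materializer = ActorMaterializer()

  Source.repeat(ByteString("payload"))
    .take(10000)
    // While the downstream is slow, concatenate buffered elements, but never hold
    // more than ~16 kB (weighted by ByteString size) before backpressuring upstream.
    .batchWeighted(max = 16 * 1024L, costFn = (bs: ByteString) ⇒ bs.size.toLong, seed = (bs: ByteString) ⇒ bs)(_ ++ _)
    .throttle(10, 1.second, 10, ThrottleMode.Shaping) // simulate a slow consumer
    .runForeach(b ⇒ println(s"batched ${b.size} bytes"))
}
```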
+ * * '''Emits when''' any of the CompletionStage returned by the provided function complete * * '''Backpressures when''' the number of CompletionStage reaches the configured parallelism and the downstream backpressures @@ -238,6 +246,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source /** * Only pass on those elements that satisfy the given predicate. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the given predicate returns true for the element * * '''Backpressures when''' the given predicate returns true for the element and downstream backpressures @@ -253,6 +263,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source /** * Only pass on those elements that NOT satisfy the given predicate. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the given predicate returns false for the element * * '''Backpressures when''' the given predicate returns false for the element and downstream backpressures @@ -269,6 +281,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * on which the function is defined as they pass through this processing step. * Non-matching elements are filtered out. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the provided partial function is defined for the element * * '''Backpressures when''' the partial function is defined for the element and downstream backpressures @@ -350,6 +364,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * The stream will be completed without producing any elements if `n` is zero * or negative. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the specified number of elements to take has not yet been reached * * '''Backpressures when''' downstream backpressures @@ -360,7 +376,7 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * * See also [[Flow.take]], [[Flow.takeWithin]], [[Flow.takeWhile]] */ - def limitWeighted(n: Long)(costFn: function.Function[Out, Long]): javadsl.SubSource[Out, Mat] = { + def limitWeighted(n: Long)(costFn: function.Function[Out, java.lang.Long]): javadsl.SubSource[Out, Mat] = { new SubSource(delegate.limitWeighted(n)(costFn.apply)) } @@ -377,6 +393,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * [[akka.stream.Supervision#restart]] current value starts at `zero` again * the stream will continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the function scanning the element returns a new element * * '''Backpressures when''' downstream backpressures @@ -402,6 +420,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * [[akka.stream.Supervision.Resume]] current value starts at the previous * current value, or zero when it doesn't have one, and the stream will continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the future returned by f` completes * * '''Backpressures when''' downstream backpressures @@ -420,6 +440,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * after which it also completes. Applies the given function `f` towards its current and next value, * yielding the next current value. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
+ * * If the function `f` throws an exception and the supervision decision is * [[akka.stream.Supervision#restart]] current value starts at `zero` again * the stream will continue. @@ -459,6 +481,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * Applies the given function towards its current and next value, * yielding the next current value. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' upstream completes * * '''Backpressures when''' downstream backpressures @@ -568,7 +592,7 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise * IllegalArgumentException is thrown. */ - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, Long], d: FiniteDuration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] = + def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: FiniteDuration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] = new SubSource(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava)) /** @@ -628,6 +652,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * The stream will be completed without producing any elements if predicate is false for * the first stream element. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the predicate is true * * '''Backpressures when''' downstream backpressures @@ -643,6 +669,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * Discard elements at the beginning of the stream while predicate is true. * All elements will be taken after predicate returns false first time. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' predicate returned false and for all following stream elements * * '''Backpressures when''' predicate returned false and downstream backpressures @@ -818,6 +846,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' downstream stops backpressuring and there is a conflated element available * * '''Backpressures when''' never @@ -846,6 +876,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' downstream stops backpressuring and there is a conflated element available * * '''Backpressures when''' never @@ -870,6 +902,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
+ * * '''Emits when''' downstream stops backpressuring and there is an aggregated element available * * '''Backpressures when''' there are `max` batched elements and 1 pending element and downstream backpressures @@ -915,7 +949,7 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new batch */ - def batchWeighted[S](max: Long, costFn: function.Function[Out, Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] = + def batchWeighted[S](max: Long, costFn: function.Function[Out, java.lang.Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] = new SubSource(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply)) /** @@ -1422,6 +1456,8 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * * Uses the given [[LoggingAdapter]] for logging. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element * * '''Backpressures when''' downstream backpressures diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala index 45b29664b3..fc433fcf53 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala @@ -57,19 +57,21 @@ final class Flow[-In, +Out, +Mat]( /** * Connect this [[Flow]] to a [[Sink]], concatenating the processing steps of both. * {{{ - * +----------------------------+ - * | Resulting Sink | - * | | - * | +------+ +------+ | - * | | | | | | - * In ~~> | flow | ~Out~> | sink | | - * | | | | | | - * | +------+ +------+ | - * +----------------------------+ + * +------------------------------+ + * | Resulting Sink[In, Mat] | + * | | + * | +------+ +------+ | + * | | | | | | + * In ~~> | flow | ~~Out~~> | sink | | + * | | Mat| | M| | + * | +------+ +------+ | + * +------------------------------+ * }}} * The materialized value of the combined [[Sink]] will be the materialized * value of the current flow (ignoring the given Sink’s value), use * [[Flow#toMat[Mat2* toMat]] if a different strategy is needed. + * + * See also [[toMat]] when access to materialized values of the parameter is needed. */ def to[Mat2](sink: Graph[SinkShape[Out], Mat2]): Sink[In, Mat] = toMat(sink)(Keep.left) @@ -77,12 +79,12 @@ final class Flow[-In, +Out, +Mat]( * Connect this [[Flow]] to a [[Sink]], concatenating the processing steps of both. * {{{ * +----------------------------+ - * | Resulting Sink | + * | Resulting Sink[In, M2] | * | | * | +------+ +------+ | * | | | | | | * In ~~> | flow | ~Out~> | sink | | - * | | | | | | + * | | Mat| | M| | * | +------+ +------+ | * +----------------------------+ * }}} @@ -316,10 +318,25 @@ object Flow { * Creates a `Flow` from a `Sink` and a `Source` where the Flow's input * will be sent to the Sink and the Flow's output will come from the Source. * + * The resulting flow can be visualized as: + * {{{ + * +----------------------------------------------+ + * | Resulting Flow[I, O, NotUsed] | + * | | + * | +---------+ +-----------+ | + * | | | | | | + * I ~~> | Sink[I] | [no-connection!] 
| Source[O] | ~~> O + * | | | | | | + * | +---------+ +-----------+ | + * +----------------------------------------------+ + * }}} + * * The completion of the Sink and Source sides of a Flow constructed using * this method are independent. So if the Sink receives a completion signal, * the Source side will remain unaware of that. If you are looking to couple * the termination signals of the two sides use `Flow.fromSinkAndSourceCoupled` instead. + * + * See also [[fromSinkAndSourceMat]] when access to materialized values of the parameters is needed. */ def fromSinkAndSource[I, O](sink: Graph[SinkShape[I], _], source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] = fromSinkAndSourceMat(sink, source)(Keep.none) @@ -328,6 +345,19 @@ object Flow { * Creates a `Flow` from a `Sink` and a `Source` where the Flow's input * will be sent to the Sink and the Flow's output will come from the Source. * + * The resulting flow can be visualized as: + * {{{ + * +-------------------------------------------------------+ + * | Resulting Flow[I, O, M] | + * | | + * | +-------------+ +---------------+ | + * | | | | | | + * I ~~> | Sink[I, M1] | [no-connection!] | Source[O, M2] | ~~> O + * | | | | | | + * | +-------------+ +---------------+ | + * +------------------------------------------------------+ + * }}} + * * The completion of the Sink and Source sides of a Flow constructed using * this method are independent. So if the Sink receives a completion signal, * the Source side will remain unaware of that. If you are looking to couple @@ -343,6 +373,19 @@ object Flow { * Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them. * Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two stages. * + * The resulting flow can be visualized as: + * {{{ + * +---------------------------------------------+ + * | Resulting Flow[I, O, NotUsed] | + * | | + * | +---------+ +-----------+ | + * | | | | | | + * I ~~> | Sink[I] | ~~~(coupled)~~~ | Source[O] | ~~> O + * | | | | | | + * | +---------+ +-----------+ | + * +---------------------------------------------+ + * }}} + * * E.g. if the emitted [[Flow]] gets a cancellation, the [[Source]] of course is cancelled, * however the Sink will also be completed. The table below illustrates the effects in detail: * @@ -384,6 +427,7 @@ object Flow { * * * + * See also [[fromSinkAndSourceCoupledMat]] when access to materialized values of the parameters is needed. */ def fromSinkAndSourceCoupled[I, O](sink: Graph[SinkShape[I], _], source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] = fromSinkAndSourceCoupledMat(sink, source)(Keep.none) @@ -392,6 +436,19 @@ object Flow { * Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them. * Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two stages. * + * The resulting flow can be visualized as: + * {{{ + * +-----------------------------------------------------+ + * | Resulting Flow[I, O, M] | + * | | + * | +-------------+ +---------------+ | + * | | | | | | + * I ~~> | Sink[I, M1] | ~~~(coupled)~~~ | Source[O, M2] | ~~> O + * | | | | | | + * | +-------------+ +---------------+ | + * +-----------------------------------------------------+ + * }}} + * * E.g. if the emitted [[Flow]] gets a cancellation, the [[Source]] of course is cancelled, * however the Sink will also be completed. The table on [[Flow.fromSinkAndSourceCoupled]] * illustrates the effects in detail. 
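The new ASCII diagrams for `Flow.fromSinkAndSource` and `Flow.fromSinkAndSourceCoupled` can be made concrete with a small sketch; the logging sink and canned source below are stand-ins for e.g. the two halves of a TCP or WebSocket handler:

```scala
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Sink, Source }

object SinkAndSourceFlowExample extends App {
  implicit val system = ActorSystem("sink-and-source-example")
  implicit val materializer = ActorMaterializer()

  val incoming = Sink.foreach[String](msg ⇒ println(s"received: $msg")) // e.g. messages from a peer
  val outgoing = Source(List("hello", "world"))                         // e.g. messages to send back

  // Uncoupled: completion of one side is not propagated to the other.
  val uncoupled = Flow.fromSinkAndSource(incoming, outgoing)

  // Coupled: cancellation, completion or failure on either side terminates both.
  val coupled = Flow.fromSinkAndSourceCoupled(incoming, outgoing)

  // Such a flow is typically joined with e.g. a TCP or WebSocket connection flow;
  // here it is simply driven by a local source for illustration.
  Source(List("ping", "pong")).via(coupled).runWith(Sink.ignore)
}
```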
@@ -474,19 +531,21 @@ trait FlowOps[+Out, +Mat] { /** * Transform this [[Flow]] by appending the given processing steps. * {{{ - * +----------------------------+ - * | Resulting Flow | - * | | - * | +------+ +------+ | - * | | | | | | - * In ~~> | this | ~Out~> | flow | ~~> T - * | | | | | | - * | +------+ +------+ | - * +----------------------------+ + * +---------------------------------+ + * | Resulting Flow[In, T, Mat] | + * | | + * | +------+ +------+ | + * | | | | | | + * In ~~> | this | ~~Out~~> | flow | ~~> T + * | | Mat| | M| | + * | +------+ +------+ | + * +---------------------------------+ * }}} * The materialized value of the combined [[Flow]] will be the materialized * value of the current flow (ignoring the other Flow’s value), use * [[Flow#viaMat viaMat]] if a different strategy is needed. + * + * See also [[FlowOpsMat.viaMat]] when access to materialized values of the parameter is needed. */ def via[T, Mat2](flow: Graph[FlowShape[Out, T], Mat2]): Repr[T] @@ -587,6 +646,8 @@ trait FlowOps[+Out, +Mat] { * Transform this stream by applying the given function to each of the elements * as they pass through this processing step. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element * * '''Backpressures when''' downstream backpressures @@ -628,6 +689,8 @@ trait FlowOps[+Out, +Mat] { * The returned `Iterable` MUST NOT contain `null` values, * as they are illegal as stream elements - according to the Reactive Streams specification. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element or there are still remaining elements * from the previously calculated collection * @@ -661,6 +724,8 @@ trait FlowOps[+Out, +Mat] { * * The function `f` is always invoked on the elements in the order they arrive. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the Future returned by the provided function finishes for the next element in sequence * * '''Backpressures when''' the number of futures reaches the configured parallelism and the downstream @@ -693,6 +758,8 @@ trait FlowOps[+Out, +Mat] { * The function `f` is always invoked on the elements in the order they arrive (even though the result of the futures * returned by `f` might be emitted in a different order). * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' any of the Futures returned by the provided function complete * * '''Backpressures when''' the number of futures reaches the configured parallelism and the downstream backpressures @@ -708,6 +775,8 @@ trait FlowOps[+Out, +Mat] { /** * Only pass on those elements that satisfy the given predicate. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the given predicate returns true for the element * * '''Backpressures when''' the given predicate returns true for the element and downstream backpressures @@ -721,6 +790,8 @@ trait FlowOps[+Out, +Mat] { /** * Only pass on those elements that NOT satisfy the given predicate. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
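The updated `via`/`to` diagrams document which materialized value is kept when stages are composed. A brief sketch contrasting `to` (keeps this flow's value) with `toMat(...)(Keep.right)` (keeps the sink's value); the doubling flow and println sink are only illustrative:

```scala
import akka.{ Done, NotUsed }
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Keep, Sink, Source }
import scala.concurrent.Future

object ComposeMatExample extends App {
  implicit val system = ActorSystem("compose-mat-example")
  implicit val materializer = ActorMaterializer()

  val flow: Flow[Int, Int, NotUsed]  = Flow[Int].map(_ * 2)
  val sink: Sink[Int, Future[Done]]  = Sink.foreach[Int](println)

  // `to` keeps the materialized value of the current flow (NotUsed here),
  // exactly as the "Resulting Sink[In, Mat]" diagram describes.
  val keepsFlowValue: Sink[Int, NotUsed] = flow.to(sink)

  // `toMat(..., Keep.right)` keeps the sink's Future[Done] instead.
  val keepsSinkValue: Sink[Int, Future[Done]] = flow.toMat(sink)(Keep.right)

  val done: Future[Done] = Source(1 to 3).runWith(keepsSinkValue)
}
```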
+ * * '''Emits when''' the given predicate returns false for the element * * '''Backpressures when''' the given predicate returns false for the element and downstream backpressures @@ -762,6 +833,8 @@ trait FlowOps[+Out, +Mat] { * The stream will be completed without producing any elements if predicate is false for * the first stream element. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the predicate is true * * '''Backpressures when''' downstream backpressures @@ -778,6 +851,8 @@ trait FlowOps[+Out, +Mat] { * Discard elements at the beginning of the stream while predicate is true. * All elements will be taken after predicate returns false first time. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' predicate returned false and for all following stream elements * * '''Backpressures when''' predicate returned false and downstream backpressures @@ -793,6 +868,8 @@ trait FlowOps[+Out, +Mat] { * on which the function is defined as they pass through this processing step. * Non-matching elements are filtered out. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the provided partial function is defined for the element * * '''Backpressures when''' the partial function is defined for the element and downstream backpressures @@ -852,6 +929,8 @@ trait FlowOps[+Out, +Mat] { * requested from upstream publishers that will then not be processed downstream * of this step. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' upstream emits and the accumulated cost has not reached max * * '''Backpressures when''' downstream backpressures @@ -893,6 +972,8 @@ trait FlowOps[+Out, +Mat] { * [[akka.stream.Supervision.Restart]] current value starts at `zero` again * the stream will continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the function scanning the element returns a new element * * '''Backpressures when''' downstream backpressures @@ -919,6 +1000,8 @@ trait FlowOps[+Out, +Mat] { * [[akka.stream.Supervision.Resume]] current value starts at the previous * current value, or zero when it doesn't have one, and the stream will continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the future returned by f` completes * * '''Backpressures when''' downstream backpressures @@ -940,6 +1023,8 @@ trait FlowOps[+Out, +Mat] { * [[akka.stream.Supervision.Restart]] current value starts at `zero` again * the stream will continue. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' upstream completes * * '''Backpressures when''' downstream backpressures @@ -957,6 +1042,8 @@ trait FlowOps[+Out, +Mat] { * Applies the given function towards its current and next value, * yielding the next current value. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * If the function `f` returns a failure and the supervision decision is * [[akka.stream.Supervision.Restart]] current value starts at `zero` again * the stream will continue. @@ -983,6 +1070,8 @@ trait FlowOps[+Out, +Mat] { * which is semantically in-line with that Scala's standard library collections * do in such situations. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
+ * * '''Emits when''' upstream completes * * '''Backpressures when''' downstream backpressures @@ -1205,6 +1294,8 @@ trait FlowOps[+Out, +Mat] { * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' downstream stops backpressuring and there is a conflated element available * * '''Backpressures when''' never @@ -1232,6 +1323,8 @@ trait FlowOps[+Out, +Mat] { * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' downstream stops backpressuring and there is a conflated element available * * '''Backpressures when''' never @@ -1254,6 +1347,8 @@ trait FlowOps[+Out, +Mat] { * This only rolls up elements if the upstream is faster, but if the downstream is faster it will not * duplicate elements. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' downstream stops backpressuring and there is an aggregated element available * * '''Backpressures when''' there are `max` batched elements and 1 pending element and downstream backpressures @@ -1404,6 +1499,8 @@ trait FlowOps[+Out, +Mat] { * * Function `f` MUST NOT return `null`. This will throw exception and trigger supervision decision mechanism. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' an element for which the grouping function returns a group that has not yet been created. * Emits the new group * @@ -1783,6 +1880,8 @@ trait FlowOps[+Out, +Mat] { * Uses implicit [[LoggingAdapter]] if available, otherwise uses an internally created one, * which uses `akka.stream.Log` as it's source (use this class to configure slf4j loggers). * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * '''Emits when''' the mapping function returns an element * * '''Backpressures when''' downstream backpressures @@ -2043,19 +2142,22 @@ trait FlowOps[+Out, +Mat] { /** * Connect this [[Flow]] to a [[Sink]], concatenating the processing steps of both. * {{{ - * +----------------------------+ - * | Resulting Sink | - * | | - * | +------+ +------+ | - * | | | | | | - * In ~~> | flow | ~Out~> | sink | | - * | | | | | | - * | +------+ +------+ | - * +----------------------------+ + * +------------------------------+ + * | Resulting Sink[In, Mat] | + * | | + * | +------+ +------+ | + * | | | | | | + * In ~~> | flow | ~~Out~~> | sink | | + * | | Mat| | M| | + * | +------+ +------+ | + * +------------------------------+ * }}} + * * The materialized value of the combined [[Sink]] will be the materialized * value of the current flow (ignoring the given Sink’s value), use * [[Flow#toMat[Mat2* toMat]] if a different strategy is needed. + * + * See also [[FlowOpsMat.toMat]] when access to materialized values of the parameter is needed. */ def to[Mat2](sink: Graph[SinkShape[Out], Mat2]): Closed @@ -2124,15 +2226,15 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { /** * Transform this [[Flow]] by appending the given processing steps. 
* {{{ - * +----------------------------+ - * | Resulting Flow | - * | | - * | +------+ +------+ | - * | | | | | | - * In ~~> | this | ~Out~> | flow | ~~> T - * | | | | | | - * | +------+ +------+ | - * +----------------------------+ + * +---------------------------------+ + * | Resulting Flow[In, T, M2] | + * | | + * | +------+ +------+ | + * | | | | | | + * In ~~> | this | ~~Out~~> | flow | ~~> T + * | | Mat| | M| | + * | +------+ +------+ | + * +---------------------------------+ * }}} * The `combine` function is used to compose the materialized values of this flow and that * flow into the materialized value of the resulting Flow. diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala index 00aab0b6b6..c67cdda2b0 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala @@ -39,7 +39,7 @@ object Framing { * Creates a Flow that decodes an incoming stream of unstructured byte chunks into a stream of frames, assuming that * incoming frames have a field that encodes their length. * - * If the input stream finishes before the last frame has been fully decoded this Flow will fail the stream reporting + * If the input stream finishes before the last frame has been fully decoded, this Flow will fail the stream reporting * a truncated frame. * * @param fieldLength The length of the "size" field in bytes @@ -59,6 +59,36 @@ object Framing { .named("lengthFieldFraming") } + /** + * Creates a Flow that decodes an incoming stream of unstructured byte chunks into a stream of frames, assuming that + * incoming frames have a field that encodes their length. + * + * If the input stream finishes before the last frame has been fully decoded, this Flow will fail the stream reporting + * a truncated frame. + * + * @param fieldLength The length of the "size" field in bytes + * @param fieldOffset The offset of the field from the beginning of the frame in bytes + * @param maximumFrameLength The maximum length of allowed frames while decoding. If the maximum length is exceeded + * this Flow will fail the stream. This length *includes* the header (i.e the offset and + * the length of the size field) + * @param byteOrder The ''ByteOrder'' to be used when decoding the field + * @param computeFrameSize This function can be supplied if frame size is varied or needs to be computed in a special fashion. + * For example, frame can have a shape like this: `[offset bytes][body size bytes][body bytes][footer bytes]`. + * Then computeFrameSize can be used to compute the frame size: `(offset bytes, computed size) => (actual frame size)`. + * ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the stage fails otherwise. + * + */ + def lengthField( + fieldLength: Int, + fieldOffset: Int, + maximumFrameLength: Int, + byteOrder: ByteOrder, + computeFrameSize: (Array[Byte], Int) ⇒ Int): Flow[ByteString, ByteString, NotUsed] = { + require(fieldLength >= 1 && fieldLength <= 4, "Length field length must be 1, 2, 3 or 4.") + Flow[ByteString].via(new LengthFieldFramingStage(fieldLength, fieldOffset, maximumFrameLength, byteOrder, Some(computeFrameSize))) + .named("lengthFieldFraming") + } + /** * Returns a BidiFlow that implements a simple framing protocol. 
This is a convenience wrapper over [[Framing#lengthField]] * and simply attaches a length field header of four bytes (using big endian encoding) to outgoing messages, and decodes @@ -224,7 +254,18 @@ object Framing { val lengthFieldLength: Int, val lengthFieldOffset: Int, val maximumFrameLength: Int, - val byteOrder: ByteOrder) extends GraphStage[FlowShape[ByteString, ByteString]] { + val byteOrder: ByteOrder, + computeFrameSize: Option[(Array[Byte], Int) ⇒ Int]) extends GraphStage[FlowShape[ByteString, ByteString]] { + + //for the sake of binary compatibility + def this( + lengthFieldLength: Int, + lengthFieldOffset: Int, + maximumFrameLength: Int, + byteOrder: ByteOrder) { + this(lengthFieldLength, lengthFieldOffset, maximumFrameLength, byteOrder, None) + } + private val minimumChunkSize = lengthFieldOffset + lengthFieldLength private val intDecoder = byteOrder match { case ByteOrder.BIG_ENDIAN ⇒ bigEndianDecoder @@ -263,11 +304,16 @@ object Framing { pushFrame() } else if (buffSize >= minimumChunkSize) { val parsedLength = intDecoder(buffer.iterator.drop(lengthFieldOffset), lengthFieldLength) - frameSize = parsedLength + minimumChunkSize + frameSize = computeFrameSize match { + case Some(f) ⇒ f(buffer.take(lengthFieldOffset).toArray, parsedLength) + case None ⇒ parsedLength + minimumChunkSize + } if (frameSize > maximumFrameLength) { failStage(new FramingException(s"Maximum allowed frame size is $maximumFrameLength but decoded frame header reported size $frameSize")) } else if (parsedLength < 0) { failStage(new FramingException(s"Decoded frame header reported negative size $parsedLength")) + } else if (frameSize < minimumChunkSize) { + failStage(new FramingException(s"Computed frame size $frameSize is less than minimum chunk size $minimumChunkSize")) } else if (buffSize >= frameSize) { pushFrame() } else tryPull() diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala index c470cfef62..781b0deb9f 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala @@ -6,7 +6,6 @@ package akka.stream.scaladsl import java.util.SplittableRandom import akka.NotUsed -import akka.dispatch.forkjoin.ThreadLocalRandom import akka.stream._ import akka.stream.impl._ import akka.stream.impl.fusing.GraphStages @@ -877,9 +876,9 @@ object ZipWith extends ZipWithApply * * An `Unzip` has one `in` port and one `left` and one `right` output port. * - * '''Emits when''' all of the outputs stops backpressuring and there is an input element available + * '''Emits when''' all of the outputs stop backpressuring and there is an input element available * - * '''Backpressures when''' any of the outputs backpressures + * '''Backpressures when''' any of the outputs backpressure * * '''Completes when''' upstream completes * @@ -893,7 +892,17 @@ object Unzip { } /** - * Combine the elements of multiple streams into a stream of the combined elements. + * Takes a stream of pair elements and splits each pair to two output streams. + * + * An `Unzip` has one `in` port and one `left` and one `right` output port. 
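The new `lengthField` overload above takes a `computeFrameSize` function so that bytes outside the size field (offset header, footer) can be included in the frame length. A hedged sketch for a hypothetical wire format `[1 offset byte][2-byte size][body][2-byte footer]`, where the size field counts only the body:

```scala
import java.nio.ByteOrder
import akka.NotUsed
import akka.stream.scaladsl.{ Flow, Framing }
import akka.util.ByteString

object LengthFieldFramingExample {
  // The parsed length only covers the body, so the actual frame size is
  // offset (1) + size field (2) + body + footer (2); computeFrameSize adds those extras.
  val frames: Flow[ByteString, ByteString, NotUsed] =
    Framing.lengthField(
      fieldLength = 2,
      fieldOffset = 1,
      maximumFrameLength = 64 * 1024,
      byteOrder = ByteOrder.BIG_ENDIAN,
      computeFrameSize = (_: Array[Byte], bodySize: Int) ⇒ 1 + 2 + bodySize + 2)
}
```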
+ * + * '''Emits when''' all of the outputs stop backpressuring and there is an input element available + * + * '''Backpressures when''' any of the outputs backpressure + * + * '''Completes when''' upstream completes + * + * '''Cancels when''' any downstream cancels */ final class Unzip[A, B]() extends UnzipWith2[(A, B), A, B](ConstantFun.scalaIdentityFunction) { override def toString = "Unzip" @@ -902,9 +911,9 @@ final class Unzip[A, B]() extends UnzipWith2[(A, B), A, B](ConstantFun.scalaIden /** * Transforms each element of input stream into multiple streams using a splitter function. * - * '''Emits when''' all of the outputs stops backpressuring and there is an input element available + * '''Emits when''' all of the outputs stop backpressuring and there is an input element available * - * '''Backpressures when''' any of the outputs backpressures + * '''Backpressures when''' any of the outputs backpressure * * '''Completes when''' upstream completes * diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala index e14b444f9a..0d0f13a480 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala @@ -3,6 +3,7 @@ */ package akka.stream.scaladsl +import java.util import java.util.concurrent.atomic.{ AtomicLong, AtomicReference } import akka.NotUsed @@ -13,6 +14,17 @@ import akka.stream.stage._ import scala.annotation.tailrec import scala.concurrent.{ Future, Promise } import scala.util.{ Failure, Success, Try } +import java.util.Arrays +import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.atomic.AtomicReferenceArray + +import scala.collection.immutable +import scala.collection.mutable.LongMap +import scala.collection.immutable.Queue +import akka.annotation.InternalApi +import akka.annotation.DoNotInherit +import akka.annotation.ApiMayChange /** * A MergeHub is a special streaming hub that is able to collect streamed elements from a dynamic set of @@ -107,8 +119,7 @@ private[akka] class MergeHub[T](perProducerBufferSize: Int) extends GraphStageWi private[this] val demands = scala.collection.mutable.LongMap.empty[InputState] private[this] val wakeupCallback = getAsyncCallback[NotUsed]((_) ⇒ // We are only allowed to dequeue if we are not backpressured. See comment in tryProcessNext() for details. - if (isAvailable(out)) tryProcessNext(firstAttempt = true) - ) + if (isAvailable(out)) tryProcessNext(firstAttempt = true)) setHandler(out, this) @@ -291,7 +302,7 @@ object BroadcastHub { * Creates a [[Sink]] that receives elements from its upstream producer and broadcasts them to a dynamic set * of consumers. After the [[Sink]] returned by this method is materialized, it returns a [[Source]] as materialized * value. This [[Source]] can be materialized an arbitrary number of times and each materialization will receive the - * broadcast elements form the ofiginal [[Sink]]. + * broadcast elements from the original [[Sink]]. * * Every new materialization of the [[Sink]] results in a new, independent hub, which materializes to its own * [[Source]] for consuming the [[Sink]] of that materialization. @@ -693,3 +704,554 @@ private[akka] class BroadcastHub[T](bufferSize: Int) extends GraphStageWithMater (logic, Source.fromGraph(source)) } } + +/** + * A `PartitionHub` is a special streaming hub that is able to route streamed elements to a dynamic set of consumers. 
+ * It consists of two parts, a [[Sink]] and a [[Source]]. The [[Sink]] routes elements from a producer to the + * actually live consumers it has. The selection of the consumer is done with a function. Each element can be routed to + * only one consumer. Once the producer has been materialized, the [[Sink]] it feeds into returns a + * materialized value which is the corresponding [[Source]]. This [[Source]] can be materialized an arbitrary number + * of times, where each of the new materializations will receive their elements from the original [[Sink]]. + */ +object PartitionHub { + + /** + * INTERNAL API + */ + @InternalApi private[akka] val defaultBufferSize = 256 + + /** + * Creates a [[Sink]] that receives elements from its upstream producer and routes them to a dynamic set + * of consumers. After the [[Sink]] returned by this method is materialized, it returns a [[Source]] as materialized + * value. This [[Source]] can be materialized an arbitrary number of times and each materialization will receive the + * elements from the original [[Sink]]. + * + * Every new materialization of the [[Sink]] results in a new, independent hub, which materializes to its own + * [[Source]] for consuming the [[Sink]] of that materialization. + * + * If the original [[Sink]] is failed, then the failure is immediately propagated to all of its materialized + * [[Source]]s (possibly jumping over already buffered elements). If the original [[Sink]] is completed, then + * all corresponding [[Source]]s are completed. Both failure and normal completion are "remembered" and later + * materializations of the [[Source]] will see the same (failure or completion) state. [[Source]]s that are + * cancelled are simply removed from the dynamic set of consumers. + * + * This `statefulSink` should be used when there is a need to keep mutable state in the partition function, + * e.g. for implementing round-robin or sticky session kind of routing. If state is not needed, the [[#sink]] can + * be more convenient to use. + * + * @param partitioner Function that decides where to route an element. It is a factory of a function to + * be able to hold stateful variables that are unique for each materialization. The function + * takes two parameters; the first is information about active consumers, including an array of consumer + * identifiers, and the second is the stream element. The function should return the selected consumer + * identifier for the given element. The function will never be called when there are no active consumers, + * i.e. there is always at least one element in the array of identifiers. + * @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected. + * This is only used initially when the stage is starting up, i.e. it is not honored when consumers have + * been removed (canceled). + * @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer + * is backpressured. + */ + @ApiMayChange def statefulSink[T](partitioner: () ⇒ (ConsumerInfo, T) ⇒ Long, startAfterNrOfConsumers: Int, + bufferSize: Int = defaultBufferSize): Sink[T, Source[T, NotUsed]] = + Sink.fromGraph(new PartitionHub[T](partitioner, startAfterNrOfConsumers, bufferSize)) + + /** + * Creates a [[Sink]] that receives elements from its upstream producer and routes them to a dynamic set + * of consumers. After the [[Sink]] returned by this method is materialized, it returns a [[Source]] as materialized + * value.
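As an aside, here is a hedged usage sketch of the `statefulSink` API added above, in the spirit of the round-robin routing mentioned in its scaladoc. The system name, element values and consumer count are illustrative assumptions, not part of the patch:

```scala
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Keep, PartitionHub, RunnableGraph, Source }

object PartitionHubRoundRobinSketch extends App {
  implicit val system = ActorSystem("partition-hub-sketch")
  implicit val materializer = ActorMaterializer()

  // A fresh partitioner (with its own mutable counter) is created per materialization.
  def roundRobin(): (PartitionHub.ConsumerInfo, String) => Long = {
    var i = -1L
    (info, _) => {
      i += 1
      info.consumerIdByIdx((i % info.size).toInt)
    }
  }

  val producer = Source(0 until 10).map(i => s"msg-$i")

  val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
    producer.toMat(
      PartitionHub.statefulSink(() => roundRobin(), startAfterNrOfConsumers = 2))(Keep.right)

  // Each run of the inner Source attaches one more consumer to the hub.
  val fromProducer: Source[String, NotUsed] = runnableGraph.run()
  fromProducer.runForeach(msg => println(s"consumer1: $msg"))
  fromProducer.runForeach(msg => println(s"consumer2: $msg"))
}
```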
This [[Source]] can be materialized an arbitrary number of times and each materialization will receive the + * elements from the original [[Sink]]. + * + * Every new materialization of the [[Sink]] results in a new, independent hub, which materializes to its own + * [[Source]] for consuming the [[Sink]] of that materialization. + * + * If the original [[Sink]] is failed, then the failure is immediately propagated to all of its materialized + * [[Source]]s (possibly jumping over already buffered elements). If the original [[Sink]] is completed, then + * all corresponding [[Source]]s are completed. Both failure and normal completion are "remembered" and later + * materializations of the [[Source]] will see the same (failure or completion) state. [[Source]]s that are + * cancelled are simply removed from the dynamic set of consumers. + * + * This `sink` should be used when the routing function is stateless, e.g. based on a hashed value of the + * elements. Otherwise the [[#statefulSink]] can be used to implement more advanced routing logic. + * + * @param partitioner Function that decides where to route an element. The function takes two parameters; + * the first is the number of active consumers, and the second is the stream element. The function should + * return the index of the selected consumer for the given element, i.e. an int greater than or equal to 0 + * and less than the number of consumers. E.g. `(size, elem) => math.abs(elem.hashCode) % size`. + * @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected. + * This is only used initially when the stage is starting up, i.e. it is not honored when consumers have + * been removed (canceled). + * @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer + * is backpressured. + */ + @ApiMayChange + def sink[T](partitioner: (Int, T) ⇒ Int, startAfterNrOfConsumers: Int, + bufferSize: Int = defaultBufferSize): Sink[T, Source[T, NotUsed]] = + statefulSink(() ⇒ (info, elem) ⇒ info.consumerIdByIdx(partitioner(info.size, elem)), startAfterNrOfConsumers, bufferSize) + + @DoNotInherit @ApiMayChange trait ConsumerInfo extends akka.stream.javadsl.PartitionHub.ConsumerInfo { + + /** + * Sequence of all identifiers of current consumers. + * + * Use this method only if you need to enumerate the existing consumer ids. + * When selecting a specific consumerId by its index, prefer using the dedicated [[#consumerIdByIdx]] method instead, + * which is optimised for this use case. + */ + def consumerIds: immutable.IndexedSeq[Long] + + /** Obtain consumer identifier by index */ + def consumerIdByIdx(idx: Int): Long + + /** + * Approximate number of buffered elements for a consumer. + * A larger value than for other consumers could be an indication + * that the consumer is slow. + * + * Note that this is a moving target since the elements are + * consumed concurrently. + */ + def queueSize(consumerId: Long): Int + + /** + * Number of attached consumers.
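A corresponding sketch for the stateless `sink` variant, using the hash-based partitioner its scaladoc itself suggests. This assumes an implicit `ActorSystem` and materializer in scope, as in the previous sketch:

```scala
import akka.NotUsed
import akka.stream.scaladsl.{ Keep, PartitionHub, Source }

// Route each element by a hash of its value; consumers are addressed by index,
// so no per-materialization state is needed.
val hashRouted: Source[Int, NotUsed] =
  Source(0 until 10)
    .toMat(
      PartitionHub.sink[Int](
        (size, elem) => math.abs(elem.hashCode) % size,
        startAfterNrOfConsumers = 2))(Keep.right)
    .run()

hashRouted.runForeach(i => println(s"consumer A: $i"))
hashRouted.runForeach(i => println(s"consumer B: $i"))
```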
+ */ + def size: Int + + } + + /** + * INTERNAL API + */ + @InternalApi private[akka] object Internal { + sealed trait ConsumerEvent + case object Wakeup extends ConsumerEvent + final case class HubCompleted(failure: Option[Throwable]) extends ConsumerEvent + case object Initialize extends ConsumerEvent + + sealed trait HubEvent + case object RegistrationPending extends HubEvent + final case class UnRegister(id: Long) extends HubEvent + final case class NeedWakeup(consumer: Consumer) extends HubEvent + final case class Consumer(id: Long, callback: AsyncCallback[ConsumerEvent]) + case object TryPull extends HubEvent + + case object Completed + + sealed trait HubState + final case class Open(callbackFuture: Future[AsyncCallback[HubEvent]], registrations: List[Consumer]) extends HubState + final case class Closed(failure: Option[Throwable]) extends HubState + + // The reason for the two implementations here is that the common case (as I see it) is to have a few (< 100) + // consumers over the lifetime of the hub but we must of course also support more. + // FixedQueues is more efficient than ConcurrentHashMap so we use that for the first 128 consumers. + private val FixedQueues = 128 + + // Need the queue to be pluggable to be able to use a more performant (less general) + // queue in Artery + trait PartitionQueue { + def init(id: Long): Unit + def totalSize: Int + def size(id: Long): Int + def isEmpty(id: Long): Boolean + def nonEmpty(id: Long): Boolean + def offer(id: Long, elem: Any): Unit + def poll(id: Long): AnyRef + def remove(id: Long): Unit + } + + object ConsumerQueue { + val empty = ConsumerQueue(Queue.empty, 0) + } + + final case class ConsumerQueue(queue: Queue[Any], size: Int) { + def enqueue(elem: Any): ConsumerQueue = + new ConsumerQueue(queue.enqueue(elem), size + 1) + + def isEmpty: Boolean = size == 0 + + def head: Any = queue.head + + def tail: ConsumerQueue = + new ConsumerQueue(queue.tail, size - 1) + } + + class PartitionQueueImpl extends PartitionQueue { + private val queues1 = new AtomicReferenceArray[ConsumerQueue](FixedQueues) + private val queues2 = new ConcurrentHashMap[Long, ConsumerQueue] + private val _totalSize = new AtomicInteger + + override def init(id: Long): Unit = { + if (id < FixedQueues) + queues1.set(id.toInt, ConsumerQueue.empty) + else + queues2.put(id, ConsumerQueue.empty) + } + + override def totalSize: Int = _totalSize.get + + def size(id: Long): Int = { + val queue = + if (id < FixedQueues) queues1.get(id.toInt) + else queues2.get(id) + if (queue eq null) + throw new IllegalArgumentException(s"Invalid stream identifier: $id") + queue.size + } + + override def isEmpty(id: Long): Boolean = { + val queue = + if (id < FixedQueues) queues1.get(id.toInt) + else queues2.get(id) + if (queue eq null) + throw new IllegalArgumentException(s"Invalid stream identifier: $id") + queue.isEmpty + } + + override def nonEmpty(id: Long): Boolean = !isEmpty(id) + + override def offer(id: Long, elem: Any): Unit = { + @tailrec def offer1(): Unit = { + val i = id.toInt + val queue = queues1.get(i) + if (queue eq null) + throw new IllegalArgumentException(s"Invalid stream identifier: $id") + if (queues1.compareAndSet(i, queue, queue.enqueue(elem))) + _totalSize.incrementAndGet() + else + offer1() // CAS failed, retry + } + + @tailrec def offer2(): Unit = { + val queue = queues2.get(id) + if (queue eq null) + throw new IllegalArgumentException(s"Invalid stream identifier: $id") + if (queues2.replace(id, queue, queue.enqueue(elem))) { + _totalSize.incrementAndGet() + } 
else + offer2() // CAS failed, retry + } + + if (id < FixedQueues) offer1() else offer2() + } + + override def poll(id: Long): AnyRef = { + @tailrec def poll1(): AnyRef = { + val i = id.toInt + val queue = queues1.get(i) + if ((queue eq null) || queue.isEmpty) null + else if (queues1.compareAndSet(i, queue, queue.tail)) { + _totalSize.decrementAndGet() + queue.head.asInstanceOf[AnyRef] + } else + poll1() // CAS failed, try again + } + + @tailrec def poll2(): AnyRef = { + val queue = queues2.get(id) + if ((queue eq null) || queue.isEmpty) null + else if (queues2.replace(id, queue, queue.tail)) { + _totalSize.decrementAndGet() + queue.head.asInstanceOf[AnyRef] + } else + poll2() // CAS failed, try again + } + + if (id < FixedQueues) poll1() else poll2() + } + + override def remove(id: Long): Unit = { + (if (id < FixedQueues) queues1.getAndSet(id.toInt, null) + else queues2.remove(id)) match { + case null ⇒ + case queue ⇒ _totalSize.addAndGet(-queue.size) + } + } + + } + } +} + +/** + * INTERNAL API + */ +@InternalApi private[akka] class PartitionHub[T]( + partitioner: () ⇒ (PartitionHub.ConsumerInfo, T) ⇒ Long, + startAfterNrOfConsumers: Int, bufferSize: Int) + extends GraphStageWithMaterializedValue[SinkShape[T], Source[T, NotUsed]] { + import PartitionHub.Internal._ + import PartitionHub.ConsumerInfo + + val in: Inlet[T] = Inlet("PartitionHub.in") + override val shape: SinkShape[T] = SinkShape(in) + + // Need the queue to be pluggable to be able to use a more performant (less general) + // queue in Artery + def createQueue(): PartitionQueue = new PartitionQueueImpl + + private class PartitionSinkLogic(_shape: Shape) + extends GraphStageLogic(_shape) with InHandler { + + // Half of buffer size, rounded up + private val DemandThreshold = (bufferSize / 2) + (bufferSize % 2) + + private val materializedPartitioner = partitioner() + + private val callbackPromise: Promise[AsyncCallback[HubEvent]] = Promise() + private val noRegistrationsState = Open(callbackPromise.future, Nil) + val state = new AtomicReference[HubState](noRegistrationsState) + private var initialized = false + + private val queue = createQueue() + private var pending = Vector.empty[T] + private var consumerInfo: ConsumerInfoImpl = new ConsumerInfoImpl(Vector.empty) + private val needWakeup: LongMap[Consumer] = LongMap.empty + + private var callbackCount = 0L + + private final class ConsumerInfoImpl(val consumers: Vector[Consumer]) + extends ConsumerInfo { info ⇒ + + override def queueSize(consumerId: Long): Int = + queue.size(consumerId) + + override def size: Int = consumers.size + + override def consumerIds: immutable.IndexedSeq[Long] = + consumers.map(_.id) + + override def consumerIdByIdx(idx: Int): Long = + consumers(idx).id + + override def getConsumerIds: java.util.List[Long] = + new util.AbstractList[Long] { + override def get(idx: Int): Long = info.consumerIdByIdx(idx) + override def size(): Int = info.size + } + } + + override def preStart(): Unit = { + setKeepGoing(true) + callbackPromise.success(getAsyncCallback[HubEvent](onEvent)) + if (startAfterNrOfConsumers == 0) + pull(in) + } + + override def onPush(): Unit = { + publish(grab(in)) + if (!isFull) pull(in) + } + + private def isFull: Boolean = { + (queue.totalSize + pending.size) >= bufferSize + } + + private def publish(elem: T): Unit = { + if (!initialized || consumerInfo.consumers.isEmpty) { + // will be published when first consumers are registered + pending :+= elem + } else { + val id = materializedPartitioner(consumerInfo, elem) + queue.offer(id, elem) + 
wakeup(id) + } + } + + private def wakeup(id: Long): Unit = { + needWakeup.get(id) match { + case None ⇒ // ignore + case Some(consumer) ⇒ + needWakeup -= id + consumer.callback.invoke(Wakeup) + } + } + + override def onUpstreamFinish(): Unit = { + if (consumerInfo.consumers.isEmpty) + completeStage() + else { + consumerInfo.consumers.foreach(c ⇒ complete(c.id)) + } + } + + private def complete(id: Long): Unit = { + queue.offer(id, Completed) + wakeup(id) + } + + private def tryPull(): Unit = { + if (initialized && !isClosed(in) && !hasBeenPulled(in) && !isFull) + pull(in) + } + + private def onEvent(ev: HubEvent): Unit = { + callbackCount += 1 + ev match { + case NeedWakeup(consumer) ⇒ + // Also check if the consumer is now unblocked since we published an element since it went asleep. + if (queue.nonEmpty(consumer.id)) + consumer.callback.invoke(Wakeup) + else { + needWakeup.update(consumer.id, consumer) + tryPull() + } + + case TryPull ⇒ + tryPull() + + case RegistrationPending ⇒ + state.getAndSet(noRegistrationsState).asInstanceOf[Open].registrations foreach { consumer ⇒ + val newConsumers = (consumerInfo.consumers :+ consumer).sortBy(_.id) + consumerInfo = new ConsumerInfoImpl(newConsumers) + queue.init(consumer.id) + if (newConsumers.size >= startAfterNrOfConsumers) { + initialized = true + } + + consumer.callback.invoke(Initialize) + + if (initialized && pending.nonEmpty) { + pending.foreach(publish) + pending = Vector.empty[T] + } + + tryPull() + } + + case UnRegister(id) ⇒ + val newConsumers = consumerInfo.consumers.filterNot(_.id == id) + consumerInfo = new ConsumerInfoImpl(newConsumers) + queue.remove(id) + if (newConsumers.isEmpty) { + if (isClosed(in)) completeStage() + } else + tryPull() + } + } + + override def onUpstreamFailure(ex: Throwable): Unit = { + val failMessage = HubCompleted(Some(ex)) + + // Notify pending consumers and set tombstone + state.getAndSet(Closed(Some(ex))).asInstanceOf[Open].registrations foreach { consumer ⇒ + consumer.callback.invoke(failMessage) + } + + // Notify registered consumers + consumerInfo.consumers.foreach { consumer ⇒ + consumer.callback.invoke(failMessage) + } + failStage(ex) + } + + override def postStop(): Unit = { + // Notify pending consumers and set tombstone + + @tailrec def tryClose(): Unit = state.get() match { + case Closed(_) ⇒ // Already closed, ignore + case open: Open ⇒ + if (state.compareAndSet(open, Closed(None))) { + val completedMessage = HubCompleted(None) + open.registrations foreach { consumer ⇒ + consumer.callback.invoke(completedMessage) + } + } else tryClose() + } + + tryClose() + } + + // Consumer API + def poll(id: Long, hubCallback: AsyncCallback[HubEvent]): AnyRef = { + // try pull via async callback when half full + // this is racy with other threads doing poll but doesn't matter + if (queue.totalSize == DemandThreshold) + hubCallback.invoke(TryPull) + + queue.poll(id) + } + + setHandler(in, this) + } + + override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Source[T, NotUsed]) = { + val idCounter = new AtomicLong + + val logic = new PartitionSinkLogic(shape) + + val source = new GraphStage[SourceShape[T]] { + val out: Outlet[T] = Outlet("PartitionHub.out") + override val shape: SourceShape[T] = SourceShape(out) + + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { + private val id = idCounter.getAndIncrement() + private var hubCallback: AsyncCallback[HubEvent] = _ + private val callback = 
getAsyncCallback(onCommand) + private val consumer = Consumer(id, callback) + + private var callbackCount = 0L + + override def preStart(): Unit = { + val onHubReady: Try[AsyncCallback[HubEvent]] ⇒ Unit = { + case Success(callback) ⇒ + hubCallback = callback + callback.invoke(RegistrationPending) + if (isAvailable(out)) onPull() + case Failure(ex) ⇒ + failStage(ex) + } + + @tailrec def register(): Unit = { + logic.state.get() match { + case Closed(Some(ex)) ⇒ failStage(ex) + case Closed(None) ⇒ completeStage() + case previousState @ Open(callbackFuture, registrations) ⇒ + val newRegistrations = consumer :: registrations + if (logic.state.compareAndSet(previousState, Open(callbackFuture, newRegistrations))) { + callbackFuture.onComplete(getAsyncCallback(onHubReady).invoke)(materializer.executionContext) + } else register() + } + } + + register() + + } + + override def onPull(): Unit = { + if (hubCallback ne null) { + val elem = logic.poll(id, hubCallback) + + elem match { + case null ⇒ + hubCallback.invoke(NeedWakeup(consumer)) + case Completed ⇒ + completeStage() + case _ ⇒ + push(out, elem.asInstanceOf[T]) + } + } + } + + override def postStop(): Unit = { + if (hubCallback ne null) + hubCallback.invoke(UnRegister(id)) + } + + private def onCommand(cmd: ConsumerEvent): Unit = { + callbackCount += 1 + cmd match { + case HubCompleted(Some(ex)) ⇒ failStage(ex) + case HubCompleted(None) ⇒ completeStage() + case Wakeup ⇒ + if (isAvailable(out)) onPull() + case Initialize ⇒ + if (isAvailable(out) && (hubCallback ne null)) onPull() + } + } + + setHandler(out, this) + } + } + + (logic, Source.fromGraph(source)) + } +} diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Restart.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Restart.scala new file mode 100644 index 0000000000..329e3eb583 --- /dev/null +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Restart.scala @@ -0,0 +1,356 @@ +/** + * Copyright (C) 2015-2017 Lightbend Inc. + */ +package akka.stream.scaladsl + +import akka.NotUsed +import akka.pattern.BackoffSupervisor +import akka.stream._ +import akka.stream.stage.{ GraphStage, InHandler, OutHandler, TimerGraphStageLogicWithLogging } + +import scala.concurrent.duration.FiniteDuration + +/** + * A RestartSource wraps a [[Source]] that gets restarted when it completes or fails. + * + * They are useful for graphs that need to run for longer than the [[Source]] can necessarily guarantee it will, for + * example, for [[Source]] streams that depend on a remote server that may crash or become partitioned. The + * RestartSource ensures that the graph can continue running while the [[Source]] restarts. + */ +object RestartSource { + + /** + * Wrap the given [[Source]] with a [[Source]] that will restart it when it fails or complete using an exponential + * backoff. + * + * This [[Source]] will never emit a complete or failure, since the completion or failure of the wrapped [[Source]] + * is always handled by restarting it. The wrapped [[Source]] can however be cancelled by cancelling this [[Source]]. + * When that happens, the wrapped [[Source]], if currently running will be cancelled, and it will not be restarted. + * This can be triggered simply by the downstream cancelling, or externally by introducing a [[KillSwitch]] right + * after this [[Source]] in the graph. + * + * This uses the same exponential backoff algorithm as [[akka.pattern.Backoff]]. 
+ * + * @param minBackoff minimum (initial) duration until the child actor will + * started again, if it is terminated + * @param maxBackoff the exponential back-off is capped to this duration + * @param randomFactor after calculation of the exponential back-off an additional + * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. + * In order to skip this additional delay pass in `0`. + * @param sourceFactory A factory for producing the [[Source]] to wrap. + */ + def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)(sourceFactory: () ⇒ Source[T, _]): Source[T, NotUsed] = { + Source.fromGraph(new RestartWithBackoffSource(sourceFactory, minBackoff, maxBackoff, randomFactor)) + } +} + +private final class RestartWithBackoffSource[T]( + sourceFactory: () ⇒ Source[T, _], + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double +) extends GraphStage[SourceShape[T]] { self ⇒ + + val out = Outlet[T]("RestartWithBackoffSource.out") + + override def shape = SourceShape(out) + override def createLogic(inheritedAttributes: Attributes) = new RestartWithBackoffLogic( + "Source", shape, minBackoff, maxBackoff, randomFactor + ) { + + override protected def logSource = self.getClass + + override protected def startGraph() = { + val sinkIn = createSubInlet(out) + sourceFactory().runWith(sinkIn.sink)(subFusingMaterializer) + if (isAvailable(out)) { + sinkIn.pull() + } + } + + override protected def backoff() = { + setHandler(out, new OutHandler { + override def onPull() = () + }) + } + + backoff() + } +} + +/** + * A RestartSink wraps a [[Sink]] that gets restarted when it completes or fails. + * + * They are useful for graphs that need to run for longer than the [[Sink]] can necessarily guarantee it will, for + * example, for [[Sink]] streams that depend on a remote server that may crash or become partitioned. The + * RestartSink ensures that the graph can continue running while the [[Sink]] restarts. + */ +object RestartSink { + + /** + * Wrap the given [[Sink]] with a [[Sink]] that will restart it when it fails or complete using an exponential + * backoff. + * + * This [[Sink]] will never cancel, since cancellation by the wrapped [[Sink]] is always handled by restarting it. + * The wrapped [[Sink]] can however be completed by feeding a completion or error into this [[Sink]]. When that + * happens, the [[Sink]], if currently running, will terminate and will not be restarted. This can be triggered + * simply by the upstream completing, or externally by introducing a [[KillSwitch]] right before this [[Sink]] in the + * graph. + * + * The restart process is inherently lossy, since there is no coordination between cancelling and the sending of + * messages. When the wrapped [[Sink]] does cancel, this [[Sink]] will backpressure, however any elements already + * sent may have been lost. + * + * This uses the same exponential backoff algorithm as [[akka.pattern.Backoff]]. + * + * @param minBackoff minimum (initial) duration until the child actor will + * started again, if it is terminated + * @param maxBackoff the exponential back-off is capped to this duration + * @param randomFactor after calculation of the exponential back-off an additional + * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. + * In order to skip this additional delay pass in `0`. + * @param sinkFactory A factory for producing the [[Sink]] to wrap. 
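As a usage illustration of `RestartSource.withBackoff` defined above, a hedged sketch; the backoff values and the wrapped source are made-up examples:

```scala
import scala.concurrent.duration._

import akka.NotUsed
import akka.stream.scaladsl.{ RestartSource, Source }

// Re-create the wrapped source whenever it completes or fails, backing off
// exponentially between 3 and 30 seconds with 20% random jitter.
val restartingSource: Source[String, NotUsed] =
  RestartSource.withBackoff(
    minBackoff = 3.seconds,
    maxBackoff = 30.seconds,
    randomFactor = 0.2) { () =>
    // Stand-in for e.g. a source backed by a remote service that may drop out.
    Source.single("event")
  }
```

Cancelling `restartingSource` from downstream (for example via a `KillSwitch` placed right after it) is the only way to stop the restart loop, as described in the scaladoc above.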
+ */ + def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)(sinkFactory: () ⇒ Sink[T, _]): Sink[T, NotUsed] = { + Sink.fromGraph(new RestartWithBackoffSink(sinkFactory, minBackoff, maxBackoff, randomFactor)) + } +} + +private final class RestartWithBackoffSink[T]( + sinkFactory: () ⇒ Sink[T, _], + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double +) extends GraphStage[SinkShape[T]] { self ⇒ + + val in = Inlet[T]("RestartWithBackoffSink.in") + + override def shape = SinkShape(in) + override def createLogic(inheritedAttributes: Attributes) = new RestartWithBackoffLogic( + "Sink", shape, minBackoff, maxBackoff, randomFactor + ) { + override protected def logSource = self.getClass + + override protected def startGraph() = { + val sourceOut = createSubOutlet(in) + Source.fromGraph(sourceOut.source).runWith(sinkFactory())(subFusingMaterializer) + } + + override protected def backoff() = { + setHandler(in, new InHandler { + override def onPush() = () + }) + } + + backoff() + } +} + +/** + * A RestartFlow wraps a [[Flow]] that gets restarted when it completes or fails. + * + * They are useful for graphs that need to run for longer than the [[Flow]] can necessarily guarantee it will, for + * example, for [[Flow]] streams that depend on a remote server that may crash or become partitioned. The + * RestartFlow ensures that the graph can continue running while the [[Flow]] restarts. + */ +object RestartFlow { + + /** + * Wrap the given [[Flow]] with a [[Flow]] that will restart it when it fails or complete using an exponential + * backoff. + * + * This [[Flow]] will not cancel, complete or emit a failure, until the opposite end of it has been cancelled or + * completed. Any termination by the [[Flow]] before that time will be handled by restarting it. Any termination + * signals sent to this [[Flow]] however will terminate the wrapped [[Flow]], if it's running, and then the [[Flow]] + * will be allowed to terminate without being restarted. + * + * The restart process is inherently lossy, since there is no coordination between cancelling and the sending of + * messages. A termination signal from either end of the wrapped [[Flow]] will cause the other end to be terminated, + * and any in transit messages will be lost. During backoff, this [[Flow]] will backpressure. + * + * This uses the same exponential backoff algorithm as [[akka.pattern.Backoff]]. + * + * @param minBackoff minimum (initial) duration until the child actor will + * started again, if it is terminated + * @param maxBackoff the exponential back-off is capped to this duration + * @param randomFactor after calculation of the exponential back-off an additional + * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. + * In order to skip this additional delay pass in `0`. + * @param flowFactory A factory for producing the [[Flow]] to wrap. 
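A corresponding sketch for `RestartSink.withBackoff`; again the inner sink and backoff parameters are only illustrative:

```scala
import scala.concurrent.duration._

import akka.NotUsed
import akka.stream.scaladsl.{ RestartSink, Sink }

// Re-create the wrapped sink when it cancels or fails. As noted above, the
// restart is lossy: elements already in flight when it went down may be lost.
val restartingSink: Sink[String, NotUsed] =
  RestartSink.withBackoff(
    minBackoff = 3.seconds,
    maxBackoff = 30.seconds,
    randomFactor = 0.2) { () =>
    // Stand-in for e.g. a producer writing to an external system.
    Sink.foreach[String](line => println(line))
  }
```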
+ */ + def withBackoff[In, Out](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)(flowFactory: () ⇒ Flow[In, Out, _]): Flow[In, Out, NotUsed] = { + Flow.fromGraph(new RestartWithBackoffFlow(flowFactory, minBackoff, maxBackoff, randomFactor)) + } +} + +private final class RestartWithBackoffFlow[In, Out]( + flowFactory: () ⇒ Flow[In, Out, _], + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double +) extends GraphStage[FlowShape[In, Out]] { self ⇒ + + val in = Inlet[In]("RestartWithBackoffFlow.in") + val out = Outlet[Out]("RestartWithBackoffFlow.out") + + override def shape = FlowShape(in, out) + override def createLogic(inheritedAttributes: Attributes) = new RestartWithBackoffLogic( + "Flow", shape, minBackoff, maxBackoff, randomFactor + ) { + + var activeOutIn: Option[(SubSourceOutlet[In], SubSinkInlet[Out])] = None + + override protected def logSource = self.getClass + + override protected def startGraph() = { + val sourceOut = createSubOutlet(in) + val sinkIn = createSubInlet(out) + Source.fromGraph(sourceOut.source).via(flowFactory()).runWith(sinkIn.sink)(subFusingMaterializer) + if (isAvailable(out)) { + sinkIn.pull() + } + activeOutIn = Some((sourceOut, sinkIn)) + } + + override protected def backoff() = { + setHandler(in, new InHandler { + override def onPush() = () + }) + setHandler(out, new OutHandler { + override def onPull() = () + }) + + // We need to ensure that the other end of the sub flow is also completed, so that we don't + // receive any callbacks from it. + activeOutIn.foreach { + case (sourceOut, sinkIn) ⇒ + if (!sourceOut.isClosed) { + sourceOut.complete() + } + if (!sinkIn.isClosed) { + sinkIn.cancel() + } + activeOutIn = None + } + } + + backoff() + } +} + +/** + * Shared logic for all restart with backoff logics. + */ +private abstract class RestartWithBackoffLogic[S <: Shape]( + name: String, + shape: S, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double +) extends TimerGraphStageLogicWithLogging(shape) { + var restartCount = 0 + var resetDeadline = minBackoff.fromNow + // This is effectively only used for flows, if either the main inlet or outlet of this stage finishes, then we + // don't want to restart the sub inlet when it finishes, we just finish normally. 
+ var finishing = false + + protected def startGraph(): Unit + protected def backoff(): Unit + + protected final def createSubInlet[T](out: Outlet[T]): SubSinkInlet[T] = { + val sinkIn = new SubSinkInlet[T](s"RestartWithBackoff$name.subIn") + + sinkIn.setHandler(new InHandler { + override def onPush() = push(out, sinkIn.grab()) + override def onUpstreamFinish() = { + if (finishing) { + complete(out) + } else { + log.debug("Graph out finished") + onCompleteOrFailure() + } + } + override def onUpstreamFailure(ex: Throwable) = { + if (finishing) { + fail(out, ex) + } else { + log.error(ex, "Restarting graph due to failure") + onCompleteOrFailure() + } + } + }) + + setHandler(out, new OutHandler { + override def onPull() = sinkIn.pull() + override def onDownstreamFinish() = { + finishing = true + sinkIn.cancel() + } + }) + + sinkIn + } + + protected final def createSubOutlet[T](in: Inlet[T]): SubSourceOutlet[T] = { + val sourceOut = new SubSourceOutlet[T](s"RestartWithBackoff$name.subOut") + + sourceOut.setHandler(new OutHandler { + override def onPull() = if (isAvailable(in)) { + sourceOut.push(grab(in)) + } else { + if (!hasBeenPulled(in)) { + pull(in) + } + } + override def onDownstreamFinish() = { + if (finishing) { + cancel(in) + } else { + log.debug("Graph in finished") + onCompleteOrFailure() + } + } + }) + + setHandler(in, new InHandler { + override def onPush() = if (sourceOut.isAvailable) { + sourceOut.push(grab(in)) + } + override def onUpstreamFinish() = { + finishing = true + sourceOut.complete() + } + override def onUpstreamFailure(ex: Throwable) = { + finishing = true + sourceOut.fail(ex) + } + }) + + sourceOut + } + + // Set a timer to restart after the calculated delay + protected final def onCompleteOrFailure() = { + // Check if the last start attempt was more than the minimum backoff + if (resetDeadline.isOverdue()) { + log.debug("Last restart attempt was more than {} ago, resetting restart count", minBackoff) + restartCount = 0 + } + + val restartDelay = BackoffSupervisor.calculateDelay(restartCount, minBackoff, maxBackoff, randomFactor) + log.debug("Restarting graph in {}", restartDelay) + scheduleOnce("RestartTimer", restartDelay) + restartCount += 1 + // And while we wait, we go into backoff mode + backoff() + } + + // Invoked when the backoff timer ticks + override protected def onTimer(timerKey: Any) = { + startGraph() + resetDeadline = minBackoff.fromNow + } + + // When the stage starts, start the source + override def preStart() = startGraph() +} diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala index c5ee665b90..1fa929a64d 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala @@ -275,6 +275,8 @@ object Sink { * the reduce stage will fail its downstream with a [[NoSuchElementException]], * which is semantically in-line with that Scala's standard library collections * do in such situations. + * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. */ def reduce[T](f: (T, T) ⇒ T): Sink[T, Future[T]] = Flow[T].reduce(f).toMat(Sink.head)(Keep.right).named("reduceSink") @@ -403,6 +405,8 @@ object Sink { * try to create sink with next element * * `fallback` will be executed when there was no elements and completed is received from upstream. + * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
*/ def lazyInit[T, M](sinkFactory: T ⇒ Future[Sink[T, M]], fallback: () ⇒ M): Sink[T, Future[M]] = Sink.fromGraph(new LazySink(sinkFactory, fallback)) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala index 5b59a3ae2c..12e5a691ac 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala @@ -275,7 +275,7 @@ object Source { * Streams the elements of an asynchronous source once its given `completion` stage completes. * If the `completion` fails the stream is failed with that exception. */ - def fromSourceCompletionStage[T, M](completion: CompletionStage[Graph[SourceShape[T], M]]): Source[T, CompletionStage[M]] = fromFutureSource(completion.toScala).mapMaterializedValue(_.toJava) + def fromSourceCompletionStage[T, M](completion: CompletionStage[_ <: Graph[SourceShape[T], M]]): Source[T, CompletionStage[M]] = fromFutureSource(completion.toScala).mapMaterializedValue(_.toJava) /** * Elements are emitted periodically with the specified interval. @@ -355,16 +355,13 @@ object Source { * with None. */ def maybe[T]: Source[T, Promise[Option[T]]] = - fromGraph(new MaybeSource[T](DefaultAttributes.maybeSource, shape("MaybeSource"))) + Source.fromGraph(MaybeSource.asInstanceOf[Graph[SourceShape[T], Promise[Option[T]]]]) /** * Create a `Source` that immediately ends the stream with the `cause` error to every connected `Sink`. */ def failed[T](cause: Throwable): Source[T, NotUsed] = - fromGraph(new PublisherSource( - ErrorPublisher(cause, "FailedSource")[T], - DefaultAttributes.failedSource, - shape("FailedSource"))) + Source.fromGraph(new FailedSource[T](cause)) /** * Creates a `Source` that is not materialized until there is downstream demand, when the source gets materialized @@ -520,6 +517,8 @@ object Source { * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or * set it for a given Source by using [[ActorAttributes]]. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * @param create - function that is called on stream start and creates/opens resource. * @param read - function that reads data from opened resource. It is called each time backpressure signal * is received. Stream calls close and completes when `read` returns None. @@ -541,6 +540,8 @@ object Source { * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or * set it for a given Source by using [[ActorAttributes]]. * + * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. + * * @param create - function that is called on stream start and creates/opens resource. * @param read - function that reads data from opened resource. It is called each time backpressure signal * is received. Stream calls close and completes when `Future` from read function returns None. diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala index 5657ec5e7a..a2a15fd8c8 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala @@ -29,8 +29,10 @@ object StreamConverters { /** * Creates a Source from an [[InputStream]] created by the given function. 
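The `create`/`read` hooks documented in the Source.scala context above are those of `Source.unfoldResource`; a minimal sketch of how such a resource-backed source is typically used (the file name is a made-up example):

```scala
import java.io.{ BufferedReader, FileReader }

import akka.NotUsed
import akka.stream.scaladsl.Source

// Open the resource when the stream starts, emit one line per read,
// complete when read returns None, then close the reader.
val lines: Source[String, NotUsed] =
  Source.unfoldResource[String, BufferedReader](
    () => new BufferedReader(new FileReader("input.txt")),
    reader => Option(reader.readLine()),
    reader => reader.close())
```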
- * Emitted elements are `chunkSize` sized [[akka.util.ByteString]] elements, - * except the final element, which will be up to `chunkSize` in size. + * Emitted elements are up to `chunkSize` sized [[akka.util.ByteString]] elements. + * The actual size of emitted elements depends how much data the underlying + * [[java.io.InputStream]] returns on each read invocation. Such chunks will + * never be larger than chunkSize though. * * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or * set it for a given Source by using [[akka.stream.ActorAttributes]]. diff --git a/akka-testkit/src/main/mima-filters/2.4.9.backwards.excludes b/akka-testkit/src/main/mima-filters/2.4.9.backwards.excludes new file mode 100644 index 0000000000..672c256ab4 --- /dev/null +++ b/akka-testkit/src/main/mima-filters/2.4.9.backwards.excludes @@ -0,0 +1,4 @@ +# #21201 adding childActorOf to TestActor / TestKit / TestProbe +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.childActorOf$default$3") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.childActorOf$default$2") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.childActorOf") diff --git a/akka-testkit/src/main/mima-filters/2.4.x.backwards.excludes b/akka-testkit/src/main/mima-filters/2.4.x.backwards.excludes new file mode 100644 index 0000000000..a6cb2db2d0 --- /dev/null +++ b/akka-testkit/src/main/mima-filters/2.4.x.backwards.excludes @@ -0,0 +1,4 @@ +# #22374 introduce fishForSpecificMessage in TestKit +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.fishForSpecificMessage$default$1") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.fishForSpecificMessage") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.fishForSpecificMessage$default$2") diff --git a/akka-typed-testkit/src/main/scala/akka/typed/testkit/Effects.scala b/akka-typed-testkit/src/main/scala/akka/typed/testkit/Effects.scala index 2a0be055f2..c551dc42f3 100644 --- a/akka-typed-testkit/src/main/scala/akka/typed/testkit/Effects.scala +++ b/akka-typed-testkit/src/main/scala/akka/typed/testkit/Effects.scala @@ -5,7 +5,7 @@ package akka.typed.testkit import java.util.concurrent.ConcurrentLinkedQueue -import akka.typed.{ ActorContext, ActorRef, ActorSystem, Behavior, EmptyProps, PostStop, Props, Signal } +import akka.typed.{ ActorContext, ActorRef, ActorSystem, Behavior, PostStop, Props, Signal } import scala.annotation.tailrec import scala.collection.immutable @@ -20,7 +20,12 @@ import scala.concurrent.duration.{ Duration, FiniteDuration } abstract class Effect object Effect { - @SerialVersionUID(1L) final case class Spawned(childName: String) extends Effect + + abstract class SpawnedEffect extends Effect + + @SerialVersionUID(1L) final case class Spawned(childName: String, props: Props) extends SpawnedEffect + @SerialVersionUID(1L) final case class SpawnedAnonymous(props: Props) extends SpawnedEffect + @SerialVersionUID(1L) final case object SpawnedAdapter extends SpawnedEffect @SerialVersionUID(1L) final case class Stopped(childName: String) extends Effect @SerialVersionUID(1L) final case class Watched[T](other: ActorRef[T]) extends Effect @SerialVersionUID(1L) final case class Unwatched[T](other: ActorRef[T]) extends Effect @@ -78,18 +83,23 @@ class EffectfulActorContext[T](_name: String, _initialBehavior: Behavior[T], _ma } override def spawnAnonymous[U](behavior: 
Behavior[U], props: Props = Props.empty): ActorRef[U] = { - val ref = super.spawnAnonymous(behavior) - effectQueue.offer(Spawned(ref.path.name)) + val ref = super.spawnAnonymous(behavior, props) + effectQueue.offer(SpawnedAnonymous(props)) ref } - override def spawnAdapter[U](f: U ⇒ T, name: String = ""): ActorRef[U] = { + + override def spawnAdapter[U](f: U ⇒ T): ActorRef[U] = { + spawnAdapter(f, "") + } + + override def spawnAdapter[U](f: U ⇒ T, name: String): ActorRef[U] = { val ref = super.spawnAdapter(f, name) - effectQueue.offer(Spawned(ref.path.name)) + effectQueue.offer(SpawnedAdapter) ref } override def spawn[U](behavior: Behavior[U], name: String, props: Props = Props.empty): ActorRef[U] = { - effectQueue.offer(Spawned(name)) - super.spawn(behavior, name) + effectQueue.offer(Spawned(name, props)) + super.spawn(behavior, name, props) } override def stop[U](child: ActorRef[U]): Boolean = { effectQueue.offer(Stopped(child.path.name)) diff --git a/akka-typed-testkit/src/test/scala/akka/typed/testkit/EffectfulActorContextSpec.scala b/akka-typed-testkit/src/test/scala/akka/typed/testkit/EffectfulActorContextSpec.scala new file mode 100644 index 0000000000..9a6ce3585f --- /dev/null +++ b/akka-typed-testkit/src/test/scala/akka/typed/testkit/EffectfulActorContextSpec.scala @@ -0,0 +1,138 @@ +/** + * Copyright (C) 2014-2017 Lightbend Inc. + */ + +package akka.typed.testkit + +import akka.typed.scaladsl.Actor +import akka.typed.testkit.Effect.{ Spawned, SpawnedAdapter, SpawnedAnonymous } +import akka.typed.testkit.EffectfulActorContextSpec.Father +import akka.typed.testkit.EffectfulActorContextSpec.Father._ +import akka.typed.{ ActorSystem, Behavior, Props } +import org.scalatest.{ FlatSpec, Matchers } + +object EffectfulActorContextSpec { + object Father { + + case class Reproduce(times: Int) + + sealed trait Command + + case class SpawnChildren(numberOfChildren: Int) extends Command + case class SpawnChildrenWithProps(numberOfChildren: Int, props: Props) extends Command + case class SpawnAnonymous(numberOfChildren: Int) extends Command + case class SpawnAnonymousWithProps(numberOfChildren: Int, props: Props) extends Command + case object SpawnAdapter extends Command + case class SpawnAdapterWithName(name: String) extends Command + + def behavior: Behavior[Command] = init() + + def init(): Behavior[Command] = Actor.immutable[Command] { (ctx, msg) ⇒ + msg match { + case SpawnChildren(numberOfChildren) if numberOfChildren > 0 ⇒ + 0.until(numberOfChildren).foreach { i ⇒ + ctx.spawn(Child.initial, s"child$i") + } + Actor.same + case SpawnChildrenWithProps(numberOfChildren, props) if numberOfChildren > 0 ⇒ + 0.until(numberOfChildren).foreach { i ⇒ + ctx.spawn(Child.initial, s"child$i", props) + } + Actor.same + case SpawnAnonymous(numberOfChildren) if numberOfChildren > 0 ⇒ + 0.until(numberOfChildren).foreach { _ ⇒ + ctx.spawnAnonymous(Child.initial) + } + Actor.same + case SpawnAnonymousWithProps(numberOfChildren, props) if numberOfChildren > 0 ⇒ + 0.until(numberOfChildren).foreach { _ ⇒ + ctx.spawnAnonymous(Child.initial, props) + } + Actor.same + case SpawnAdapter ⇒ + ctx.spawnAdapter { + r: Reproduce ⇒ SpawnAnonymous(r.times) + } + Actor.same + case SpawnAdapterWithName(name) ⇒ + ctx.spawnAdapter({ + r: Reproduce ⇒ SpawnAnonymous(r.times) + }, name) + Actor.same + } + } + } + + object Child { + + sealed trait Action + + def initial: Behavior[Action] = Actor.immutable[Action] { (_, msg) ⇒ + msg match { + case _ ⇒ + Actor.empty + } + } + + } + +} + +class EffectfulActorContextSpec extends 
FlatSpec with Matchers { + + private val props = Props.empty.withMailboxCapacity(10) + + "EffectfulActorContext's spawn" should "create children when no props specified" in { + val system = ActorSystem.create(Father.init(), "father-system") + val ctx = new EffectfulActorContext[Father.Command]("father-test", Father.init(), 100, system) + + ctx.run(SpawnChildren(2)) + val effects = ctx.getAllEffects() + effects should contain only (Spawned("child0", Props.empty), Spawned("child1", Props.empty)) + } + + it should "create children when props specified and record effects" in { + val system = ActorSystem.create(Father.init(), "father-system") + val ctx = new EffectfulActorContext[Father.Command]("father-test", Father.init(), 100, system) + + ctx.run(SpawnChildrenWithProps(2, props)) + val effects = ctx.getAllEffects() + effects should contain only (Spawned("child0", props), Spawned("child1", props)) + } + + "EffectfulActorContext's spawnAnonymous" should "create children when no props specified and record effects" in { + val system = ActorSystem.create(Father.init(), "father-system") + val ctx = new EffectfulActorContext[Father.Command]("father-test", Father.init(), 100, system) + + ctx.run(SpawnAnonymous(2)) + val effects = ctx.getAllEffects() + effects shouldBe Seq(SpawnedAnonymous(Props.empty), SpawnedAnonymous(Props.empty)) + } + + it should "create children when props specified and record effects" in { + val system = ActorSystem.create(Father.init(), "father-system") + val ctx = new EffectfulActorContext[Father.Command]("father-test", Father.init(), 100, system) + + ctx.run(SpawnAnonymousWithProps(2, props)) + val effects = ctx.getAllEffects() + effects shouldBe Seq(SpawnedAnonymous(props), SpawnedAnonymous(props)) + } + + "EffectfulActorContext's spawnAdapter" should "create adapters without name and record effects" in { + val system = ActorSystem.create(Father.init(), "father-system") + val ctx = new EffectfulActorContext[Father.Command]("father-test", Father.init(), 100, system) + + ctx.run(SpawnAdapter) + val effects = ctx.getAllEffects() + effects shouldBe Seq(SpawnedAdapter) + } + + it should "create adapters with name and record effects" in { + val system = ActorSystem.create(Father.init(), "father-system") + val ctx = new EffectfulActorContext[Father.Command]("father-test", Father.init(), 100, system) + + ctx.run(SpawnAdapterWithName("adapter")) + val effects = ctx.getAllEffects() + effects shouldBe Seq(SpawnedAdapter) + } +} diff --git a/akka-typed-tests/src/test/java/akka/typed/ExtensionsTest.java b/akka-typed-tests/src/test/java/akka/typed/ExtensionsTest.java index 86b5d78d68..279c058357 100644 --- a/akka-typed-tests/src/test/java/akka/typed/ExtensionsTest.java +++ b/akka-typed-tests/src/test/java/akka/typed/ExtensionsTest.java @@ -43,8 +43,8 @@ public class ExtensionsTest extends JUnitSuite { @Test public void loadJavaExtensionsFromConfig() { final ActorSystem system = ActorSystem.create( - "loadJavaExtensionsFromConfig", Behavior.empty(), + "loadJavaExtensionsFromConfig", Optional.empty(), Optional.of(ConfigFactory.parseString("akka.typed.extensions += \"akka.typed.ExtensionsTest$MyExtension\"").resolve()), Optional.empty(), @@ -66,7 +66,7 @@ public class ExtensionsTest extends JUnitSuite { @Test public void loadScalaExtension() { - final ActorSystem system = ActorSystem.create("loadScalaExtension", Behavior.empty()); + final ActorSystem system = ActorSystem.create(Behavior.empty(), "loadScalaExtension"); try { DummyExtension1 instance1 = DummyExtension1.get(system); 
DummyExtension1 instance2 = DummyExtension1.get(system); diff --git a/akka-typed-tests/src/test/java/akka/typed/javadsl/ActorCompile.java b/akka-typed-tests/src/test/java/akka/typed/javadsl/ActorCompile.java index 0364491777..c9119f38a1 100644 --- a/akka-typed-tests/src/test/java/akka/typed/javadsl/ActorCompile.java +++ b/akka-typed-tests/src/test/java/akka/typed/javadsl/ActorCompile.java @@ -45,7 +45,7 @@ public class ActorCompile { Behavior actor9 = widened(actor7, pf -> pf.match(MyMsgA.class, x -> x)); Behavior actor10 = immutable((ctx, msg) -> stopped(actor4), (ctx, signal) -> same()); - ActorSystem system = ActorSystem.create("Sys", actor1); + ActorSystem system = ActorSystem.create(actor1, "Sys"); { Actor.immutable((ctx, msg) -> { diff --git a/akka-typed-tests/src/test/java/akka/typed/javadsl/WatchTest.java b/akka-typed-tests/src/test/java/akka/typed/javadsl/WatchTest.java index c8da9ddfd0..0f1e0f3b27 100644 --- a/akka-typed-tests/src/test/java/akka/typed/javadsl/WatchTest.java +++ b/akka-typed-tests/src/test/java/akka/typed/javadsl/WatchTest.java @@ -70,7 +70,7 @@ public class WatchTest extends JUnitSuite { watched.tell(new Stop()); return waitingForTermination(msg.replyTo); }); - ActorSystem> system = ActorSystem.create("sysname", root); + ActorSystem> system = ActorSystem.create(root, "sysname"); try { // Not sure why this does not compile without an explicit cast? // system.tell(new RunTest()); @@ -93,7 +93,7 @@ public class WatchTest extends JUnitSuite { return unhandled(); } }); - ActorSystem system = ActorSystem.create("sysname", root); + ActorSystem system = ActorSystem.create(root, "sysname"); try { // Not sure why this does not compile without an explicit cast? // system.tell(new RunTest()); @@ -103,4 +103,4 @@ public class WatchTest extends JUnitSuite { Await.ready(system.terminate(), Duration.create(10, TimeUnit.SECONDS)); } } -} \ No newline at end of file +} diff --git a/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java b/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java index 5d59744b7d..6392a2fc2b 100644 --- a/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java +++ b/akka-typed-tests/src/test/java/jdocs/akka/typed/IntroTest.java @@ -58,7 +58,7 @@ public class IntroTest { public static void main(String[] args) { //#hello-world final ActorSystem system = - ActorSystem.create("hello", HelloWorld.greeter); + ActorSystem.create(HelloWorld.greeter, "hello"); final CompletionStage reply = AskPattern.ask(system, @@ -198,7 +198,7 @@ public class IntroTest { }); final ActorSystem system = - ActorSystem.create("ChatRoomDemo", main); + ActorSystem.create(main, "ChatRoomDemo"); Await.result(system.whenTerminated(), Duration.create(3, TimeUnit.SECONDS)); //#chatroom-main diff --git a/akka-typed-tests/src/test/scala/akka/typed/ExtensionsSpec.scala b/akka-typed-tests/src/test/scala/akka/typed/ExtensionsSpec.scala index 0899dfb337..4c7d7cde9b 100644 --- a/akka-typed-tests/src/test/scala/akka/typed/ExtensionsSpec.scala +++ b/akka-typed-tests/src/test/scala/akka/typed/ExtensionsSpec.scala @@ -8,7 +8,7 @@ import java.util.concurrent.atomic.AtomicInteger import com.typesafe.config.{ Config, ConfigFactory } import org.scalatest.concurrent.ScalaFutures -import scala.concurrent.Future +import scala.concurrent.{ Await, Future } class DummyExtension1 extends Extension object DummyExtension1 extends ExtensionId[DummyExtension1] { @@ -95,7 +95,7 @@ class ExtensionsSpec extends TypedSpecSetup { def `04 handle extensions that fail to initialize`(): Unit = { def 
create(): Unit = { - ActorSystem[Any]("ExtensionsSpec04", Behavior.EmptyBehavior, config = Some(ConfigFactory.parseString( + ActorSystem[Any](Behavior.EmptyBehavior, "ExtensionsSpec04", config = Some(ConfigFactory.parseString( """ akka.typed.extensions = ["akka.typed.FailingToLoadExtension$"] """))) @@ -152,8 +152,25 @@ class ExtensionsSpec extends TypedSpecSetup { instance1 should be theSameInstanceAs instance2 } + def `10 not create an extension multiple times when using the ActorSystemAdapter`(): Unit = { + import akka.typed.scaladsl.adapter._ + val untypedSystem = akka.actor.ActorSystem() + try { + + val before = InstanceCountingExtension.createCount.get() + InstanceCountingExtension(untypedSystem.toTyped) + val ext = InstanceCountingExtension(untypedSystem.toTyped) + val after = InstanceCountingExtension.createCount.get() + + (after - before) should ===(1) + + } finally { + untypedSystem.terminate().futureValue + } + } + def withEmptyActorSystem[T](name: String, config: Option[Config] = None)(f: ActorSystem[_] ⇒ T): T = { - val system = ActorSystem[Any](name, Behavior.EmptyBehavior, config = config) + val system = ActorSystem[Any](Behavior.EmptyBehavior, name, config = config) try f(system) finally system.terminate().futureValue } diff --git a/akka-typed-tests/src/test/scala/akka/typed/TypedSpec.scala b/akka-typed-tests/src/test/scala/akka/typed/TypedSpec.scala index e4460568b9..e2b0a6b886 100644 --- a/akka-typed-tests/src/test/scala/akka/typed/TypedSpec.scala +++ b/akka-typed-tests/src/test/scala/akka/typed/TypedSpec.scala @@ -60,7 +60,7 @@ abstract class TypedSpec(val config: Config) extends TypedSpecSetup { private var nativeSystemUsed = false lazy val nativeSystem: ActorSystem[TypedSpec.Command] = { - val sys = ActorSystem(AkkaSpec.getCallerName(classOf[TypedSpec]), guardian(), config = Some(config withFallback AkkaSpec.testConf)) + val sys = ActorSystem(guardian(), AkkaSpec.getCallerName(classOf[TypedSpec]), config = Some(config withFallback AkkaSpec.testConf)) nativeSystemUsed = true sys } diff --git a/akka-typed-tests/src/test/scala/akka/typed/internal/ActorSystemSpec.scala b/akka-typed-tests/src/test/scala/akka/typed/internal/ActorSystemSpec.scala index 003358eec1..182deccba5 100644 --- a/akka-typed-tests/src/test/scala/akka/typed/internal/ActorSystemSpec.scala +++ b/akka-typed-tests/src/test/scala/akka/typed/internal/ActorSystemSpec.scala @@ -26,11 +26,11 @@ class ActorSystemSpec extends Spec with Matchers with BeforeAndAfterAll with Sca case class Probe(msg: String, replyTo: ActorRef[String]) trait CommonTests { - def system[T](name: String, behavior: Behavior[T]): ActorSystem[T] + def system[T](behavior: Behavior[T], name: String): ActorSystem[T] def suite: String def withSystem[T](name: String, behavior: Behavior[T], doTerminate: Boolean = true)(block: ActorSystem[T] ⇒ Unit): Terminated = { - val sys = system(s"$suite-$name", behavior) + val sys = system(behavior, s"$suite-$name") try { block(sys) if (doTerminate) sys.terminate().futureValue else sys.whenTerminated.futureValue @@ -55,13 +55,15 @@ class ActorSystemSpec extends Spec with Matchers with BeforeAndAfterAll with Sca def `must terminate the guardian actor`(): Unit = { val inbox = Inbox[String]("terminate") - val sys = system("terminate", immutable[Probe] { - case (_, _) ⇒ unhandled - } onSignal { - case (ctx, PostStop) ⇒ - inbox.ref ! "done" - same - }) + val sys = system( + immutable[Probe] { + case (_, _) ⇒ unhandled + } onSignal { + case (ctx, PostStop) ⇒ + inbox.ref ! 
"done" + same + }, + "terminate") sys.terminate().futureValue inbox.receiveAll() should ===("done" :: Nil) } @@ -98,7 +100,7 @@ class ActorSystemSpec extends Spec with Matchers with BeforeAndAfterAll with Sca } object `An ActorSystemImpl` extends CommonTests { - def system[T](name: String, behavior: Behavior[T]): ActorSystem[T] = ActorSystem(name, behavior) + def system[T](behavior: Behavior[T], name: String) = ActorSystem(behavior, name) def suite = "native" // this is essential to complete ActorCellSpec, see there @@ -133,7 +135,7 @@ class ActorSystemSpec extends Spec with Matchers with BeforeAndAfterAll with Sca } object `An ActorSystemAdapter` extends CommonTests { - def system[T](name: String, behavior: Behavior[T]): ActorSystem[T] = ActorSystem.adapter(name, behavior) + def system[T](behavior: Behavior[T], name: String) = ActorSystem.adapter(name, behavior) def suite = "adapter" } } diff --git a/akka-typed-tests/src/test/scala/akka/typed/scaladsl/adapter/AdapterSpec.scala b/akka-typed-tests/src/test/scala/akka/typed/scaladsl/adapter/AdapterSpec.scala index 4ad7bf0b50..ead5dffd00 100644 --- a/akka-typed-tests/src/test/scala/akka/typed/scaladsl/adapter/AdapterSpec.scala +++ b/akka-typed-tests/src/test/scala/akka/typed/scaladsl/adapter/AdapterSpec.scala @@ -144,6 +144,15 @@ object AdapterSpec { class AdapterSpec extends AkkaSpec { import AdapterSpec._ + "ActorSystem adaption" must { + "only happen once for a given actor system" in { + val typed1 = system.toTyped + val typed2 = system.toTyped + + typed1 should be theSameInstanceAs typed2 + } + } + "Adapted actors" must { "send message from typed to untyped" in { diff --git a/akka-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala b/akka-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala index 65214cacfe..3982d839b5 100644 --- a/akka-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala +++ b/akka-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala @@ -83,7 +83,7 @@ class IntroSpec extends TypedSpec { // using global pool since we want to run tasks after system.terminate import scala.concurrent.ExecutionContext.Implicits.global - val system: ActorSystem[Greet] = ActorSystem("hello", greeter) + val system: ActorSystem[Greet] = ActorSystem(greeter, "hello") val future: Future[Greeted] = system ? 
(Greet("world", _)) @@ -133,7 +133,7 @@ class IntroSpec extends TypedSpec { } } - val system = ActorSystem("ChatRoomDemo", main) + val system = ActorSystem(main, "ChatRoomDemo") Await.result(system.whenTerminated, 3.seconds) //#chatroom-main } diff --git a/akka-typed-tests/src/test/scala/docs/akka/typed/MutableIntroSpec.scala b/akka-typed-tests/src/test/scala/docs/akka/typed/MutableIntroSpec.scala index 8b86e518a7..1aa03008ae 100644 --- a/akka-typed-tests/src/test/scala/docs/akka/typed/MutableIntroSpec.scala +++ b/akka-typed-tests/src/test/scala/docs/akka/typed/MutableIntroSpec.scala @@ -105,7 +105,7 @@ class MutableIntroSpec extends TypedSpec { } } - val system = ActorSystem("ChatRoomDemo", main) + val system = ActorSystem(main, "ChatRoomDemo") Await.result(system.whenTerminated, 1.second) //#chatroom-main } diff --git a/akka-typed/src/main/scala/akka/typed/ActorSystem.scala b/akka-typed/src/main/scala/akka/typed/ActorSystem.scala index b0f9092d06..323f3ee53f 100644 --- a/akka-typed/src/main/scala/akka/typed/ActorSystem.scala +++ b/akka-typed/src/main/scala/akka/typed/ActorSystem.scala @@ -159,11 +159,13 @@ object ActorSystem { * Akka Typed [[Behavior]] hierarchies—this system cannot run untyped * [[akka.actor.Actor]] instances. */ - def apply[T](name: String, guardianBehavior: Behavior[T], - guardianProps: Props = Props.empty, - config: Option[Config] = None, - classLoader: Option[ClassLoader] = None, - executionContext: Option[ExecutionContext] = None): ActorSystem[T] = { + def apply[T]( + guardianBehavior: Behavior[T], + name: String, + guardianProps: Props = Props.empty, + config: Option[Config] = None, + classLoader: Option[ClassLoader] = None, + executionContext: Option[ExecutionContext] = None): ActorSystem[T] = { Behavior.validateAsInitial(guardianBehavior) val cl = classLoader.getOrElse(akka.actor.ActorSystem.findClassLoader()) val appConfig = config.getOrElse(ConfigFactory.load(cl)) @@ -175,13 +177,15 @@ object ActorSystem { * Akka Typed [[Behavior]] hierarchies—this system cannot run untyped * [[akka.actor.Actor]] instances. */ - def create[T](name: String, guardianBehavior: Behavior[T], - guardianProps: Optional[Props], - config: Optional[Config], - classLoader: Optional[ClassLoader], - executionContext: Optional[ExecutionContext]): ActorSystem[T] = { + def create[T]( + guardianBehavior: Behavior[T], + name: String, + guardianProps: Optional[Props], + config: Optional[Config], + classLoader: Optional[ClassLoader], + executionContext: Optional[ExecutionContext]): ActorSystem[T] = { import scala.compat.java8.OptionConverters._ - apply(name, guardianBehavior, guardianProps.asScala.getOrElse(EmptyProps), config.asScala, classLoader.asScala, executionContext.asScala) + apply(guardianBehavior, name, guardianProps.asScala.getOrElse(EmptyProps), config.asScala, classLoader.asScala, executionContext.asScala) } /** @@ -189,8 +193,8 @@ object ActorSystem { * Akka Typed [[Behavior]] hierarchies—this system cannot run untyped * [[akka.actor.Actor]] instances. 
*/ - def create[T](name: String, guardianBehavior: Behavior[T]): ActorSystem[T] = - apply(name, guardianBehavior) + def create[T](guardianBehavior: Behavior[T], name: String): ActorSystem[T] = + apply(guardianBehavior, name) /** * Create an ActorSystem based on the untyped [[akka.actor.ActorSystem]] diff --git a/akka-typed/src/main/scala/akka/typed/scaladsl/adapter/AdapterExtension.scala b/akka-typed/src/main/scala/akka/typed/scaladsl/adapter/AdapterExtension.scala new file mode 100644 index 0000000000..955d651183 --- /dev/null +++ b/akka-typed/src/main/scala/akka/typed/scaladsl/adapter/AdapterExtension.scala @@ -0,0 +1,23 @@ +/** + * Copyright (C) 2009-2017 Lightbend Inc. + */ +package akka.typed.scaladsl.adapter + +import akka.actor.ExtendedActorSystem +import akka.annotation.InternalApi +import akka.typed.internal.adapter.ActorSystemAdapter + +/** + * Internal API + * + * To avoid creating a new adapter for every `toTyped` call, we create one instance and keep it in an extension + */ +@InternalApi private[akka] class AdapterExtension(sys: akka.actor.ActorSystem) extends akka.actor.Extension { + val adapter = ActorSystemAdapter(sys) +} +/** + * Internal API + */ +@InternalApi object AdapterExtension extends akka.actor.ExtensionId[AdapterExtension] { + def createExtension(sys: ExtendedActorSystem): AdapterExtension = new AdapterExtension(sys) +} \ No newline at end of file diff --git a/akka-typed/src/main/scala/akka/typed/scaladsl/adapter/package.scala b/akka-typed/src/main/scala/akka/typed/scaladsl/adapter/package.scala index 73c698ff6b..e8080ed908 100644 --- a/akka-typed/src/main/scala/akka/typed/scaladsl/adapter/package.scala +++ b/akka-typed/src/main/scala/akka/typed/scaladsl/adapter/package.scala @@ -4,6 +4,7 @@ package akka.typed package scaladsl +import akka.actor.ExtendedActorSystem import akka.annotation.InternalApi import akka.typed.internal.adapter._ @@ -40,7 +41,7 @@ package object adapter { def spawn[T](behavior: Behavior[T], name: String, props: Props = Props.empty): ActorRef[T] = ActorRefAdapter(sys.actorOf(PropsAdapter(Behavior.validateAsInitial(behavior), props), name)) - def toTyped: ActorSystem[Nothing] = ActorSystemAdapter(sys) + def toTyped: ActorSystem[Nothing] = AdapterExtension(sys).adapter } /** diff --git a/build.sbt b/build.sbt index b42b60d047..189521ed83 100644 --- a/build.sbt +++ b/build.sbt @@ -103,6 +103,7 @@ lazy val docs = akkaModule("akka-docs") .dependsOn( actor, cluster, clusterMetrics, slf4j, agent, camel, osgi, persistenceTck, persistenceQuery, distributedData, stream, clusterTools % "compile->compile;test->test", + clusterSharding % "compile->compile;test->test", testkit % "compile->compile;test->test", remote % "compile->compile;test->test", persistence % "compile->compile;provided->provided;test->test", diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 3603f49f20..70b15e0389 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -12,6 +12,7 @@ import com.typesafe.sbt.pgp.PgpKeys.publishSigned import sbt.Keys._ import sbt.TestLogger.wrap import sbt._ +import sbtwhitesource.WhiteSourcePlugin.autoImport.whitesourceIgnore object AkkaBuild { @@ -33,7 +34,8 @@ object AkkaBuild { val dontPublishSettings = Seq( publishSigned := (), publish := (), - publishArtifact in Compile := false + publishArtifact in Compile := false, + whitesourceIgnore := true ) val dontPublishDocsSettings = Seq( diff --git a/project/Dependencies.scala b/project/Dependencies.scala index d985fa4457..566b668dda 100644 --- a/project/Dependencies.scala +++ 
b/project/Dependencies.scala @@ -41,7 +41,7 @@ object Dependencies { object Compile { // Compile - val camelCore = "org.apache.camel" % "camel-core" % "2.15.6" exclude("org.slf4j", "slf4j-api") // ApacheV2 + val camelCore = "org.apache.camel" % "camel-core" % "2.17.7" exclude("org.slf4j", "slf4j-api") // ApacheV2 // when updating config version, update links ActorSystem ScalaDoc to link to the updated version val config = "com.typesafe" % "config" % "1.3.1" // ApacheV2 @@ -60,7 +60,7 @@ object Dependencies { val sigar = "org.fusesource" % "sigar" % "1.6.4" // ApacheV2 // reactive streams - val reactiveStreams = "org.reactivestreams" % "reactive-streams" % "1.0.0" // CC0 + val reactiveStreams = "org.reactivestreams" % "reactive-streams" % "1.0.1" // CC0 // ssl-config val sslConfigCore = "com.typesafe" %% "ssl-config-core" % sslConfigVersion // ApacheV2 @@ -75,7 +75,6 @@ object Dependencies { val aeronDriver = "io.aeron" % "aeron-driver" % aeronVersion // ApacheV2 val aeronClient = "io.aeron" % "aeron-client" % aeronVersion // ApacheV2 - object Docs { val sprayJson = "io.spray" %% "spray-json" % "1.3.3" % "test" val gson = "com.google.code.gson" % "gson" % "2.8.0" % "test" @@ -101,8 +100,8 @@ object Dependencies { val jimfs = "com.google.jimfs" % "jimfs" % "1.1" % "test" // ApacheV2 // metrics, measurements, perf testing - val metrics = "com.codahale.metrics" % "metrics-core" % "3.0.2" % "test" // ApacheV2 - val metricsJvm = "com.codahale.metrics" % "metrics-jvm" % "3.0.2" % "test" // ApacheV2 + val metrics = "io.dropwizard.metrics" % "metrics-core" % "3.2.4" % "test" // ApacheV2 + val metricsJvm = "io.dropwizard.metrics" % "metrics-jvm" % "3.2.4" % "test" // ApacheV2 val latencyUtils = "org.latencyutils" % "LatencyUtils" % "1.0.3" % "test" // Free BSD val hdrHistogram = "org.hdrhistogram" % "HdrHistogram" % "2.1.9" % "test" // CC0 val metricsAll = Seq(metrics, metricsJvm, latencyUtils, hdrHistogram) @@ -112,7 +111,7 @@ object Dependencies { val slf4jLog4j = "org.slf4j" % "log4j-over-slf4j" % slf4jVersion % "test" // MIT // reactive streams tck - val reactiveStreamsTck = "org.reactivestreams" % "reactive-streams-tck" % "1.0.0" % "test" // CC0 + val reactiveStreamsTck = "org.reactivestreams" % "reactive-streams-tck" % "1.0.1" % "test" // CC0 } object Provided { diff --git a/project/MiMa.scala b/project/MiMa.scala index 6c15c4a27c..a5162df1b3 100644 --- a/project/MiMa.scala +++ b/project/MiMa.scala @@ -3,46 +3,35 @@ */ package akka -import com.typesafe.tools.mima.core.ProblemFilters import sbt._ import sbt.Keys._ import com.typesafe.tools.mima.plugin.MimaPlugin import com.typesafe.tools.mima.plugin.MimaPlugin.autoImport._ -import scala.util.Try - object MiMa extends AutoPlugin { + private val latestMinorOf25 = 3 + private val latestMinorOf24 = 18 + override def requires = MimaPlugin override def trigger = allRequirements override val projectSettings = Seq( - mimaBackwardIssueFilters ++= mimaIgnoredProblems, mimaPreviousArtifacts := akkaPreviousArtifacts(name.value, organization.value, scalaBinaryVersion.value) ) def akkaPreviousArtifacts(projectName: String, organization: String, scalaBinaryVersion: String): Set[sbt.ModuleID] = { val versions: Seq[String] = { - def latestMinorVersionOf(major: String) = mimaIgnoredProblems.keys - .map(_.stripPrefix(major)) - .map(minor => scala.util.Try(minor.toInt)) - .collect { - case scala.util.Success(m) => m - } - .max - val akka24NoStreamVersions = Seq("2.4.0", "2.4.1") - val akka25Versions = (0 to latestMinorVersionOf("2.5.")).map(patch => s"2.5.$patch") 
+ val akka25Versions = (0 to latestMinorOf25).map(patch => s"2.5.$patch") val akka24StreamVersions = (2 to 12) map ("2.4." + _) val akka24WithScala212 = - (13 to latestMinorVersionOf("2.4.")) + (13 to latestMinorOf24) .map ("2.4." + _) .filterNot(_ == "2.4.15") // 2.4.15 was released from the wrong branch and never announced val akka242NewArtifacts = Seq( "akka-stream", - "akka-http-core", - "akka-http-testkit", "akka-stream-testkit" ) val akka250NewArtifacts = Seq( @@ -76,1167 +65,4 @@ object MiMa extends AutoPlugin { organization %% adjustedProjectName % v }.toSet } - - case class FilterAnyProblem(name: String) extends com.typesafe.tools.mima.core.ProblemFilter { - import com.typesafe.tools.mima.core._ - override def apply(p: Problem): Boolean = p match { - case t: TemplateProblem => t.ref.fullName != name && t.ref.fullName != (name + '$') - case m: MemberProblem => m.ref.owner.fullName != name && m.ref.owner.fullName != (name + '$') - } - } - - case class FilterAnyProblemStartingWith(start: String) extends com.typesafe.tools.mima.core.ProblemFilter { - import com.typesafe.tools.mima.core._ - override def apply(p: Problem): Boolean = p match { - case t: TemplateProblem => !t.ref.fullName.startsWith(start) - case m: MemberProblem => !m.ref.owner.fullName.startsWith(start) - } - } - - def mimaIgnoredProblems = { - import com.typesafe.tools.mima.core._ - - val bcIssuesBetween24and25 = Seq( - // ##22269 GSet as delta-CRDT - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.GSet.this"), // constructor supplied by companion object - - // # 18262 embed FJP, Mailbox extends ForkJoinTask - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator#ForkJoinExecutorServiceFactory.threadFactory"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator#ForkJoinExecutorServiceFactory.this"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator#ForkJoinExecutorServiceFactory.this"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator.validate"), - ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask"), - ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.MonitorableThreadFactory"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.MonitorableThreadFactory.newThread"), - ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinPool"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator#AkkaForkJoinPool.this"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.ForkJoinExecutorConfigurator#AkkaForkJoinPool.this"), - ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.Mailbox"), - ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.BalancingDispatcher$SharingMailbox"), - ProblemFilters.exclude[MissingTypesProblem]("akka.dispatch.MonitorableThreadFactory$AkkaForkJoinWorkerThread"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.dispatch.MonitorableThreadFactory#AkkaForkJoinWorkerThread.this"), - - // #21875 delta-CRDT - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.GCounter.this"), - - // #22188 ORSet delta-CRDT - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.ORSet.this"), - 
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.SerializationSupport.versionVectorToProto"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.SerializationSupport.versionVectorFromProto"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.SerializationSupport.versionVectorFromBinary"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.protobuf.ReplicatedDataSerializer.versionVectorToProto"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.protobuf.ReplicatedDataSerializer.versionVectorFromProto"), - - // #22141 sharding minCap - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.updatingStateTimeout"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.waitingForStateTimeout"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.this"), - - // #22295 Improve Circuit breaker - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.pattern.CircuitBreaker#State.callThrough"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.pattern.CircuitBreaker#State.invoke"), - - // #21423 Remove deprecated metrics - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterReadView.clusterMetrics"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.InternalClusterAction$MetricsTick$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsCollector"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.Metric"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsCollector$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.Metric$"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterSettings.MetricsMovingAverageHalfLife"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterSettings.MetricsGossipInterval"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterSettings.MetricsCollectorClass"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterSettings.MetricsInterval"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterSettings.MetricsEnabled"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.JmxMetricsCollector"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.SigarMetricsCollector"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricNumericConverter"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.ClusterEvent$ClusterMetricsChanged"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsGossipEnvelope"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.NodeMetrics"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics$Cpu$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics$Cpu"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.InternalClusterAction$PublisherCreated"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.EWMA"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsGossip$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.InternalClusterAction$PublisherCreated$"), - 
ProblemFilters.exclude[MissingClassProblem]("akka.cluster.NodeMetrics$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsGossipEnvelope$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.ClusterMetricsCollector"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.EWMA$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics$HeapMemory"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.MetricsGossip"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.ClusterEvent$ClusterMetricsChanged$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.StandardMetrics$HeapMemory$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.SystemLoadAverageMetricsSelector$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingMetricsListener"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.WeightedRoutees"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingPool"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.CpuMetricsSelector$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.MixMetricsSelector"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.CapacityMetricsSelector"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.SystemLoadAverageMetricsSelector"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingRoutingLogic"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.HeapMetricsSelector"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingPool$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.CpuMetricsSelector"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingRoutingLogic$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.HeapMetricsSelector$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.MetricsSelector$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingGroup$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.MixMetricsSelectorBase"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.AdaptiveLoadBalancingGroup"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.MixMetricsSelector$"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.routing.MetricsSelector"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$EWMA$Builder"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$MetricOrBuilder"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$Number"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$NumberType"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossipEnvelopeOrBuilder"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$Builder"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetricsOrBuilder"), - 
ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$NumberOrBuilder"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$EWMA"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossip$Builder"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossipOrBuilder"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossipEnvelope"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossip"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$MetricsGossipEnvelope$Builder"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$EWMAOrBuilder"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$Metric"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$Metric$Builder"), - ProblemFilters.exclude[MissingClassProblem]("akka.cluster.protobuf.msg.ClusterMessages$NodeMetrics$Number$Builder"), - - // #22154 Sharding remembering entities with ddata, internal actors - FilterAnyProblemStartingWith("akka.cluster.sharding.Shard"), - FilterAnyProblemStartingWith("akka.cluster.sharding.PersistentShard"), - FilterAnyProblemStartingWith("akka.cluster.sharding.ClusterShardingGuardian"), - FilterAnyProblemStartingWith("akka.cluster.sharding.ShardRegion"), - - // #21647 pruning - FilterAnyProblemStartingWith("akka.cluster.ddata.PruningState"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.RemovedNodePruning.modifiedByNodes"), - FilterAnyProblemStartingWith("akka.cluster.ddata.Replicator"), - FilterAnyProblemStartingWith("akka.cluster.ddata.protobuf.msg"), - - // #21647 pruning - FilterAnyProblemStartingWith("akka.cluster.ddata.PruningState"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.RemovedNodePruning.usingNodes"), - FilterAnyProblemStartingWith("akka.cluster.ddata.Replicator"), - FilterAnyProblemStartingWith("akka.cluster.ddata.protobuf.msg"), - - // #21537 coordinated shutdown - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterCoreDaemon.removed"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.convergence"), - - //#21717 Improvements to AbstractActor API - FilterAnyProblemStartingWith("akka.japi.pf.ReceiveBuilder"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.AbstractActor.receive"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.AbstractActor.createReceive"), - ProblemFilters.exclude[MissingClassProblem]("akka.actor.AbstractActorContext"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.actor.AbstractActor.getContext"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.actor.AbstractActor.emptyBehavior"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.Children.findChild"), - ProblemFilters.exclude[MissingTypesProblem]("akka.actor.ActorCell"), - ProblemFilters.exclude[MissingTypesProblem]("akka.routing.RoutedActorCell"), - ProblemFilters.exclude[MissingTypesProblem]("akka.routing.ResizablePoolCell"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.AbstractPersistentActor.createReceiveRecover"), - 
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.AbstractPersistentActor.createReceive"), - - // #21423 removal of deprecated stages (in 2.5.x) - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.Source.transform"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.SubSource.transform"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.Flow.transform"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.SubFlow.transform"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.FlowOpsMat.transformMaterializing"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.transform"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.transformMaterializing"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.andThen"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.transform"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.transformMaterializing"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.andThen"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.FlowOps.transform"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.FlowOps.andThen"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.Directive"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AsyncDirective"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.TerminationDirective"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AbstractStage$"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$Become$"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AbstractStage$PushPullGraphStage"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$EmittingState$"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AbstractStage"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AbstractStage$PushPullGraphLogic"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.Context"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.Stage"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.DetachedStage"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$Become"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StageState"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.AbstractStage$PushPullGraphStageWithMaterializedValue"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.DownstreamDirective"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.PushPullStage"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.LifecycleContext"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$EmittingState"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.PushStage"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.DetachedContext"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$State"), - 
ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.UpstreamDirective"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.FreeDirective"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$AndThen"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.SyncDirective"), - - // deprecated method transform(scala.Function0)akka.stream.scaladsl.FlowOps in class akka.stream.scaladsl.GraphDSL#Implicits#PortOpsImpl does not have a correspondent in current version - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.GraphDSL#Implicits#PortOpsImpl.transform"), - // method andThen(akka.stream.impl.Stages#SymbolicStage)akka.stream.scaladsl.FlowOps in class akka.stream.scaladsl.GraphDSL#Implicits#PortOpsImpl does not have a correspondent in current version - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.GraphDSL#Implicits#PortOpsImpl.andThen"), - // object akka.stream.stage.StatefulStage#Stay does not have a correspondent in current version - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$Stay$"), - // object akka.stream.stage.StatefulStage#Finish does not have a correspondent in current version - ProblemFilters.exclude[MissingClassProblem]("akka.stream.stage.StatefulStage$Finish$"), - - // #21423 remove deprecated ActorSystem termination methods (in 2.5.x) - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystemImpl.shutdown"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystemImpl.isTerminated"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystemImpl.awaitTermination"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystemImpl.awaitTermination"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystem.shutdown"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystem.isTerminated"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystem.awaitTermination"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystem.awaitTermination"), - - // #21423 remove deprecated ActorPath.ElementRegex - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorPath.ElementRegex"), - - // #21423 remove some deprecated event bus classes - ProblemFilters.exclude[MissingClassProblem]("akka.event.ActorClassification"), - ProblemFilters.exclude[MissingClassProblem]("akka.event.EventStream$"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.event.EventStream.this"), - ProblemFilters.exclude[MissingClassProblem]("akka.event.japi.ActorEventBus"), - - // #21423 remove deprecated util.Crypt - ProblemFilters.exclude[MissingClassProblem]("akka.util.Crypt"), - ProblemFilters.exclude[MissingClassProblem]("akka.util.Crypt$"), - - // #21423 removal of deprecated serializer constructors (in 2.5.x) - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.ProtobufSerializer.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.MessageContainerSerializer.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.DaemonMsgCreateSerializer.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.serialization.JavaSerializer.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.serialization.ByteArraySerializer.this"), - 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.protobuf.ClusterMessageSerializer.this"), - - // #21423 removal of deprecated constructor in PromiseActorRef - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.PromiseActorRef.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.PromiseActorRef.apply"), - - // #21423 remove deprecated methods in routing - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.Pool.nrOfInstances"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.Group.paths"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.PoolBase.nrOfInstances"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.GroupBase.paths"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.GroupBase.getPaths"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.routing.FromConfig.nrOfInstances"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.routing.RemoteRouterConfig.nrOfInstances"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.routing.ClusterRouterGroup.paths"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.routing.ClusterRouterPool.nrOfInstances"), - - // #21423 remove deprecated persist method (persistAll) - // This might filter changes to the ordinary persist method also, but not much to do about that - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActor.persist"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActor.persist"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActor.persistAsync"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.Eventsourced.persist"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.Eventsourced.persistAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.persist"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.persist"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.persistAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.persistAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.fsm.AbstractPersistentFSM.persist"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.fsm.AbstractPersistentFSM.persistAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.fsm.AbstractPersistentLoggingFSM.persist"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.fsm.AbstractPersistentLoggingFSM.persistAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.PersistentShard.persist"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.PersistentShard.persistAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.PersistentShardCoordinator.persist"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.PersistentShardCoordinator.persistAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.RemoveInternalClusterShardingData#RemoveOnePersistenceId.persist"), - 
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.RemoveInternalClusterShardingData#RemoveOnePersistenceId.persistAsync"), - - // #21423 remove deprecated ARRAY_OF_BYTE_ARRAY - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.ProtobufSerializer.ARRAY_OF_BYTE_ARRAY"), - - // #21423 remove deprecated constructor in DeadlineFailureDetector - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.DeadlineFailureDetector.this"), - - // #21423 removal of deprecated `PersistentView` (in 2.5.x) - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.Update"), - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.Update$"), - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.PersistentView"), - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.PersistentView$"), - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.PersistentView$ScheduledUpdate"), - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.AbstractPersistentView"), - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.UntypedPersistentView"), - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.PersistentView$ScheduledUpdate$"), - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.PersistentView$State"), - - // #22015 removal of deprecated AESCounterSecureInetRNGs - ProblemFilters.exclude[MissingClassProblem]("akka.remote.security.provider.AES128CounterInetRNG"), - ProblemFilters.exclude[MissingClassProblem]("akka.remote.security.provider.AES256CounterInetRNG"), - ProblemFilters.exclude[MissingClassProblem]("akka.remote.security.provider.InternetSeedGenerator"), - ProblemFilters.exclude[MissingClassProblem]("akka.remote.security.provider.InternetSeedGenerator$"), - - // #21648 Prefer reachable nodes in consistency writes/reads - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.ReadWriteAggregator.unreachable"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.WriteAggregator.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.WriteAggregator.props"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.ReadAggregator.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.ReadAggregator.props"), - - // #22035 Make it possible to use anything as the key in a map - FilterAnyProblemStartingWith("akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages"), - FilterAnyProblemStartingWith("akka.cluster.ddata.ORMap"), - FilterAnyProblemStartingWith("akka.cluster.ddata.LWWMap"), - FilterAnyProblemStartingWith("akka.cluster.ddata.PNCounterMap"), - FilterAnyProblemStartingWith("akka.cluster.ddata.ORMultiMap"), - - // #20140 durable distributed data - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#ReplicationDeleteFailure.apply"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DeleteSuccess.apply"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.Replicator#DeleteResponse.getRequest"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.Replicator#DeleteResponse.request"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.Replicator#Command.request"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator.receiveDelete"), - 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#ReplicationDeleteFailure.copy"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#ReplicationDeleteFailure.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DeleteSuccess.copy"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DeleteSuccess.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#Delete.apply"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DataDeleted.apply"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DataDeleted.copy"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#DataDeleted.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#Delete.copy"), - - // #16197 Remove backwards compatible workaround in SnapshotSerializer - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.serialization.SnapshotSerializer$"), - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.serialization.SnapshotHeader"), - ProblemFilters.exclude[MissingClassProblem]("akka.persistence.serialization.SnapshotHeader$"), - - // #21618 distributed data - ProblemFilters.exclude[MissingTypesProblem]("akka.cluster.ddata.Replicator$ReadMajority$"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#ReadMajority.copy"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#ReadMajority.apply"), - ProblemFilters.exclude[MissingTypesProblem]("akka.cluster.ddata.Replicator$WriteMajority$"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#WriteMajority.copy"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator#WriteMajority.apply"), - - // #22105 Akka Typed process DSL - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorCell.addFunctionRef"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.dungeon.Children.addFunctionRef"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.Children.addFunctionRef"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.Children.addFunctionRef$default$2"), - - // implementation classes - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.SubFlowImpl.transform"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.SubFlowImpl.andThen"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.Stages$SymbolicGraphStage$"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.Stages$SymbolicStage"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.Stages$SymbolicGraphStage"), - - // ddata - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ClusterEvent#ReachabilityEvent.member"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.DurableStore#Store.apply"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.DurableStore#Store.copy$default$2"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.DurableStore#Store.data"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.DurableStore#Store.copy"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.DurableStore#Store.this"), - 
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.LmdbDurableStore.dbPut"), - - // #22218 Java Ambiguity in AbstractPersistentActor with Scala 2.12 - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActor.deferAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActor.persistAllAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActor.persistAll"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.Eventsourced.deferAsync"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.Eventsourced.persistAllAsync"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.Eventsourced.persistAll"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.Eventsourced.internalPersistAsync"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.Eventsourced.internalPersist"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.Eventsourced.internalPersistAll"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.Eventsourced.internalDeferAsync"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.Eventsourced.internalPersistAllAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActorWithAtLeastOnceDelivery.deliver"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.UntypedPersistentActorWithAtLeastOnceDelivery.deliver"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.AtLeastOnceDeliveryLike.deliver"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.AtLeastOnceDeliveryLike.deliver"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.AtLeastOnceDeliveryLike.internalDeliver"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.AtLeastOnceDeliveryLike.internalDeliver"), - ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.AbstractPersistentActorWithAtLeastOnceDelivery"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActorWithAtLeastOnceDelivery.deliver"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActorWithAtLeastOnceDelivery.deliver"), - ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.AbstractPersistentActor"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.deferAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.persistAllAsync"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.AbstractPersistentActor.persistAll"), - - // #22208 remove extension key - ProblemFilters.exclude[MissingClassProblem]("akka.event.Logging$Extension$"), - - // new materializer changes relating to old module structure - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.BidiShape.copyFromPorts"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.BidiShape.reversed"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.MaterializationContext.stageName"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.SinkShape.copyFromPorts"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.Shape.copyFromPorts"), - 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.ClosedShape.copyFromPorts"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$FusedGraph$"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.Attributes.extractName"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.AmorphousShape.copyFromPorts"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.SourceShape.copyFromPorts"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$FusedGraph"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.FlowShape.copyFromPorts"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.Graph.module"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.Graph.traversalBuilder"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.Source.module"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.RunnableGraph#RunnableGraphAdapter.module"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.BidiFlow.module"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.Sink.module"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.javadsl.Flow.module"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Sink.module"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Sink.this"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.scaladsl.RunnableGraph.apply"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.GraphApply$GraphImpl"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.RunnableGraph.module"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.scaladsl.RunnableGraph.copy"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.scaladsl.RunnableGraph.copy$default$1"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.scaladsl.RunnableGraph.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.BidiFlow.module"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.BidiFlow.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.GraphDSL#Builder.module"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.module"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.this"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.GraphApply$"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.module"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.stage.GraphStageWithMaterializedValue.module"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.ModuleExtractor"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.ModuleExtractor$"), - ProblemFilters.excludePackage("akka.stream.impl"), - - // small changes in attributes - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.testkit.StreamTestKit#ProbeSource.withAttributes"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.testkit.StreamTestKit#ProbeSink.withAttributes"), - - 
// #22332 protobuf serializers for remote deployment - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getConfigManifest"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasScopeManifest"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getScopeManifestBytes"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getConfigSerializerId"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasRouterConfigSerializerId"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasRouterConfigManifest"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getRouterConfigSerializerId"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getRouterConfigManifestBytes"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getConfigManifestBytes"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasConfigManifest"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasScopeSerializerId"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getRouterConfigManifest"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.hasConfigSerializerId"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getScopeSerializerId"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#DeployDataOrBuilder.getScopeManifest"), - - // #22374 introduce fishForSpecificMessage in TestKit - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.fishForSpecificMessage$default$1"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.fishForSpecificMessage"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.fishForSpecificMessage$default$2") - - - // NOTE: filters that will be backported to 2.4 should go to the latest 2.4 version below - ) - - - val Release24Filters = Seq( - "2.4.0" -> Seq( - FilterAnyProblem("akka.remote.transport.ProtocolStateActor"), - - //#18353 Changes to methods and fields private to remoting actors - ProblemFilters.exclude[MissingMethodProblem]("akka.remote.EndpointManager.retryGateEnabled"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.EndpointManager.pruneTimerCancellable"), - - // #18722 internal changes to actor - FilterAnyProblem("akka.cluster.sharding.DDataShardCoordinator"), - - // #18328 optimize VersionVector for size 1 - FilterAnyProblem("akka.cluster.ddata.VersionVector"), - - ProblemFilters.exclude[MissingTypesProblem]("akka.cluster.sharding.ShardRegion$GetCurrentRegions$"), - //FilterAnyProblemStartingWith("akka.cluster.sharding.ShardCoordinator#Internal") - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardCoordinator#Internal#State.apply"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardCoordinator#Internal#State.copy"), - 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardCoordinator#Internal#State.this") - ), - "2.4.1" -> Seq( - // #19008 - FilterAnyProblem("akka.persistence.journal.inmem.InmemJournal"), - FilterAnyProblem("akka.persistence.journal.inmem.InmemStore"), - - // #19133 change in internal actor - ProblemFilters.exclude[MissingMethodProblem]("akka.remote.ReliableDeliverySupervisor.gated"), - - // #18758 report invalid association events - ProblemFilters.exclude[MissingTypesProblem]("akka.remote.InvalidAssociation$"), - ProblemFilters.exclude[MissingMethodProblem]("akka.remote.InvalidAssociation.apply"), - ProblemFilters.exclude[MissingMethodProblem]("akka.remote.InvalidAssociation.copy"), - ProblemFilters.exclude[MissingMethodProblem]("akka.remote.InvalidAssociation.this"), - - // #19281 BackoffSupervisor updates - ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.BackoffSupervisor.akka$pattern$BackoffSupervisor$$child_="), - ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.BackoffSupervisor.akka$pattern$BackoffSupervisor$$restartCount"), - ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.BackoffSupervisor.akka$pattern$BackoffSupervisor$$restartCount_="), - ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.BackoffSupervisor.akka$pattern$BackoffSupervisor$$child"), - - // #19487 - FilterAnyProblem("akka.actor.dungeon.Children"), - - // #19440 - ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.PipeToSupport.pipeCompletionStage"), - ProblemFilters.exclude[MissingMethodProblem]("akka.pattern.FutureTimeoutSupport.afterCompletionStage"), - - ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.PersistenceStash.internalStashOverflowStrategy") - ), - "2.4.2" -> Seq( - //internal API - FilterAnyProblemStartingWith("akka.http.impl"), - - ProblemFilters.exclude[FinalClassProblem]("akka.stream.stage.GraphStageLogic$Reading"), // this class is private - - // lifting this method to the type where it belongs - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOpsMat.mapMaterializedValue"), - - // #19815 make HTTP compile under Scala 2.12.0-M3 - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.http.scaladsl.model.headers.CacheDirectives#private.apply"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.http.scaladsl.model.headers.CacheDirectives#no-cache.apply"), - - // #19983 withoutSizeLimit overrides for Scala API - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.RequestEntity.withoutSizeLimit"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.UniversalEntity.withoutSizeLimit"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.ResponseEntity.withoutSizeLimit"), - - // #19162 javadsl initialization issues and model cleanup - ProblemFilters.exclude[FinalClassProblem]("akka.http.javadsl.model.MediaTypes"), - - // #19956 Remove exposed case classes in HTTP model - ProblemFilters.exclude[MissingTypesProblem]("akka.http.scaladsl.model.HttpRequest$"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.http.scaladsl.model.HttpRequest.unapply"), // returned Option[HttpRequest], now returns HttpRequest – no Option allocations! 
- ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpRequest.$default$1"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpRequest.$default$2"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpRequest.$default$3"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpRequest.$default$4"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpRequest.$default$5"), - ProblemFilters.exclude[MissingTypesProblem]("akka.http.scaladsl.model.HttpResponse"), // was a case class (Serializable, Product, Equals) - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpResponse.productElement"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpResponse.productArity"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpResponse.canEqual"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpResponse.productIterator"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpResponse.productPrefix"), - - ProblemFilters.exclude[MissingTypesProblem]("akka.http.scaladsl.model.HttpRequest"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpRequest.productElement"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpRequest.productArity"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpRequest.canEqual"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpRequest.productIterator"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpRequest.productPrefix"), - ProblemFilters.exclude[MissingTypesProblem]("akka.http.scaladsl.model.HttpResponse$"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.http.scaladsl.model.HttpResponse.unapply"), // returned Option[HttpRequest], now returns HttpRequest – no Option allocations! 
- ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpResponse.$default$1"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpResponse.$default$2"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpResponse.$default$3"), - ProblemFilters.exclude[MissingMethodProblem]("akka.http.scaladsl.model.HttpResponse.$default$4"), - - // #19162 fixing javadsl initialization edge-cases - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.javadsl.model.ContentTypes.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.javadsl.model.MediaTypes.this"), - - // #20014 should have been final always - ProblemFilters.exclude[FinalClassProblem]("akka.http.scaladsl.model.EntityStreamSizeException"), - - // #19849 content negotiation fixes - ProblemFilters.exclude[FinalClassProblem]("akka.http.scaladsl.marshalling.Marshal$UnacceptableResponseContentTypeException"), - - // #20009 internal and shouldn't have been public - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.QueueSource.completion"), - - // #20015 simplify materialized value computation tree - ProblemFilters.exclude[FinalMethodProblem]("akka.stream.impl.StreamLayout#AtomicModule.subModules"), - ProblemFilters.exclude[FinalMethodProblem]("akka.stream.impl.StreamLayout#AtomicModule.downstreams"), - ProblemFilters.exclude[FinalMethodProblem]("akka.stream.impl.StreamLayout#AtomicModule.upstreams"), - ProblemFilters.exclude[FinalMethodProblem]("akka.stream.impl.Stages#DirectProcessor.toString"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.impl.MaterializerSession.materializeAtomic"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.impl.MaterializerSession.materializeAtomic"), - ProblemFilters.exclude[MissingTypesProblem]("akka.stream.impl.Stages$StageModule"), - ProblemFilters.exclude[FinalMethodProblem]("akka.stream.impl.Stages#GroupBy.toString"), - ProblemFilters.exclude[MissingTypesProblem]("akka.stream.impl.FlowModule"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.FlowModule.subModules"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.impl.FlowModule.label"), - ProblemFilters.exclude[FinalClassProblem]("akka.stream.impl.fusing.GraphModule"), - - // #15947 catch mailbox creation failures - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.RepointableActorRef.point"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.Dispatch.initWithFailure"), - - // #19877 Source.queue termination support - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.stream.impl.SourceQueueAdapter.this"), - - // #19828 - ProblemFilters.exclude[DirectAbstractMethodProblem]("akka.persistence.Eventsourced#ProcessingState.onWriteMessageComplete"), - ProblemFilters.exclude[ReversedAbstractMethodProblem]("akka.persistence.Eventsourced#ProcessingState.onWriteMessageComplete"), - - // #19390 Add flow monitor - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOpsMat.monitor"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.fusing.GraphStages$TickSource$"), - - FilterAnyProblemStartingWith("akka.http.impl"), - - // #20214 - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.DefaultSSLContextCreation.createClientHttpsContext"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.DefaultSSLContextCreation.validateAndWarnAboutLooseSettings") - 
), - "2.4.4" -> Seq( - // #20080, #20081 remove race condition on HTTP client - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.scaladsl.Http#HostConnectionPool.gatewayFuture"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.http.scaladsl.Http#HostConnectionPool.copy"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.http.scaladsl.Http#HostConnectionPool.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.scaladsl.HttpExt.hostPoolCache"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.scaladsl.HttpExt.cachedGateway"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.http.scaladsl.Http#HostConnectionPool.apply"), - ProblemFilters.exclude[FinalClassProblem]("akka.http.impl.engine.client.PoolGateway"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.impl.engine.client.PoolGateway.currentState"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.impl.engine.client.PoolGateway.apply"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.http.impl.engine.client.PoolGateway.this"), - ProblemFilters.exclude[MissingClassProblem]("akka.http.impl.engine.client.PoolGateway$NewIncarnation$"), - ProblemFilters.exclude[MissingClassProblem]("akka.http.impl.engine.client.PoolGateway$Running$"), - ProblemFilters.exclude[MissingClassProblem]("akka.http.impl.engine.client.PoolGateway$IsShutdown$"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.impl.engine.client.PoolInterfaceActor.this"), - ProblemFilters.exclude[MissingClassProblem]("akka.http.impl.engine.client.PoolGateway$Running"), - ProblemFilters.exclude[MissingClassProblem]("akka.http.impl.engine.client.PoolGateway$IsShutdown"), - ProblemFilters.exclude[MissingClassProblem]("akka.http.impl.engine.client.PoolGateway$NewIncarnation"), - ProblemFilters.exclude[MissingClassProblem]("akka.http.impl.engine.client.PoolGateway$State"), - - // #20371, missing method and typo in another one making it impossible to use HTTPs via setting default HttpsConnectionContext - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.http.scaladsl.HttpExt.setDefaultClientHttpsContext"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.DefaultSSLContextCreation.createServerHttpsContext"), - - // #20342 HttpEntity scaladsl overrides - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.HttpEntity.withoutSizeLimit"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.HttpEntity.withSizeLimit"), - - // #20293 Use JDK7 NIO Path instead of File - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.model.HttpMessage#MessageTransformations.withEntity"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.HttpMessage.withEntity"), - - // #20401 custom media types registering - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.impl.model.parser.CommonActions.customMediaTypes"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.impl.model.parser.HeaderParser.Settings"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.impl.model.parser.HeaderParser#Settings.customMediaTypes"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.impl.engine.parsing.HttpHeaderParser#Settings.customMediaTypes"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.impl.settings.ParserSettingsImpl.apply"), - 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.impl.settings.ParserSettingsImpl.copy"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.impl.settings.ParserSettingsImpl.this"), - - // #20123 - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.recoverWithRetries"), - - // #20379 Allow registering custom media types - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.settings.ParserSettings.getCustomMediaTypes"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.settings.ParserSettings.customMediaTypes"), - - // internal api - FilterAnyProblemStartingWith("akka.stream.impl"), - FilterAnyProblemStartingWith("akka.http.impl"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.impl.util.package.printEvent"), - - // #20362 - parser private - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.impl.model.parser.CommonRules.expires-date"), - - // #20319 - remove not needed "no. of persists" counter in sharding - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.PersistentShard.persistCount"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.PersistentShard.persistCount_="), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.PersistentShardCoordinator.persistCount"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.PersistentShardCoordinator.persistCount_="), - - // #19225 - GraphStage and removal of isTerminated - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.impl.engine.parsing.HttpMessageParser.isTerminated"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.http.impl.engine.parsing.HttpMessageParser.stage"), - - // #20131 - flow combinator - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.backpressureTimeout"), - - // #20470 - new JavaDSL for Akka HTTP - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.model.DateTime.plus"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.model.DateTime.minus"), - - // #20214 - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.DefaultSSLContextCreation.createClientHttpsContext"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.DefaultSSLContextCreation.validateAndWarnAboutLooseSettings"), - - // #20257 Snapshots with PersistentFSM (experimental feature) - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.serialization.MessageFormats#PersistentStateChangeEventOrBuilder.getTimeoutNanos"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.serialization.MessageFormats#PersistentStateChangeEventOrBuilder.hasTimeoutNanos"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.fsm.PersistentFSM.saveStateSnapshot"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.fsm.PersistentFSM.akka$persistence$fsm$PersistentFSM$$currentStateTimeout"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.fsm.PersistentFSM.akka$persistence$fsm$PersistentFSM$$currentStateTimeout_="), - - // #19834 - ProblemFilters.exclude[MissingTypesProblem]("akka.stream.extra.Timed$StartTimed"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.extra.Timed#StartTimed.onPush"), - ProblemFilters.exclude[MissingTypesProblem]("akka.stream.extra.Timed$TimedInterval"), - 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.extra.Timed#TimedInterval.onPush"), - ProblemFilters.exclude[MissingTypesProblem]("akka.stream.extra.Timed$StopTimed"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.extra.Timed#StopTimed.onPush"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.extra.Timed#StopTimed.onUpstreamFinish"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.extra.Timed#StopTimed.onUpstreamFailure"), - - // #20462 - now uses a Set instead of a Seq within the private API of the cluster client - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.client.ClusterClient.contacts_="), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.client.ClusterClient.contacts"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.client.ClusterClient.initialContactsSel"), - - // * field EMPTY in class akka.http.javadsl.model.HttpEntities's type is different in current version, where it is: akka.http.javadsl.model.HttpEntity#Strict rather than: akka.http.scaladsl.model.HttpEntity#Strict - ProblemFilters.exclude[IncompatibleFieldTypeProblem]("akka.http.javadsl.model.HttpEntities.EMPTY"), - // method createIndefiniteLength(akka.http.javadsl.model.ContentType,akka.stream.javadsl.Source)akka.http.scaladsl.model.HttpEntity#IndefiniteLength in class akka.http.javadsl.model.HttpEntities has a different result type in current version, where it is akka.http.javadsl.model.HttpEntity#IndefiniteLength rather than akka.http.scaladsl.model.HttpEntity#IndefiniteLength - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.http.javadsl.model.HttpEntities.createIndefiniteLength"), - // method createCloseDelimited(akka.http.javadsl.model.ContentType,akka.stream.javadsl.Source)akka.http.scaladsl.model.HttpEntity#CloseDelimited in class akka.http.javadsl.model.HttpEntities has a different result type in current version, where it is akka.http.javadsl.model.HttpEntity#CloseDelimited rather than akka.http.scaladsl.model.HttpEntity#CloseDelimited - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.http.javadsl.model.HttpEntities.createCloseDelimited"), - // method createChunked(akka.http.javadsl.model.ContentType,akka.stream.javadsl.Source)akka.http.scaladsl.model.HttpEntity#Chunked in class akka.http.javadsl.model.HttpEntities has a different result type in current version, where it is akka.http.javadsl.model.HttpEntity#Chunked rather than akka.http.scaladsl.model.HttpEntity#Chunked - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.http.javadsl.model.HttpEntities.createChunked"), - // method create(akka.http.javadsl.model.ContentType,akka.stream.javadsl.Source)akka.http.scaladsl.model.HttpEntity#Chunked in class akka.http.javadsl.model.HttpEntities has a different result type in current version, where it is akka.http.javadsl.model.HttpEntity#Chunked rather than akka.http.scaladsl.model.HttpEntity#Chunked - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.http.javadsl.model.HttpEntities.create") - ), - "2.4.6" -> Seq( - // internal api - FilterAnyProblemStartingWith("akka.stream.impl"), - - // #20214 SNI disabling for single connections (AkkaSSLConfig being passed around) - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.ConnectionContext.sslConfig"), // class meant only for internal extension - - //#20229 migrate GroupBy to GraphStage - 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.GraphDSL#Builder.deprecatedAndThen"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.deprecatedAndThen"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Flow.deprecatedAndThenMat"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Source.deprecatedAndThen"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.FlowOps.deprecatedAndThen"), - - // #20367 Converts DelimiterFramingStage from PushPullStage to GraphStage - ProblemFilters.exclude[MissingTypesProblem]("akka.stream.scaladsl.Framing$DelimiterFramingStage"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#DelimiterFramingStage.onPush"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#DelimiterFramingStage.onUpstreamFinish"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#DelimiterFramingStage.onPull"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#DelimiterFramingStage.postStop"), - - // #20345 converts LengthFieldFramingStage to GraphStage - ProblemFilters.exclude[MissingTypesProblem]("akka.stream.scaladsl.Framing$LengthFieldFramingStage"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#LengthFieldFramingStage.onPush"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#LengthFieldFramingStage.onUpstreamFinish"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#LengthFieldFramingStage.onPull"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.Framing#LengthFieldFramingStage.postStop"), - - // #20414 Allow different ActorMaterializer subtypes - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.ActorMaterializer.downcast"), - - // #20531 adding refuseUid to Gated - FilterAnyProblem("akka.remote.EndpointManager$Gated"), - - // #20683 - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.model.HttpMessage.discardEntityBytes"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.HttpMessage.discardEntityBytes"), - - // #20288 migrate BodyPartRenderer to GraphStage - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.http.impl.engine.rendering.BodyPartRenderer.streamed"), - - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.scaladsl.TLS.apply$default$5"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.TLS.apply$default$4"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.scaladsl.GraphDSL#Implicits#PortOpsImpl.deprecatedAndThen") - ), - "2.4.7" -> Seq( - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.ActorMaterializer.downcast"), - FilterAnyProblemStartingWith("akka.cluster.pubsub.DistributedPubSubMediator$Internal"), - - // abstract method discardEntityBytes(akka.stream.Materializer)akka.http.javadsl.model.HttpMessage#DiscardedEntity in interface akka.http.javadsl.model.HttpMessage is present only in current version - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.model.HttpMessage.discardEntityBytes"), - // method discardEntityBytes(akka.stream.Materializer)akka.http.scaladsl.model.HttpMessage#DiscardedEntity in trait akka.http.scaladsl.model.HttpMessage is present only in current version - 
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.HttpMessage.discardEntityBytes") - ), - "2.4.8" -> Seq( - // #20717 example snippet for akka http java dsl: SecurityDirectives - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.model.HttpMessage#MessageTransformations.addCredentials"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.HttpMessage.addCredentials"), - - // #20456 adding hot connection pool option - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.settings.ConnectionPoolSettings.getMinConnections"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.settings.ConnectionPoolSettings.minConnections"), - FilterAnyProblemStartingWith("akka.http.impl"), - - // #20846 change of internal Status message - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.pubsub.protobuf.msg.DistributedPubSubMessages#StatusOrBuilder.getReplyToStatus"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.pubsub.protobuf.msg.DistributedPubSubMessages#StatusOrBuilder.hasReplyToStatus"), - - // #20543 GraphStage subtypes should not be private to akka - ProblemFilters.exclude[DirectAbstractMethodProblem]("akka.stream.ActorMaterializer.actorOf"), - - // Interpreter internals change - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.stage.GraphStageLogic.portToConn"), - - // #20994 adding new decode method, since we're on JDK7+ now - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.util.ByteString.decodeString"), - - // #20508 HTTP: Document how to be able to support custom request methods - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.model.HttpMethod.getRequestEntityAcceptance"), - - // #20976 provide different options to deal with the illegal response header value - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.settings.ParserSettings.getIllegalResponseHeaderValueProcessingMode"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.settings.ParserSettings.illegalResponseHeaderValueProcessingMode"), - - ProblemFilters.exclude[DirectAbstractMethodProblem]("akka.stream.ActorMaterializer.actorOf"), - - // #20628 migrate Masker to GraphStage - ProblemFilters.exclude[MissingTypesProblem]("akka.http.impl.engine.ws.Masking$Masking"), - ProblemFilters.exclude[MissingTypesProblem]("akka.http.impl.engine.ws.Masking$Masker"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.http.impl.engine.ws.Masking#Masker.initial"), - ProblemFilters.exclude[MissingClassProblem]("akka.http.impl.engine.ws.Masking$Masker$Running"), - ProblemFilters.exclude[MissingTypesProblem]("akka.http.impl.engine.ws.Masking$Unmasking"), - - // # - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.javadsl.model.HttpEntity.discardBytes"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.HttpEntity.discardBytes"), - - // #20630 corrected return types of java methods - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.javadsl.RunnableGraph#RunnableGraphAdapter.named"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.javadsl.RunnableGraph.withAttributes"), - - // #19872 double wildcard for actor deployment config - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.actor.Deployer.lookup"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.util.WildcardTree.apply"), - 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.util.WildcardTree.find"), - - // #20942 ClusterSingleton - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.singleton.ClusterSingletonManager.addRemoved"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.singleton.ClusterSingletonManager.selfAddressOption") - ), - "2.4.9" -> Seq( - // #21025 new orElse flow op - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.orElseGraph"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.orElse"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOpsMat.orElseMat"), - - // #21201 adding childActorOf to TestActor / TestKit / TestProbe - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.childActorOf$default$3"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.childActorOf$default$2"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.testkit.TestKitBase.childActorOf"), - - // #21184 add java api for ws testkit - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.ws.TextMessage.asScala"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.ws.TextMessage.getStreamedText"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.ws.BinaryMessage.asScala"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.http.scaladsl.model.ws.BinaryMessage.getStreamedData"), - - // #21273 minor cleanup of WildcardIndex - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.util.WildcardIndex.empty"), - - // #20888 new FoldAsync op for Flow - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.foldAsync"), - - // method ChaseLimit()Int in object akka.stream.impl.fusing.GraphInterpreter does not have a correspondent in current version - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.fusing.GraphInterpreter.ChaseLimit"), - FilterAnyProblemStartingWith("akka.http.impl.engine") - ), - "2.4.10" -> Seq( - // #21290 new zipWithIndex flow op - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.zipWithIndex"), - - // Remove useUntrustedMode which is an internal API and not used anywhere anymore - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.Remoting.useUntrustedMode"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.RemoteTransport.useUntrustedMode"), - - // Use OptionVal in remote Send envelope - FilterAnyProblemStartingWith("akka.remote.EndpointManager"), - FilterAnyProblemStartingWith("akka.remote.Remoting"), - FilterAnyProblemStartingWith("akka.remote.RemoteTransport"), - FilterAnyProblemStartingWith("akka.remote.InboundMessageDispatcher"), - FilterAnyProblemStartingWith("akka.remote.DefaultMessageDispatcher"), - FilterAnyProblemStartingWith("akka.remote.transport"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.RemoteActorRefProvider.quarantine"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.RemoteWatcher.quarantine"), - - // #20644 long uids - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#UniqueAddressOrBuilder.hasUid2"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#UniqueAddressOrBuilder.getUid2"), - 
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.msg.ReplicatorMessages#UniqueAddressOrBuilder.hasUid2"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.msg.ReplicatorMessages#UniqueAddressOrBuilder.getUid2"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.RemoteWatcher.receiveHeartbeatRsp"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.RemoteWatcher.selfHeartbeatRspMsg"), - - // #21131 new implementation for Akka Typed - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.DeathWatch.isWatching"), - - // class akka.stream.impl.fusing.Map is declared final in current version - ProblemFilters.exclude[FinalClassProblem]("akka.stream.impl.fusing.Map") - ), - "2.4.11" -> Seq( - // #20795 IOResult construction exposed - ProblemFilters.exclude[MissingTypesProblem]("akka.stream.IOResult$"), - - // #21727 moved all of Unfold.scala in package akka.stream.impl - ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.UnfoldAsync"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.scaladsl.Unfold"), - - // #21194 renamed internal actor method - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardCoordinator.allocateShardHomes"), - - // MarkerLoggingAdapter introduced (all internal classes) - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.actor.LocalActorRefProvider.log"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.actor.VirtualPathContainer.log"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.actor.VirtualPathContainer.this"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.RemoteSystemDaemon.this"), - - // method this(akka.actor.ExtendedActorSystem,akka.remote.RemoteActorRefProvider,akka.event.LoggingAdapter)Unit in class akka.remote.DefaultMessageDispatcher's type is different in current version, where it is (akka.actor.ExtendedActorSystem,akka.remote.RemoteActorRefProvider,akka.event.MarkerLoggingAdapter)Unit instead of (akka.actor.ExtendedActorSystem,akka.remote.RemoteActorRefProvider,akka.event.LoggingAdapter)Unit - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.DefaultMessageDispatcher.this"), - // trait akka.remote.artery.StageLogging does not have a correspondent in current version - ProblemFilters.exclude[MissingClassProblem]("akka.remote.artery.StageLogging"), - // method SSLProtocol()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLProtocol"), - // method SSLTrustStorePassword()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLTrustStorePassword"), - // method SSLKeyStorePassword()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLKeyStorePassword"), - // method SSLRandomNumberGenerator()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where 
it is java.lang.String rather than scala.Option - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLRandomNumberGenerator"), - // method SSLKeyPassword()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLKeyPassword"), - // method SSLKeyStore()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLKeyStore"), - // method SSLTrustStore()scala.Option in class akka.remote.transport.netty.SSLSettings has a different result type in current version, where it is java.lang.String rather than scala.Option - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.transport.netty.SSLSettings.SSLTrustStore"), - // method initializeClientSSL(akka.remote.transport.netty.SSLSettings,akka.event.LoggingAdapter)org.jboss.netty.handler.ssl.SslHandler in object akka.remote.transport.netty.NettySSLSupport does not have a correspondent in current version - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.transport.netty.NettySSLSupport.initializeClientSSL"), - // method apply(akka.remote.transport.netty.SSLSettings,akka.event.LoggingAdapter,Boolean)org.jboss.netty.handler.ssl.SslHandler in object akka.remote.transport.netty.NettySSLSupport's type is different in current version, where it is (akka.remote.transport.netty.SSLSettings,akka.event.MarkerLoggingAdapter,Boolean)org.jboss.netty.handler.ssl.SslHandler instead of (akka.remote.transport.netty.SSLSettings,akka.event.LoggingAdapter,Boolean)org.jboss.netty.handler.ssl.SslHandler - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.transport.netty.NettySSLSupport.apply"), - // initializeCustomSecureRandom(scala.Option,akka.event.LoggingAdapter)java.security.SecureRandom in object akka.remote.transport.netty.NettySSLSupport does not have a correspondent in current version - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.transport.netty.NettySSLSupport.initializeCustomSecureRandom"), - // method initializeServerSSL(akka.remote.transport.netty.SSLSettings,akka.event.LoggingAdapter)org.jboss.netty.handler.ssl.SslHandler in object akka.remote.transport.netty.NettySSLSupport does not have a correspondent in current version - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.transport.netty.NettySSLSupport.initializeServerSSL"), - // abstract method makeLogger(java.lang.Class)akka.event.LoggingAdapter in interface akka.stream.MaterializerLoggingProvider is inherited by class ActorMaterializer in current version. 
- ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.stream.MaterializerLoggingProvider.makeLogger"), - FilterAnyProblemStartingWith("akka.stream.impl"), - // synthetic method currentEventsByTag$default$2()Long in class akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal has a different result type in current version, where it is akka.persistence.query.Offset rather than Long - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal.currentEventsByTag$default$2"), - // synthetic method eventsByTag$default$2()Long in class akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal has a different result type in current version, where it is akka.persistence.query.Offset rather than Long - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal.eventsByTag$default$2"), - - // #21330 takeWhile inclusive flag - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.takeWhile"), - - // #21541 new ScanAsync flow op - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.scanAsync") - ), - "2.4.12" -> Seq( - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.Materializer.materialize"), - - // #21775 - overrode ByteString.stringPrefix and made it final - ProblemFilters.exclude[FinalMethodProblem]("akka.util.ByteString.stringPrefix"), - - // #20553 Tree flattening should be separate from Fusing - ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$StructuralInfo"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$StructuralInfo$") - ), - "2.4.13" -> Seq( - // extension method isEmpty$extension(Int)Boolean in object akka.remote.artery.compress.TopHeavyHitters#HashCodeVal does not have a correspondent in current version - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.artery.compress.TopHeavyHitters#HashCodeVal.isEmpty$extension"), - // isEmpty()Boolean in class akka.remote.artery.compress.TopHeavyHitters#HashCodeVal does not have a correspondent in current version - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.artery.compress.TopHeavyHitters#HashCodeVal.isEmpty") - ), - "2.4.14" -> Seq( - // # 21944 - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ClusterEvent#ReachabilityEvent.member"), - - // #21645 durable distributed data - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.WriteAggregator.props"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.WriteAggregator.this"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.Replicator.write"), - - // #21394 remove static config path of levelDBJournal and localSnapshotStore - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.snapshot.local.LocalSnapshotStore.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.journal.leveldb.LeveldbStore.configPath"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.journal.leveldb.LeveldbJournal.configPath"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.journal.leveldb.SharedLeveldbStore.configPath"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.journal.leveldb.LeveldbStore.prepareConfig"), - - // #20737 aligned test sink and test source stage factory methods types - 
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.stream.testkit.TestSinkStage.apply"), - - FilterAnyProblemStartingWith("akka.stream.impl"), - FilterAnyProblemStartingWith("akka.remote.artery"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.remote.MessageSerializer.serializeForArtery"), - - // https://github.com/akka/akka/pull/21688 - ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$StructuralInfo$"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.Fusing$StructuralInfo"), - - // https://github.com/akka/akka/pull/21989 - add more information in tcp connection shutdown logs (add mapError) - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.mapError"), - - // #21894 Programmatic configuration of the ActorSystem - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorSystemImpl.this") - ), - "2.4.16" -> Seq( - // internal classes - FilterAnyProblemStartingWith("akka.remote.artery") - ), - "2.4.17" -> Seq( - // #22711 changes to groupedWithin internal classes - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.groupedWeightedWithin"), - - // #22277 changes to internal classes - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.transport.netty.TcpServerHandler.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.transport.netty.TcpClientHandler.this"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.transport.netty.TcpHandlers.log"), - - // #22224 DaemonMsgCreateSerializer using manifests - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData.getClassesBytes"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData.getClassesList"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData.getClassesCount"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData.getClasses"), - ProblemFilters.exclude[MissingFieldProblem]("akka.remote.WireFormats#PropsData.CLASSES_FIELD_NUMBER"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getHasManifest"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getHasManifestCount"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getSerializerIdsList"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getSerializerIds"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getHasManifestList"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getSerializerIdsCount"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getClassesBytes"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getClassesList"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getClassesCount"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getClasses"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getManifestsBytes"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getManifests"), - 
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getManifestsList"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.WireFormats#PropsDataOrBuilder.getManifestsCount"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.getClassesBytes"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.getClassesList"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.addClassesBytes"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.getClassesCount"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.clearClasses"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.addClasses"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.getClasses"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.addAllClasses"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.WireFormats#PropsData#Builder.setClasses"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.DaemonMsgCreateSerializer.serialize"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.DaemonMsgCreateSerializer.deserialize"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.DaemonMsgCreateSerializer.deserialize"), - ProblemFilters.exclude[FinalClassProblem]("akka.remote.serialization.DaemonMsgCreateSerializer"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.DaemonMsgCreateSerializer.serialization"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.remote.serialization.DaemonMsgCreateSerializer.this"), - - // #22657 changes to internal classes - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FilePublisher.props"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FilePublisher.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSink.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSource.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSubscriber.props"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSubscriber.this"), - - // Internal MessageBuffer for actors - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.pubsub.PerGroupingBuffer.akka$cluster$pubsub$PerGroupingBuffer$$buffers"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.pubsub.PerGroupingBuffer.akka$cluster$pubsub$PerGroupingBuffer$_setter_$akka$cluster$pubsub$PerGroupingBuffer$$buffers_="), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.singleton.ClusterSingletonProxy.buffer"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.singleton.ClusterSingletonProxy.buffer_="), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.client.ClusterClient.buffer"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.Shard.totalBufferSize"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.sharding.Shard.messageBuffers"), - 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.Shard.messageBuffers_="), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.totalBufferSize"), - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.sharding.ShardRegion.shardBuffers"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.sharding.ShardRegion.shardBuffers_=") - ), - "2.4.18" -> Seq( - ), - "2.4.19" -> Seq( - ) - // make sure that - // * this list ends with the latest released version number - // * is kept in sync between release-2.4 and master branch - ) - - val Release25Filters = Seq( - "2.5.0" -> Seq( - - // #22759 LMDB files - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.LmdbDurableStore.env"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.LmdbDurableStore.db"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.LmdbDurableStore.keyBuffer"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.LmdbDurableStore.valueBuffer_="), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.LmdbDurableStore.valueBuffer"), - - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.stream.scaladsl.FlowOps.groupedWeightedWithin"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSubscriber.props"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSource.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSink.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FilePublisher.props"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FileSubscriber.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.io.FilePublisher.this"), - ProblemFilters.exclude[MissingClassProblem]("akka.stream.impl.fusing.GroupedWithin"), - - ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.stream.Graph.traversalBuilder"), - ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.stream.Graph.named"), - ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.stream.Graph.addAttributes"), - ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.stream.Graph.async") - ), - "2.5.1" -> Seq( - // #22794 watchWith - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.ActorContext.watchWith"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.DeathWatch.watchWith"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.DeathWatch.akka$actor$dungeon$DeathWatch$$watching"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.DeathWatch.akka$actor$dungeon$DeathWatch$$watching_="), - - // #22868 store shards - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.sendUpdate"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.waitingForUpdate"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.getState"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.waitingForState"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.this"), - - // #21213 Feature request: Let BackoffSupervisor reply to messages when its child is stopped - 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffSupervisor.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffOptionsImpl.copy"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffOptionsImpl.this"), - ProblemFilters.exclude[MissingTypesProblem]("akka.pattern.BackoffOptionsImpl$"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffOptionsImpl.apply"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffOnRestartSupervisor.this"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.pattern.HandleBackoff.replyWhileStopped"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.pattern.BackoffOptions.withReplyWhileStopped") - ), - "2.5.2" -> Seq( - // #22881 Make sure connections are aborted correctly on Windows - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.io.ChannelRegistration.cancel"), - - // #23144 recoverWithRetries cleanup - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.fusing.RecoverWith.InfiniteRetries"), - - // #23025 OversizedPayloadException DeltaPropagation - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.DeltaPropagationSelector.maxDeltaSize"), - - // #23023 added a new overload with implementation to trait, so old transport implementations compiled against - // older versions will be missing the method. We accept that incompatibility for now. - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.transport.AssociationHandle.disassociate") - ) - ) - - val Latest24Filters = Release24Filters.last - val AllFilters = - Release25Filters ++ Release24Filters.dropRight(1) :+ (Latest24Filters._1 -> (Latest24Filters._2 ++ bcIssuesBetween24and25)) - - Map(AllFilters: _*) - } } diff --git a/project/build.properties b/project/build.properties index 64317fdae5..c091b86ca4 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=0.13.15 +sbt.version=0.13.16 diff --git a/project/plugins.sbt b/project/plugins.sbt index 15355b06b2..e1c5eaaaf4 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -15,7 +15,7 @@ addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "0.7.1") addSbtPlugin("com.typesafe.sbt" % "sbt-osgi" % "0.9.1") -addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "0.1.14") +addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "0.1.15") addSbtPlugin("com.jsuereth" % "sbt-pgp" % "1.0.0") @@ -40,6 +40,6 @@ addSbtPlugin("com.timushev.sbt" % "sbt-updates" % "0.1.10") addSbtPlugin("com.lightbend.akka" % "sbt-paradox-akka" % "0.3") -addSbtPlugin("com.lightbend" % "sbt-whitesource" % "0.1.2") +addSbtPlugin("com.lightbend" % "sbt-whitesource" % "0.1.5") addSbtPlugin("com.typesafe.sbt" % "sbt-git" % "0.9.3")