From ddc46400078af7595b51f10b97e606782926ece2 Mon Sep 17 00:00:00 2001 From: Mike Krumlauf Date: Wed, 28 Dec 2011 09:11:11 -0500 Subject: [PATCH 01/84] Javadoc modification: Changed system.actorOf(classOf[MyActor]) to system.actorOf(MyActor.class) to reflect Java syntax (instead of Scala). --- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index dbe5630789..5e49a1dc0e 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -123,7 +123,7 @@ object ActorSystem { * system.actorOf(Props(new MyActor(...)) * * // Java - * system.actorOf(classOf[MyActor]); + * system.actorOf(MyActor.class); * system.actorOf(Props(new Creator() { * public MyActor create() { ... } * }); From 39a96b2ac35618facc60f9dbc257a65e42d50af5 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Sun, 1 Jan 2012 20:48:03 +0100 Subject: [PATCH 02/84] Added akka.pattern.gracefulStop. See #1583 --- .../test/scala/akka/pattern/PatternSpec.scala | 52 ++++++++++++++++ .../main/scala/akka/pattern/Patterns.scala | 30 ++++++++++ .../src/main/scala/akka/pattern/package.scala | 59 +++++++++++++++++++ 3 files changed, 141 insertions(+) create mode 100644 akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala create mode 100644 akka-actor/src/main/scala/akka/pattern/Patterns.scala create mode 100644 akka-actor/src/main/scala/akka/pattern/package.scala diff --git a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala new file mode 100644 index 0000000000..69297b743a --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala @@ -0,0 +1,52 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.pattern + +import akka.testkit.AkkaSpec +import akka.actor.Props +import akka.actor.Actor +import akka.actor.ActorTimeoutException +import akka.dispatch.Await +import akka.util.Duration +import akka.util.duration._ + +object PatternSpec { + case class Work(duration: Duration) + class TargetActor extends Actor { + def receive = { + case Work(duration) ⇒ duration.sleep() + } + } +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class PatternSpec extends AkkaSpec { + + import PatternSpec._ + + "pattern.stop" must { + + "provide Future for stopping an actor" in { + val target = system.actorOf(Props[TargetActor]) + val result = gracefulStop(target, 5 seconds) + Await.result(result, 6 seconds) must be(true) + } + + "complete Future when actor already terminated" in { + val target = system.actorOf(Props[TargetActor]) + Await.ready(gracefulStop(target, 5 seconds), 6 seconds) + Await.ready(gracefulStop(target, 1 millis), 1 second) + } + + "complete Future with ActorTimeoutException when actor not terminated within timeout" in { + val target = system.actorOf(Props[TargetActor]) + target ! Work(200 millis) + val result = gracefulStop(target, 50 millis) + intercept[ActorTimeoutException] { + Await.result(result, 100 millis) + } + } + } +} diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala new file mode 100644 index 0000000000..6840716bad --- /dev/null +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -0,0 +1,30 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.pattern + +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.dispatch.Future +import akka.util.Duration + +/** + * Patterns is the Java API for the Akka patterns that provide solutions + * to commonly occurring problems. + */ +object Patterns { + + /** + * Returns a [[akka.dispatch.Future]] that will be completed with `Right` `true` when + * existing messages of the target actor has been processed and the actor has been + * terminated. + * + * Useful when you need to wait for termination or compose ordered termination of several actors. + * + * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] + * is completed with `Left` [[akka.actor.ActorTimeoutException]]. + */ + def gracefulStop(target: ActorRef, timeout: Duration, system: ActorSystem): Future[Boolean] = { + akka.pattern.gracefulStop(target, timeout)(system) + } +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/package.scala b/akka-actor/src/main/scala/akka/pattern/package.scala new file mode 100644 index 0000000000..eb24d9ae8e --- /dev/null +++ b/akka-actor/src/main/scala/akka/pattern/package.scala @@ -0,0 +1,59 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka + +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.actor.ActorTimeoutException +import akka.actor.PoisonPill +import akka.actor.Props +import akka.actor.ReceiveTimeout +import akka.actor.Terminated +import akka.dispatch.Future +import akka.dispatch.Promise +import akka.util.Duration + +/** + * Akka patterns that provide solutions to commonly occurring problems. + */ +package object pattern { + + /** + * Returns a [[akka.dispatch.Future]] that will be completed with `Right` `true` when + * existing messages of the target actor has been processed and the actor has been + * terminated. + * + * Useful when you need to wait for termination or compose ordered termination of several actors. + * + * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] + * is completed with `Left` [[akka.actor.ActorTimeoutException]]. + */ + def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { + if (target.isTerminated) { + Promise.successful(true)(system.dispatcher) + } else { + val result = Promise[Boolean]()(system.dispatcher) + system.actorOf(Props(new Actor { + // Terminated will be received when target has been stopped + context watch target + target ! PoisonPill + // ReceiveTimeout will be received if nothing else is received within the timeout + context setReceiveTimeout timeout + + def receive = { + case Terminated(a) ⇒ + result.complete(Right(true)) + system.stop(self) + case ReceiveTimeout ⇒ + result.complete(Left( + new ActorTimeoutException("Failed to stop [%s] within [%s]".format(target.path, context.receiveTimeout)))) + system.stop(self) + } + })) + result + } + } + +} From 3d3b745a26af3c56ba231fccd7b7983826db5f06 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Sun, 1 Jan 2012 21:27:52 +0100 Subject: [PATCH 03/84] Improvements based on feedback. 
See #1583 --- .../test/scala/akka/pattern/PatternSpec.scala | 8 ++++---- .../src/main/scala/akka/pattern/package.scala | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala index 69297b743a..ce1b9db201 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala @@ -26,7 +26,7 @@ class PatternSpec extends AkkaSpec { import PatternSpec._ - "pattern.stop" must { + "pattern.gracefulStop" must { "provide Future for stopping an actor" in { val target = system.actorOf(Props[TargetActor]) @@ -42,10 +42,10 @@ class PatternSpec extends AkkaSpec { "complete Future with ActorTimeoutException when actor not terminated within timeout" in { val target = system.actorOf(Props[TargetActor]) - target ! Work(200 millis) - val result = gracefulStop(target, 50 millis) + target ! Work(250 millis) + val result = gracefulStop(target, 10 millis) intercept[ActorTimeoutException] { - Await.result(result, 100 millis) + Await.result(result, 200 millis) } } } diff --git a/akka-actor/src/main/scala/akka/pattern/package.scala b/akka-actor/src/main/scala/akka/pattern/package.scala index eb24d9ae8e..728db960ed 100644 --- a/akka-actor/src/main/scala/akka/pattern/package.scala +++ b/akka-actor/src/main/scala/akka/pattern/package.scala @@ -32,9 +32,9 @@ package object pattern { */ def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { if (target.isTerminated) { - Promise.successful(true)(system.dispatcher) + Promise.successful(true) } else { - val result = Promise[Boolean]()(system.dispatcher) + val result = Promise[Boolean]() system.actorOf(Props(new Actor { // Terminated will be received when target has been stopped context watch target @@ -43,13 +43,13 @@ package object pattern { context setReceiveTimeout timeout def receive = { - case Terminated(a) ⇒ - result.complete(Right(true)) - system.stop(self) + case Terminated(a) if a == target ⇒ + result success true + context.stop(self) case ReceiveTimeout ⇒ - result.complete(Left( - new ActorTimeoutException("Failed to stop [%s] within [%s]".format(target.path, context.receiveTimeout)))) - system.stop(self) + result failure new ActorTimeoutException( + "Failed to stop [%s] within [%s]".format(target.path, context.receiveTimeout)) + context.stop(self) } })) result From f3cc1485387613200360758be1f942130633f6a0 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Sun, 1 Jan 2012 21:30:04 +0100 Subject: [PATCH 04/84] format --- akka-actor/src/main/scala/akka/pattern/package.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/pattern/package.scala b/akka-actor/src/main/scala/akka/pattern/package.scala index 728db960ed..91159d08a9 100644 --- a/akka-actor/src/main/scala/akka/pattern/package.scala +++ b/akka-actor/src/main/scala/akka/pattern/package.scala @@ -45,11 +45,11 @@ package object pattern { def receive = { case Terminated(a) if a == target ⇒ result success true - context.stop(self) + context stop self case ReceiveTimeout ⇒ result failure new ActorTimeoutException( "Failed to stop [%s] within [%s]".format(target.path, context.receiveTimeout)) - context.stop(self) + context stop self } })) result From 0a3021eb60ebecfef99d5736706696761d72ff48 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 3 Jan 2012 10:03:58 +0100 Subject: 
[PATCH 05/84] DOC: Fixed wrong documentation of bounded mailbox, mailbox-capacity should be used, not task-queue-type. See #1601 --- akka-actor/src/main/resources/reference.conf | 6 ++++-- akka-docs/java/dispatchers.rst | 2 +- .../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala | 6 ++---- akka-docs/scala/dispatchers.rst | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index 8e45379592..236eadc579 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -162,11 +162,13 @@ akka { # property # NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to # deadlock, use with care - # The following are only used for Dispatcher and only if mailbox-capacity > 0 + # The following mailbox-push-timeout-time is only used for type=Dispatcher and + # only if mailbox-capacity > 0 mailbox-capacity = -1 # Specifies the timeout to add a new message to a mailbox that is full - - # negative number means infinite timeout + # negative number means infinite timeout. It is only used for type=Dispatcher + # and only if mailbox-capacity > 0 mailbox-push-timeout-time = 10s # FQCN of the MailboxType, if not specified the default bounded or unbounded diff --git a/akka-docs/java/dispatchers.rst b/akka-docs/java/dispatchers.rst index 50bae0bd97..fd117f65f9 100644 --- a/akka-docs/java/dispatchers.rst +++ b/akka-docs/java/dispatchers.rst @@ -166,7 +166,7 @@ if not specified otherwise. default-dispatcher { # If negative (or zero) then an unbounded mailbox is used (default) # If positive then a bounded mailbox is used and the capacity is set to the number specified - task-queue-size = 1000 + mailbox-capacity = 1000 } } } diff --git a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala index 6b4e17b63b..ba46733cbb 100644 --- a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala @@ -41,10 +41,8 @@ object DispatcherDocSpec { type = Dispatcher core-pool-size-factor = 8.0 max-pool-size-factor = 16.0 - # Specifies the bounded capacity of the task queue - task-queue-size = 100 - # Specifies which type of task queue will be used, can be "array" or "linked" (default) - task-queue-type = "array" + # Specifies the bounded capacity of the mailbox queue + mailbox-capacity = 100 throughput = 3 } //#my-bounded-config diff --git a/akka-docs/scala/dispatchers.rst b/akka-docs/scala/dispatchers.rst index 3006152c49..c9923cf459 100644 --- a/akka-docs/scala/dispatchers.rst +++ b/akka-docs/scala/dispatchers.rst @@ -164,7 +164,7 @@ if not specified otherwise. default-dispatcher { # If negative (or zero) then an unbounded mailbox is used (default) # If positive then a bounded mailbox is used and the capacity is set to the number specified - task-queue-size = 1000 + mailbox-capacity = 1000 } } } From 5ba0963d71716d91641b2ab59f2a9ddfad0a4339 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 3 Jan 2012 11:41:49 +0100 Subject: [PATCH 06/84] Added documentation. 
See #1583 --- .../main/scala/akka/pattern/Patterns.scala | 8 ++--- .../src/main/scala/akka/pattern/package.scala | 4 +-- .../docs/actor/UntypedActorDocTestBase.java | 29 +++++++++++++++++-- akka-docs/java/untyped-actors.rst | 10 +++++++ akka-docs/scala/actors.rst | 9 ++++++ .../code/akka/docs/actor/ActorDocSpec.scala | 25 +++++++++++----- 6 files changed, 70 insertions(+), 15 deletions(-) diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index 6840716bad..abf435edc5 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -15,16 +15,16 @@ import akka.util.Duration object Patterns { /** - * Returns a [[akka.dispatch.Future]] that will be completed with `Right` `true` when + * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when * existing messages of the target actor has been processed and the actor has been * terminated. * * Useful when you need to wait for termination or compose ordered termination of several actors. * * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] - * is completed with `Left` [[akka.actor.ActorTimeoutException]]. + * is completed with failure [[akka.actor.ActorTimeoutException]]. */ - def gracefulStop(target: ActorRef, timeout: Duration, system: ActorSystem): Future[Boolean] = { - akka.pattern.gracefulStop(target, timeout)(system) + def gracefulStop(target: ActorRef, timeout: Duration, system: ActorSystem): Future[java.lang.Boolean] = { + akka.pattern.gracefulStop(target, timeout)(system).asInstanceOf[Future[java.lang.Boolean]] } } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/package.scala b/akka-actor/src/main/scala/akka/pattern/package.scala index 91159d08a9..b09ee56897 100644 --- a/akka-actor/src/main/scala/akka/pattern/package.scala +++ b/akka-actor/src/main/scala/akka/pattern/package.scala @@ -21,14 +21,14 @@ import akka.util.Duration package object pattern { /** - * Returns a [[akka.dispatch.Future]] that will be completed with `Right` `true` when + * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when * existing messages of the target actor has been processed and the actor has been * terminated. * * Useful when you need to wait for termination or compose ordered termination of several actors. * * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] - * is completed with `Left` [[akka.actor.ActorTimeoutException]]. + * is completed with failure [[akka.actor.ActorTimeoutException]]. 
*/ def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { if (target.isTerminated) { diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java index d442ae6461..b1d84a5841 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java @@ -28,6 +28,14 @@ import akka.japi.Procedure; import akka.actor.Terminated; //#import-watch +//#import-gracefulStop +import static akka.pattern.Patterns.gracefulStop; +import akka.dispatch.Future; +import akka.dispatch.Await; +import akka.util.Duration; +import akka.actor.ActorTimeoutException; +//#import-gracefulStop + import akka.actor.Props; import akka.actor.UntypedActor; import akka.actor.UntypedActorFactory; @@ -100,8 +108,7 @@ public class UntypedActorDocTestBase { public void propsActorOf() { ActorSystem system = ActorSystem.create("MySystem"); //#creating-props - ActorRef myActor = system.actorOf(new Props(MyUntypedActor.class).withDispatcher("my-dispatcher"), - "myactor"); + ActorRef myActor = system.actorOf(new Props(MyUntypedActor.class).withDispatcher("my-dispatcher"), "myactor"); //#creating-props myActor.tell("test"); system.shutdown(); @@ -174,6 +181,23 @@ public class UntypedActorDocTestBase { system.shutdown(); } + @Test + public void usePatternsGracefulStop() { + ActorSystem system = ActorSystem.create("MySystem"); + ActorRef actorRef = system.actorOf(new Props(MyUntypedActor.class)); + //#gracefulStop + + try { + Future stopped = gracefulStop(actorRef, Duration.create(5, TimeUnit.SECONDS), system); + Await.result(stopped, Duration.create(6, TimeUnit.SECONDS)); + // the actor has been stopped + } catch (ActorTimeoutException e) { + // the actor wasn't stopped within 5 seconds + } + //#gracefulStop + system.shutdown(); + } + public static class MyActor extends UntypedActor { public MyActor(String s) { @@ -264,6 +288,7 @@ public class UntypedActorDocTestBase { } } } + //#hot-swap-actor //#watch diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst index b24b1d6e6c..7e0d788590 100644 --- a/akka-docs/java/untyped-actors.rst +++ b/akka-docs/java/untyped-actors.rst @@ -485,6 +485,16 @@ Use it like this: .. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java :include: import-actors,poison-pill +Graceful Stop +------------- + +:meth:`gracefulStop` is useful if you need to wait for termination or compose ordered +termination of several actors: + +.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java + :include: import-gracefulStop,gracefulStop + + .. _UntypedActor.HotSwap: HotSwap diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 204aa3ce56..558b50fac8 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -538,6 +538,15 @@ stop the actor when the message is processed. ``PoisonPill`` is enqueued as ordinary messages and will be handled after messages that were already queued in the mailbox. +Graceful Stop +------------- + +:meth:`gracefulStop` is useful if you need to wait for termination or compose ordered +termination of several actors: + +.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#gracefulStop + + .. 
_Actor.HotSwap: Become/Unbecome diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala index cdba3d07f3..20ac33480b 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala @@ -8,13 +8,7 @@ import akka.actor.Actor import akka.actor.Props import akka.event.Logging import akka.dispatch.Future - -//#imports1 - -//#imports2 import akka.actor.ActorSystem -//#imports2 - import org.scalatest.{ BeforeAndAfterAll, WordSpec } import org.scalatest.matchers.MustMatchers import akka.testkit._ @@ -114,7 +108,6 @@ object SwapperApp extends App { //#swapper //#receive-orElse -import akka.actor.Actor.Receive abstract class GenericActor extends Actor { // to be defined in subclassing actor @@ -317,4 +310,22 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { a ! "kill" expectMsg("finished") } + + "using pattern gracefulStop" in { + val actorRef = system.actorOf(Props[MyActor]) + //#gracefulStop + import akka.pattern.gracefulStop + import akka.dispatch.Await + import akka.actor.ActorTimeoutException + + try { + val stopped: Future[Boolean] = gracefulStop(actorRef, 5 seconds)(system) + Await.result(stopped, 6 seconds) + // the actor has been stopped + } catch { + case e: ActorTimeoutException ⇒ // the actor wasn't stopped within 5 seconds + } + //#gracefulStop + + } } From 4f778e826dd6e9dbae0973685f3c457c574fd2ea Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 5 Jan 2012 09:23:02 +0100 Subject: [PATCH 07/84] Renamed RemoteExtension file to RemoteSettings and it's not an Extension --- .../remote/{RemoteExtension.scala => RemoteSettings.scala} | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) rename akka-remote/src/main/scala/akka/remote/{RemoteExtension.scala => RemoteSettings.scala} (99%) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteExtension.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala similarity index 99% rename from akka-remote/src/main/scala/akka/remote/RemoteExtension.scala rename to akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index c8ce919944..69c921ff25 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteExtension.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -9,10 +9,9 @@ import java.util.concurrent.TimeUnit.MILLISECONDS import java.net.InetAddress import akka.config.ConfigurationException import com.eaio.uuid.UUID -import akka.actor._ import scala.collection.JavaConverters._ -class RemoteSettings(val config: Config, val systemName: String) extends Extension { +class RemoteSettings(val config: Config, val systemName: String) { import config._ From 1850f6e5b72d0022db81cfe097bac54e6975eae4 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 5 Jan 2012 09:31:21 +0100 Subject: [PATCH 08/84] DOC: Minor improvement of remote docs --- akka-docs/java/remoting.rst | 41 +++++++++++++++++-------------- akka-docs/java/serialization.rst | 4 +-- akka-docs/scala/remoting.rst | 9 +++---- akka-docs/scala/serialization.rst | 4 +-- 4 files changed, 29 insertions(+), 29 deletions(-) diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index 6e1e41e663..7ec163726a 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -9,39 +9,42 @@ For an introduction of remoting capabilities of Akka please see :ref:`remoting`. 
Preparing your ActorSystem for Remoting ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The Akka remoting is a separate jar file. Make sure that you have a dependency from your project to this jar:: +The Akka remoting is a separate jar file. Make sure that you have the following dependency in your project:: - akka-remote.jar + + com.typesafe.akka + akka-remote + 2.0-SNAPSHOT + -First of all you have to change the actor provider from ``LocalActorRefProvider`` to ``RemoteActorRefProvider``:: +To enable remote capabilities in your Akka project you should, at a minimum, add the following changes +to your ``application.conf`` file:: akka { actor { - provider = "akka.remote.RemoteActorRefProvider" + provider = "akka.remote.RemoteActorRefProvider" } - } - -After that you must also add the following settings:: - - akka { remote { + transport = "akka.remote.netty.NettyRemoteSupport" server { - # The hostname or ip to bind the remoting to, - # InetAddress.getLocalHost.getHostAddress is used if empty - hostname = "" - - # The default remote server port clients should connect to. - # Default is 2552 (AKKA) + hostname = "127.0.0.1" port = 2552 } - } + } } -These are the bare minimal settings that must exist in order to get started with remoting. -There are, of course, more properties that can be tweaked. We refer to the following +As you can see in the example above there are four things you need to add to get started: + +* Change provider from ``akka.actor.LocalActorRefProvider`` to ``akka.remote.RemoteActorRefProvider`` +* Add host name - the machine you want to run the actor system on +* Add port number - the port the actor system should listen on + +The example above only illustrates the bare minimum of properties you have to add to enable remoting. +There are lots of more properties that are related to remoting in Akka. We refer to the following reference file for more information: -* `reference.conf of akka-remote `_ +.. literalinclude:: ../../akka-remote/src/main/resources/reference.conf + :language: none Looking up Remote Actors ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/akka-docs/java/serialization.rst b/akka-docs/java/serialization.rst index 1ead054d75..7140b42aac 100644 --- a/akka-docs/java/serialization.rst +++ b/akka-docs/java/serialization.rst @@ -21,7 +21,7 @@ Usage Configuration ------------- -For Akka to know which ``Serializer`` to use for what, you need edit your Akka Configuration, +For Akka to know which ``Serializer`` to use for what, you need edit your :ref:`configuration`, in the "akka.actor.serializers"-section you bind names to implementations of the ``akka.serialization.Serializer`` you wish to use, like this: @@ -90,5 +90,5 @@ which is done by extending ``akka.serialization.JSerializer``, like this: :include: imports,my-own-serializer :exclude: ... -Then you only need to fill in the blanks, bind it to a name in your Akka Configuration and then +Then you only need to fill in the blanks, bind it to a name in your :ref:`configuration` and then list which classes that should be serialized using it. \ No newline at end of file diff --git a/akka-docs/scala/remoting.rst b/akka-docs/scala/remoting.rst index d0b613b190..5a8005fbf2 100644 --- a/akka-docs/scala/remoting.rst +++ b/akka-docs/scala/remoting.rst @@ -9,11 +9,7 @@ For an introduction of remoting capabilities of Akka please see :ref:`remoting`. Preparing your ActorSystem for Remoting ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The Akka remoting is a separate jar file. 
Make sure that you have a dependency from your project to this jar:: - - akka-remote.jar - -In you SBT project you should add the following as a dependency:: +The Akka remoting is a separate jar file. Make sure that you have the following dependency in your project:: "com.typesafe.akka" % "akka-remote" % "2.0-SNAPSHOT" @@ -43,7 +39,8 @@ The example above only illustrates the bare minimum of properties you have to ad There are lots of more properties that are related to remoting in Akka. We refer to the following reference file for more information: -* `reference.conf of akka-remote `_ +.. literalinclude:: ../../akka-remote/src/main/resources/reference.conf + :language: none Types of Remote Interaction ^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/akka-docs/scala/serialization.rst b/akka-docs/scala/serialization.rst index 716bcc4eb7..15879b2ce4 100644 --- a/akka-docs/scala/serialization.rst +++ b/akka-docs/scala/serialization.rst @@ -21,7 +21,7 @@ Usage Configuration ------------- -For Akka to know which ``Serializer`` to use for what, you need edit your Akka Configuration, +For Akka to know which ``Serializer`` to use for what, you need edit your :ref:`configuration`, in the "akka.actor.serializers"-section you bind names to implementations of the ``akka.serialization.Serializer`` you wish to use, like this: @@ -89,5 +89,5 @@ First you need to create a class definition of your ``Serializer`` like so: :include: imports,my-own-serializer :exclude: ... -Then you only need to fill in the blanks, bind it to a name in your Akka Configuration and then +Then you only need to fill in the blanks, bind it to a name in your :ref:`configuration` and then list which classes that should be serialized using it. \ No newline at end of file From ee17fd8be710591a01214422e7addf0cd7277789 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 5 Jan 2012 17:54:01 +0100 Subject: [PATCH 09/84] Changed message type to Any instead of AnyRef in ask method. See #1608 --- akka-actor/src/main/scala/akka/actor/ActorRef.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index f1378db41a..254b19e010 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -127,9 +127,9 @@ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable * the callback will be scheduled concurrently to the enclosing actor. Unfortunately * there is not yet a way to detect these illegal accesses at compile time. */ - def ask(message: AnyRef, timeout: Timeout): Future[AnyRef] = ?(message, timeout).asInstanceOf[Future[AnyRef]] + def ask(message: Any, timeout: Timeout): Future[AnyRef] = ?(message, timeout).asInstanceOf[Future[AnyRef]] - def ask(message: AnyRef, timeoutMillis: Long): Future[AnyRef] = ask(message, new Timeout(timeoutMillis)) + def ask(message: Any, timeoutMillis: Long): Future[AnyRef] = ask(message, new Timeout(timeoutMillis)) /** * Forwards the message and passes the original sender actor as the sender. From a8c6a0d8915d4a5932a3d78410a08e00e59a01f6 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 5 Jan 2012 17:54:33 +0100 Subject: [PATCH 10/84] Java API for RouterConfig. 
See #1609 * CustomRouterConfig and CustomRoute * Added create methods in the predefined router objects for java equivalent of apply * IndexedSeq instead of Vector in public api --- .../src/main/scala/akka/routing/Routing.scala | 83 ++++++++++++++++--- 1 file changed, 71 insertions(+), 12 deletions(-) diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 0c02952b3e..d1cf7d664f 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -5,9 +5,9 @@ package akka.routing import akka.actor._ import java.util.concurrent.atomic.AtomicInteger -import scala.collection.JavaConversions._ import akka.util.{ Duration, Timeout } import akka.config.ConfigurationException +import scala.collection.JavaConversions.iterableAsScalaIterable /** * A RoutedActorRef is an ActorRef that has a set of connected ActorRef and it uses a Router to @@ -21,7 +21,7 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup _path) { @volatile - private[akka] var _routees: Vector[ActorRef] = _ // this MUST be initialized during createRoute + private[akka] var _routees: IndexedSeq[ActorRef] = _ // this MUST be initialized during createRoute def routees = _routees val route = _props.routerConfig.createRoute(_props.copy(routerConfig = NoRouter), actorContext, this) @@ -94,7 +94,7 @@ trait RouterConfig { protected def toAll(sender: ActorRef, routees: Iterable[ActorRef]): Iterable[Destination] = routees.map(Destination(sender, _)) - protected def createRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): Vector[ActorRef] = (nrOfInstances, routees) match { + protected def createRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees) match { case (0, Nil) ⇒ throw new IllegalArgumentException("Insufficient information - missing configuration.") case (x, Nil) ⇒ (1 to x).map(_ ⇒ context.actorOf(props))(scala.collection.breakOut) case (_, xs) ⇒ xs.map(context.actorFor(_))(scala.collection.breakOut) @@ -104,9 +104,36 @@ trait RouterConfig { registerRoutees(context, createRoutees(props, context, nrOfInstances, routees)) } - protected def registerRoutees(context: ActorContext, routees: Vector[ActorRef]): Unit = { + protected def registerRoutees(context: ActorContext, routees: IndexedSeq[ActorRef]): Unit = { context.self.asInstanceOf[RoutedActorRef]._routees = routees } + +} + +/** + * Java API for a custom router factory. 
+ * @see akka.routing.RouterConfig + */ +abstract class CustomRouterConfig extends RouterConfig { + override def createRoute(props: Props, context: ActorContext, ref: RoutedActorRef): Route = { + val customRoute = createCustomRoute(props, context, ref) + + { + case (sender, message) ⇒ customRoute.destinationsFor(sender, message) + } + } + + def createCustomRoute(props: Props, context: ActorContext, ref: RoutedActorRef): CustomRoute + + protected def registerRoutees(context: ActorContext, routees: java.util.List[ActorRef]): Unit = { + import scala.collection.JavaConverters._ + registerRoutees(context, routees.asScala.toIndexedSeq) + } + +} + +trait CustomRoute { + def destinationsFor(sender: ActorRef, message: Any): java.lang.Iterable[Destination] } /** @@ -189,6 +216,14 @@ case class FromConfig() extends RouterConfig { object RoundRobinRouter { def apply(routees: Iterable[ActorRef]) = new RoundRobinRouter(routees = routees map (_.path.toString)) + + /** + * Java API to create router with the supplied 'routees' actors. + */ + def create(routees: java.lang.Iterable[ActorRef]): RoundRobinRouter = { + import scala.collection.JavaConverters._ + apply(routees.asScala) + } } /** * A Router that uses round-robin to select a connection. For concurrent calls, round robin is just a best effort. @@ -215,8 +250,8 @@ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = * Constructor that sets the routees to be used. * Java API */ - def this(t: java.util.Collection[String]) = { - this(routees = collectionAsScalaIterable(t)) + def this(t: java.lang.Iterable[String]) = { + this(routees = iterableAsScalaIterable(t)) } } @@ -247,6 +282,14 @@ trait RoundRobinLike { this: RouterConfig ⇒ object RandomRouter { def apply(routees: Iterable[ActorRef]) = new RandomRouter(routees = routees map (_.path.toString)) + + /** + * Java API to create router with the supplied 'routees' actors. + */ + def create(routees: java.lang.Iterable[ActorRef]): RandomRouter = { + import scala.collection.JavaConverters._ + apply(routees.asScala) + } } /** * A Router that randomly selects one of the target connections to send a message to. @@ -273,8 +316,8 @@ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil) * Constructor that sets the routees to be used. * Java API */ - def this(t: java.util.Collection[String]) = { - this(routees = collectionAsScalaIterable(t)) + def this(t: java.lang.Iterable[String]) = { + this(routees = iterableAsScalaIterable(t)) } } @@ -309,6 +352,14 @@ trait RandomLike { this: RouterConfig ⇒ object BroadcastRouter { def apply(routees: Iterable[ActorRef]) = new BroadcastRouter(routees = routees map (_.path.toString)) + + /** + * Java API to create router with the supplied 'routees' actors. + */ + def create(routees: java.lang.Iterable[ActorRef]): BroadcastRouter = { + import scala.collection.JavaConverters._ + apply(routees.asScala) + } } /** * A Router that uses broadcasts a message to all its connections. @@ -335,8 +386,8 @@ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = N * Constructor that sets the routees to be used. 
* Java API */ - def this(t: java.util.Collection[String]) = { - this(routees = collectionAsScalaIterable(t)) + def this(t: java.lang.Iterable[String]) = { + this(routees = iterableAsScalaIterable(t)) } } @@ -360,6 +411,14 @@ trait BroadcastLike { this: RouterConfig ⇒ object ScatterGatherFirstCompletedRouter { def apply(routees: Iterable[ActorRef], within: Duration) = new ScatterGatherFirstCompletedRouter(routees = routees map (_.path.toString), within = within) + + /** + * Java API to create router with the supplied 'routees' actors. + */ + def create(routees: java.lang.Iterable[ActorRef], within: Duration): ScatterGatherFirstCompletedRouter = { + import scala.collection.JavaConverters._ + apply(routees.asScala, within) + } } /** * Simple router that broadcasts the message to all routees, and replies with the first response. @@ -387,8 +446,8 @@ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: It * Constructor that sets the routees to be used. * Java API */ - def this(t: java.util.Collection[String], w: Duration) = { - this(routees = collectionAsScalaIterable(t), within = w) + def this(t: java.lang.Iterable[String], w: Duration) = { + this(routees = iterableAsScalaIterable(t), within = w) } } From ed2b65c402fe54a5a9517bfcd36a51e307ea272a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 5 Jan 2012 17:59:19 +0100 Subject: [PATCH 11/84] DOC: Routing (Java). See #1600 --- .../test/scala/akka/actor/DeployerSpec.scala | 32 +-- .../test/scala/akka/routing/RoutingSpec.scala | 6 +- .../docs/jrouting/CustomRouterDocTest.scala | 5 + .../jrouting/CustomRouterDocTestBase.java | 144 ++++++++++++ .../akka/docs/jrouting/FibonacciActor.java | 48 ++++ .../code/akka/docs/jrouting/ParentActor.java | 60 +++++ .../code/akka/docs/jrouting/PrintlnActor.java | 15 ++ .../docs/jrouting/RouterViaConfigExample.java | 51 ++++ .../jrouting/RouterViaProgramExample.java | 60 +++++ akka-docs/java/routing.rst | 217 +++++++++++++++++- .../akka/docs/routing/RouterTypeExample.scala | 10 +- .../docs/routing/RouterViaConfigExample.scala | 19 +- akka-docs/scala/routing.rst | 79 +++---- 13 files changed, 670 insertions(+), 76 deletions(-) create mode 100644 akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTest.scala create mode 100644 akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java create mode 100644 akka-docs/java/code/akka/docs/jrouting/FibonacciActor.java create mode 100644 akka-docs/java/code/akka/docs/jrouting/ParentActor.java create mode 100644 akka-docs/java/code/akka/docs/jrouting/PrintlnActor.java create mode 100644 akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java create mode 100644 akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala index 6c0e699800..92e1f4f3ab 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala @@ -13,28 +13,28 @@ import akka.util.duration._ object DeployerSpec { val deployerConf = ConfigFactory.parseString(""" akka.actor.deployment { - /user/service1 { + /service1 { } - /user/service3 { + /service3 { create-as { class = "akka.actor.DeployerSpec$RecipeActor" } } - /user/service-direct { + /service-direct { router = from-code } - /user/service-direct2 { + /service-direct2 { router = from-code # nr-of-instances ignored when router = direct nr-of-instances = 2 } - 
/user/service-round-robin { + /service-round-robin { router = round-robin } - /user/service-random { + /service-random { router = random } - /user/service-scatter-gather { + /service-scatter-gather { router = scatter-gather within = 2 seconds } @@ -53,7 +53,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { "A Deployer" must { "be able to parse 'akka.actor.deployment._' with all default values" in { - val service = "/user/service1" + val service = "/service1" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service) deployment must be('defined) @@ -67,13 +67,13 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { } "use None deployment for undefined service" in { - val service = "/user/undefined" + val service = "/undefined" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service) deployment must be(None) } "be able to parse 'akka.actor.deployment._' with recipe" in { - val service = "/user/service3" + val service = "/service3" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service) deployment must be('defined) @@ -90,7 +90,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { intercept[com.typesafe.config.ConfigException.WrongType] { val invalidDeployerConf = ConfigFactory.parseString(""" akka.actor.deployment { - /user/service-invalid-number-of-instances { + /service-invalid-number-of-instances { router = round-robin nr-of-instances = boom } @@ -102,23 +102,23 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { } "be able to parse 'akka.actor.deployment._' with direct router" in { - assertRouting(NoRouter, "/user/service-direct") + assertRouting(NoRouter, "/service-direct") } "ignore nr-of-instances with direct router" in { - assertRouting(NoRouter, "/user/service-direct2") + assertRouting(NoRouter, "/service-direct2") } "be able to parse 'akka.actor.deployment._' with round-robin router" in { - assertRouting(RoundRobinRouter(1), "/user/service-round-robin") + assertRouting(RoundRobinRouter(1), "/service-round-robin") } "be able to parse 'akka.actor.deployment._' with random router" in { - assertRouting(RandomRouter(1), "/user/service-random") + assertRouting(RandomRouter(1), "/service-random") } "be able to parse 'akka.actor.deployment._' with scatter-gather router" in { - assertRouting(ScatterGatherFirstCompletedRouter(nrOfInstances = 1, within = 2 seconds), "/user/service-scatter-gather") + assertRouting(ScatterGatherFirstCompletedRouter(nrOfInstances = 1, within = 2 seconds), "/service-scatter-gather") } def assertRouting(expected: RouterConfig, service: String) { diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index fb2aa50372..711bf04371 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -5,7 +5,7 @@ package akka.routing import java.util.concurrent.atomic.AtomicInteger import akka.actor._ -import collection.mutable.LinkedList +import scala.collection.mutable.LinkedList import akka.testkit._ import akka.util.duration._ import akka.dispatch.Await @@ -33,8 +33,6 @@ object RoutingSpec { @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class RoutingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { - val impl = system.asInstanceOf[ActorSystemImpl] - import akka.routing.RoutingSpec._ "routers in general" must { @@ 
-399,7 +397,7 @@ class RoutingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { } "count votes as intended - not as in Florida" in { - val routedActor = system.actorOf(Props[TestActor].withRouter(VoteCountRouter)) + val routedActor = system.actorOf(Props().withRouter(VoteCountRouter)) routedActor ! DemocratVote routedActor ! DemocratVote routedActor ! RepublicanVote diff --git a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTest.scala b/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTest.scala new file mode 100644 index 0000000000..48e323c634 --- /dev/null +++ b/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTest.scala @@ -0,0 +1,5 @@ +package akka.docs.jrouting; + +import org.scalatest.junit.JUnitSuite + +class CustomRouterDocTest extends CustomRouterDocTestBase with JUnitSuite diff --git a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java b/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java new file mode 100644 index 0000000000..8962b22c57 --- /dev/null +++ b/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java @@ -0,0 +1,144 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.docs.jrouting; + +import java.util.List; +import java.util.Arrays; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import static org.junit.Assert.assertEquals; + +import akka.actor.*; +import akka.routing.*; +import akka.util.Duration; +import akka.util.Timeout; +import akka.dispatch.Await; +import akka.dispatch.Future; +import akka.testkit.AkkaSpec; +import com.typesafe.config.ConfigFactory; + +import static akka.docs.jrouting.CustomRouterDocTestBase.DemocratActor; +import static akka.docs.jrouting.CustomRouterDocTestBase.RepublicanActor; +import static akka.docs.jrouting.CustomRouterDocTestBase.Message.*; + +public class CustomRouterDocTestBase { + + ActorSystem system; + + @Before + public void setUp() { + system = ActorSystem.create("MySystem", AkkaSpec.testConf()); + } + + @After + public void tearDown() { + system.shutdown(); + } + + //#crTest + @Test + public void countVotesAsIntendedNotAsInFlorida() { + ActorRef routedActor = system.actorOf(new Props().withRouter(new VoteCountRouter())); + routedActor.tell(DemocratVote); + routedActor.tell(DemocratVote); + routedActor.tell(RepublicanVote); + routedActor.tell(DemocratVote); + routedActor.tell(RepublicanVote); + Timeout timeout = new Timeout(Duration.parse("1 seconds")); + Future democratsResult = routedActor.ask(DemocratCountResult, timeout); + Future republicansResult = routedActor.ask(RepublicanCountResult, timeout); + + assertEquals(3, Await.result(democratsResult, timeout.duration())); + assertEquals(2, Await.result(republicansResult, timeout.duration())); + } + + //#crTest + + //#CustomRouter + //#crMessages + enum Message { + DemocratVote, DemocratCountResult, RepublicanVote, RepublicanCountResult + } + + //#crMessages + + //#crActors + public static class DemocratActor extends UntypedActor { + int counter = 0; + + public void onReceive(Object msg) { + switch ((Message) msg) { + case DemocratVote: + counter++; + break; + case DemocratCountResult: + getSender().tell(counter, getSelf()); + break; + default: + unhandled(msg); + } + } + } + + public static class RepublicanActor extends UntypedActor { + int counter = 0; + + public void onReceive(Object msg) { + switch ((Message) msg) { + case RepublicanVote: + counter++; + break; + case RepublicanCountResult: + getSender().tell(counter, getSelf()); + break; + default: + 
unhandled(msg); + } + } + } + + //#crActors + + //#crRouter + public static class VoteCountRouter extends CustomRouterConfig { + + //#crRoute + @Override + public CustomRoute createCustomRoute(Props props, ActorContext context, RoutedActorRef ref) { + final ActorRef democratActor = context.actorOf(new Props(DemocratActor.class), "d"); + final ActorRef republicanActor = context.actorOf(new Props(RepublicanActor.class), "r"); + List routees = Arrays.asList(new ActorRef[] { democratActor, republicanActor }); + + //#crRegisterRoutees + registerRoutees(context, routees); + //#crRegisterRoutees + + //#crRoutingLogic + return new CustomRoute() { + @Override + public Iterable destinationsFor(ActorRef sender, Object msg) { + switch ((Message) msg) { + case DemocratVote: + case DemocratCountResult: + return Arrays.asList(new Destination[] { new Destination(sender, democratActor) }); + case RepublicanVote: + case RepublicanCountResult: + return Arrays.asList(new Destination[] { new Destination(sender, republicanActor) }); + default: + throw new IllegalArgumentException("Unknown message: " + msg); + } + } + }; + //#crRoutingLogic + } + //#crRoute + + } + + //#crRouter + //#CustomRouter + +} diff --git a/akka-docs/java/code/akka/docs/jrouting/FibonacciActor.java b/akka-docs/java/code/akka/docs/jrouting/FibonacciActor.java new file mode 100644 index 0000000000..8e426cf8fe --- /dev/null +++ b/akka-docs/java/code/akka/docs/jrouting/FibonacciActor.java @@ -0,0 +1,48 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.docs.jrouting; + +import java.io.Serializable; + +import akka.actor.UntypedActor; + +//#fibonacciActor +public class FibonacciActor extends UntypedActor { + public void onReceive(Object msg) { + if (msg instanceof FibonacciNumber) { + FibonacciNumber fibonacciNumber = (FibonacciNumber) msg; + getSender().tell(fibonacci(fibonacciNumber.getNbr())); + } else { + unhandled(msg); + } + } + + private int fibonacci(int n) { + return fib(n, 1, 0); + } + + private int fib(int n, int b, int a) { + if (n == 0) + return a; + // recursion + return fib(n - 1, a + b, b); + } + + public static class FibonacciNumber implements Serializable { + private static final long serialVersionUID = 1L; + private final int nbr; + + public FibonacciNumber(int nbr) { + this.nbr = nbr; + } + + public int getNbr() { + return nbr; + } + + } +} + +//#fibonacciActor + diff --git a/akka-docs/java/code/akka/docs/jrouting/ParentActor.java b/akka-docs/java/code/akka/docs/jrouting/ParentActor.java new file mode 100644 index 0000000000..c8d8b019bb --- /dev/null +++ b/akka-docs/java/code/akka/docs/jrouting/ParentActor.java @@ -0,0 +1,60 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.docs.jrouting; + +import akka.routing.ScatterGatherFirstCompletedRouter; +import akka.routing.BroadcastRouter; +import akka.routing.RandomRouter; +import akka.routing.RoundRobinRouter; +import akka.actor.UntypedActor; +import akka.actor.ActorRef; +import akka.actor.Props; +import akka.util.Duration; +import akka.util.Timeout; +import akka.dispatch.Future; +import akka.dispatch.Await; + +//#parentActor +public class ParentActor extends UntypedActor { + public void onReceive(Object msg) { + if (msg.equals("rrr")) { + //#roundRobinRouter + ActorRef roundRobinRouter = getContext().actorOf( + new Props(PrintlnActor.class).withRouter(new RoundRobinRouter(5)), "router"); + for (int i = 1; i <= 10; i++) { + roundRobinRouter.tell(i, getSelf()); + } + //#roundRobinRouter + } else if (msg.equals("rr")) { + //#randomRouter + ActorRef randomRouter = getContext().actorOf(new Props(PrintlnActor.class).withRouter(new RandomRouter(5)), + "router"); + for (int i = 1; i <= 10; i++) { + randomRouter.tell(i, getSelf()); + } + //#randomRouter + } else if (msg.equals("br")) { + //#broadcastRouter + ActorRef broadcastRouter = getContext().actorOf(new Props(PrintlnActor.class).withRouter(new BroadcastRouter(5)), + "router"); + broadcastRouter.tell("this is a broadcast message", getSelf()); + //#broadcastRouter + } else if (msg.equals("sgfcr")) { + //#scatterGatherFirstCompletedRouter + ActorRef scatterGatherFirstCompletedRouter = getContext().actorOf( + new Props(FibonacciActor.class).withRouter(new ScatterGatherFirstCompletedRouter(5, Duration + .parse("2 seconds"))), "router"); + Timeout timeout = getContext().system().settings().ActorTimeout(); + Future futureResult = scatterGatherFirstCompletedRouter.ask(new FibonacciActor.FibonacciNumber(10), + timeout); + int result = (Integer) Await.result(futureResult, timeout.duration()); + //#scatterGatherFirstCompletedRouter + System.out.println(String.format("The result of calculating Fibonacci for 10 is %d", result)); + } else { + unhandled(msg); + } + } +} + +//#parentActor \ No newline at end of file diff --git a/akka-docs/java/code/akka/docs/jrouting/PrintlnActor.java b/akka-docs/java/code/akka/docs/jrouting/PrintlnActor.java new file mode 100644 index 0000000000..d6ad652ebe --- /dev/null +++ b/akka-docs/java/code/akka/docs/jrouting/PrintlnActor.java @@ -0,0 +1,15 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.docs.jrouting; + +import akka.actor.UntypedActor; + +//#printlnActor +public class PrintlnActor extends UntypedActor { + public void onReceive(Object msg) { + System.out.println(String.format("Received message '%s' in actor %s", msg, getSelf().path().name())); + } +} + +//#printlnActor diff --git a/akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java b/akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java new file mode 100644 index 0000000000..c33a22667b --- /dev/null +++ b/akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java @@ -0,0 +1,51 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.docs.jrouting; + +import akka.routing.FromConfig; +import akka.actor.ActorRef; +import akka.actor.Props; +import akka.actor.UntypedActor; +import akka.actor.ActorSystem; +import com.typesafe.config.ConfigFactory; +import com.typesafe.config.Config; + +public class RouterViaConfigExample { + + public static class ExampleActor extends UntypedActor { + public void onReceive(Object msg) { + if (msg instanceof Message) { + Message message = (Message) msg; + System.out.println(String.format("Received %s in router %s", message.getNbr(), getSelf().path().name())); + } else { + unhandled(msg); + } + } + + public static class Message { + private final int nbr; + + public Message(int nbr) { + this.nbr = nbr; + } + + public int getNbr() { + return nbr; + } + + } + } + + public static void main(String... args) { + Config config = ConfigFactory.parseString("akka.actor.deployment {\n" + " /router {\n" + + " router = round-robin\n" + " nr-of-instances = 5\n" + " }\n" + "}\n"); + ActorSystem system = ActorSystem.create("Example", config); + //#configurableRouting + ActorRef router = system.actorOf(new Props(ExampleActor.class).withRouter(new FromConfig()), "router"); + //#configurableRouting + for (int i = 1; i <= 10; i++) { + router.tell(new ExampleActor.Message(i)); + } + } +} \ No newline at end of file diff --git a/akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java b/akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java new file mode 100644 index 0000000000..094ac8361f --- /dev/null +++ b/akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java @@ -0,0 +1,60 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.docs.jrouting; + +import akka.routing.RoundRobinRouter; +import akka.actor.ActorRef; +import akka.actor.Props; +import akka.actor.UntypedActor; +import akka.actor.ActorSystem; +import java.util.Arrays; + +public class RouterViaProgramExample { + + public static class ExampleActor extends UntypedActor { + public void onReceive(Object msg) { + if (msg instanceof Message) { + Message message = (Message) msg; + System.out.println(String.format("Received %s in router %s", message.getNbr(), getSelf().path().name())); + } else { + unhandled(msg); + } + } + + public static class Message { + private final int nbr; + + public Message(int nbr) { + this.nbr = nbr; + } + + public int getNbr() { + return nbr; + } + + } + } + + public static void main(String... 
args) { + ActorSystem system = ActorSystem.create("RPE"); + //#programmaticRoutingNrOfInstances + int nrOfInstances = 5; + ActorRef router1 = system.actorOf(new Props(ExampleActor.class).withRouter(new RoundRobinRouter(nrOfInstances))); + //#programmaticRoutingNrOfInstances + for (int i = 1; i <= 6; i++) { + router1.tell(new ExampleActor.Message(i)); + } + + //#programmaticRoutingRoutees + ActorRef actor1 = system.actorOf(new Props(ExampleActor.class)); + ActorRef actor2 = system.actorOf(new Props(ExampleActor.class)); + ActorRef actor3 = system.actorOf(new Props(ExampleActor.class)); + Iterable routees = Arrays.asList(new ActorRef[] { actor1, actor2, actor3 }); + ActorRef router2 = system.actorOf(new Props(ExampleActor.class).withRouter(RoundRobinRouter.create(routees))); + //#programmaticRoutingRoutees + for (int i = 1; i <= 6; i++) { + router2.tell(new ExampleActor.Message(i)); + } + } +} \ No newline at end of file diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index 2c0f74c696..16c2a0864d 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -4,9 +4,218 @@ Routing (Java) ============== -This part of the documentation is not done. +.. sidebar:: Contents + + .. contents:: :local: + +Akka-core includes some building blocks to build more complex message flow handlers, they are listed and explained below: + +Router +------ + +A Router is an actor that routes incoming messages to outbound actors. +The router routes the messages sent to it to its underlying actors called 'routees'. + +Akka comes with four defined routers out of the box, but as you will see in this chapter it +is really easy to create your own. The four routers shipped with Akka are: + +* ``akka.routing.RoundRobinRouter`` +* ``akka.routing.RandomRouter`` +* ``akka.routing.BroadcastRouter`` +* ``akka.routing.ScatterGatherFirstCompletedRouter`` + +Routers Explained +^^^^^^^^^^^^^^^^^ + +This is an example of how to create a router that is defined in configuration: + +.. includecode:: ../scala/code/akka/docs/routing/RouterViaConfigExample.scala#config + +.. includecode:: code/akka/docs/jrouting/RouterViaConfigExample.java#configurableRouting + +This is an example of how to programatically create a router and set the number of routees it should create: + +.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingNrOfInstances + +You can also give the router already created routees as in: + +.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingRoutees + +When you create a router programatically you define the number of routees *or* you pass already created routees to it. +If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded. + +*It is also worth pointing out that if you define the number of routees in the configuration file then this +value will be used instead of any programmatically sent parameters.* + +Once you have the router actor it is just to send messages to it as you would to any actor: + +.. code-block:: java + + router.tell(new MyMsg()); + +The router will apply its behavior to the message it receives and forward it to the routees. + +Router usage +^^^^^^^^^^^^ + +In this section we will describe how to use the different router types. +First we need to create some actors that will be used in the examples: + +.. includecode:: code/akka/docs/jrouting/PrintlnActor.java#printlnActor + +and + +.. 
includecode:: code/akka/docs/jrouting/FibonacciActor.java#fibonacciActor + +RoundRobinRouter +**************** +Routes in a `round-robin `_ fashion to its routees. +Code example: + +.. includecode:: code/akka/docs/jrouting/ParentActor.java#roundRobinRouter + +When run you should see a similar output to this: + +.. code-block:: scala + + Received message '1' in actor $b + Received message '2' in actor $c + Received message '3' in actor $d + Received message '6' in actor $b + Received message '4' in actor $e + Received message '8' in actor $d + Received message '5' in actor $f + Received message '9' in actor $e + Received message '10' in actor $f + Received message '7' in actor $c + +If you look closely at the output you can see that each of the routees received two messages, which +is exactly what you would expect a round-robin router to do. +(The name of an actor is automatically created in the format ``$letter`` unless you specify it - +hence the names printed above.) + +RandomRouter +************ +As the name implies, this router type selects one of its routees randomly and forwards +the message it receives to that routee. +This procedure is repeated for each message it receives. +Code example: + +.. includecode:: code/akka/docs/jrouting/ParentActor.java#randomRouter + +When run you should see a similar output to this: + +.. code-block:: scala + + Received message '1' in actor $e + Received message '2' in actor $c + Received message '4' in actor $b + Received message '5' in actor $d + Received message '3' in actor $e + Received message '6' in actor $c + Received message '7' in actor $d + Received message '8' in actor $e + Received message '9' in actor $d + Received message '10' in actor $d + +The result from running the random router should be different, or at least random, every time you run it. +Try to run it a couple of times to verify its behavior if you don't trust us. + +BroadcastRouter +*************** +A broadcast router forwards the message it receives to *all* its routees. +Code example: + +.. includecode:: code/akka/docs/jrouting/ParentActor.java#broadcastRouter + +When run you should see a similar output to this: + +.. code-block:: scala + + Received message 'this is a broadcast message' in actor $f + Received message 'this is a broadcast message' in actor $d + Received message 'this is a broadcast message' in actor $e + Received message 'this is a broadcast message' in actor $c + Received message 'this is a broadcast message' in actor $b + +As you can see above, each of the routees, five in total, received the broadcast message. + +ScatterGatherFirstCompletedRouter +********************************* +The ScatterGatherFirstCompletedRouter sends the message on to all its routees as a future. +It then waits for the first result it gets back. This result is sent back to the original sender. +Code example: + +.. includecode:: code/akka/docs/jrouting/ParentActor.java#scatterGatherFirstCompletedRouter + +When run you should see this: + +.. code-block:: scala + + The result of calculating Fibonacci for 10 is 55 + +From the output above you can't really see that all the routees performed the calculation, but they did! +The result you see is from the first routee that returned its calculation to the router. + +Broadcast Messages +^^^^^^^^^^^^^^^^^^ + +There is a special type of message that will be sent to all routees regardless of the router. +This message is called ``Broadcast`` and is used in the following manner: + +.. 
code-block:: java + + router.tell(new Broadcast("Watch out for Davy Jones' locker")); + +Only the actual message is forwarded to the routees, i.e. "Watch out for Davy Jones' locker" in the example above. +It is up to the routee implementation whether to handle the broadcast message or not. + +Custom Router +^^^^^^^^^^^^^ + +You can also create your own router should you not find any of the ones provided by Akka sufficient for your needs. +In order to roll your own router you have to fulfill certain criteria which are explained in this section. + +The router created in this example is a simple vote counter. It will route the votes to specific vote counter actors. +In this case we only have two parties, the Republicans and the Democrats. We would like a router that forwards all +democrat related messages to the Democrat actor and all republican related messages to the Republican actor. + +We begin with defining the class: + +.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRouter + :exclude: crRoute + +The next step is to implement the ``createCustomRoute`` method in the class just defined: + +.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRoute + +As you can see above we start off by creating the routees and putting them in a collection. + +Make sure that you don't forget to include the line below, as it is *really* important. +It registers the routees internally and failing to call this method will +cause an ``ActorInitializationException`` to be thrown when the router is used. +Therefore always make sure to do the following in your custom router: + +.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRegisterRoutees + +The routing logic is where your magic sauce is applied. In our example it inspects the message types +and forwards to the correct routee based on this: + +.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRoutingLogic + +As you can see above, the ``CustomRoute`` function defines the mapping +from incoming sender/message to a ``List`` of ``Destination(sender, routee)``. +The sender is what "parent" the routee should see - changing this could be useful if you for example want +an actor other than the original sender to mediate the result of the routee (if there is a result). +For more information about how to alter the original sender we refer to the source code of +`ScatterGatherFirstCompletedRouter `_ + +All in all the custom router looks like this: + +.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#CustomRouter + +If you are interested in how to use the VoteCountRouter it looks like this: + +.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crTest -We continuously strive to add and improve the documentation so you may want to have a -look at the `snapshot repository `_. -You can also get some ideas of the routing by looking at the corresponding :ref:`routing-scala` documentation. 
\ No newline at end of file diff --git a/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala b/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala index 63338e8357..d688da6544 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala +++ b/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala @@ -46,7 +46,7 @@ class ParentActor extends Actor { case "rrr" ⇒ //#roundRobinRouter val roundRobinRouter = - context.actorOf(Props[PrintlnActor].withRouter(RoundRobinRouter()), "router") + context.actorOf(Props[PrintlnActor].withRouter(RoundRobinRouter(5)), "router") 1 to 10 foreach { i ⇒ roundRobinRouter ! i } @@ -54,7 +54,7 @@ class ParentActor extends Actor { case "rr" ⇒ //#randomRouter val randomRouter = - context.actorOf(Props[PrintlnActor].withRouter(RandomRouter()), "router") + context.actorOf(Props[PrintlnActor].withRouter(RandomRouter(5)), "router") 1 to 10 foreach { i ⇒ randomRouter ! i } @@ -62,14 +62,14 @@ class ParentActor extends Actor { case "br" ⇒ //#broadcastRouter val broadcastRouter = - context.actorOf(Props[PrintlnActor].withRouter(BroadcastRouter()), "router") + context.actorOf(Props[PrintlnActor].withRouter(BroadcastRouter(5)), "router") broadcastRouter ! "this is a broadcast message" //#broadcastRouter case "sgfcr" ⇒ //#scatterGatherFirstCompletedRouter val scatterGatherFirstCompletedRouter = context.actorOf( - Props[FibonacciActor].withRouter(ScatterGatherFirstCompletedRouter(within = 2 seconds)), - "router") + Props[FibonacciActor].withRouter(ScatterGatherFirstCompletedRouter( + nrOfInstances = 5, within = 2 seconds)), "router") implicit val timeout = context.system.settings.ActorTimeout val futureResult = scatterGatherFirstCompletedRouter ? FibonacciNumber(10) val result = Await.result(futureResult, timeout.duration) diff --git a/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala b/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala index d0d5dd59c7..d3c3e848c2 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala +++ b/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala @@ -4,7 +4,8 @@ package akka.docs.routing import akka.actor.{ Actor, Props, ActorSystem } -import akka.routing.RoundRobinRouter +import com.typesafe.config.ConfigFactory +import akka.routing.FromConfig case class Message(nbr: Int) @@ -15,10 +16,20 @@ class ExampleActor extends Actor { } object RouterWithConfigExample extends App { - val system = ActorSystem("Example") + val config = ConfigFactory.parseString(""" + //#config + akka.actor.deployment { + /router { + router = round-robin + nr-of-instances = 5 + } + } + //#config + """) + val system = ActorSystem("Example", config) //#configurableRouting - val router = system.actorOf(Props[PrintlnActor].withRouter(RoundRobinRouter()), - "exampleActor") + val router = system.actorOf(Props[ExampleActor].withRouter(FromConfig()), + "router") //#configurableRouting 1 to 10 foreach { i ⇒ router ! Message(i) } } \ No newline at end of file diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index 9adc9493f0..f68b2400a6 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -19,13 +19,41 @@ The router routes the messages sent to it to its underlying actors called 'route Akka comes with four defined routers out of the box, but as you will see in this chapter it is really easy to create your own. 
The four routers shipped with Akka are: -* `RoundRobinRouter `_ -* `RandomRouter `_ -* `BroadcastRouter `_ -* `ScatterGatherFirstCompletedRouter `_ +* ``akka.routing.RoundRobinRouter`` +* ``akka.routing.RandomRouter`` +* ``akka.routing.BroadcastRouter`` +* ``akka.routing.ScatterGatherFirstCompletedRouter`` -To illustrate how to use the routers we will create a couple of simple actors and then use them in the -different router types. +Routers Explained +^^^^^^^^^^^^^^^^^ + +This is an example of how to create a router that is defined in configuration: + +.. includecode:: code/akka/docs/routing/RouterViaConfigExample.scala#config + +.. includecode:: code/akka/docs/routing/RouterViaConfigExample.scala#configurableRouting + +This is an example of how to programatically create a router and set the number of routees it should create: + +.. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#programmaticRoutingNrOfInstances + +You can also give the router already created routees as in: + +.. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#programmaticRoutingRoutees + +When you create a router programatically you define the number of routees *or* you pass already created routees to it. +If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded. + +*It is also worth pointing out that if you define the number of routees in the configuration file then this +value will be used instead of any programmatically sent parameters.* + +Once you have the router actor it is just to send messages to it as you would to any actor: + +.. code-block:: scala + + router ! MyMsg + +The router will apply its behavior to the message it receives and forward it to the routees. Router usage ^^^^^^^^^^^^ @@ -39,13 +67,6 @@ and .. includecode:: code/akka/docs/routing/RouterTypeExample.scala#fibonacciActor -Here is the configuration file to instruct the routers how many instances of routees to create:: - - akka.actor.deployment { - /router { - nr-of-instances = 5 - } - } RoundRobinRouter **************** @@ -137,34 +158,6 @@ When run you should see this: From the output above you can't really see that all the routees performed the calculation, but they did! The result you see is from the first routee that returned its calculation to the router. -Routers Explained -^^^^^^^^^^^^^^^^^ - -In the example usage above we showed you how to use routers configured with a configuration file but routers -can also be configured programatically. - -This is an example of how to create a router and set the number of routees it should create: - -.. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#programmaticRoutingNrOfInstances - -You can also give the router already created routees as in: - -.. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#programmaticRoutingRoutees - -When you create a router programatically you define the number of routees *or* you pass already created routees to it. -If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded. - -*It is also worth pointing out that if you define the number of routees in the configuration file then this -value will be used instead of any programmatically sent parameters.* - -Once you have the router actor it is just to send messages to it as you would to any actor: - -.. code-block:: scala - - router ! MyMsg - -The router will apply its behavior to the message it receives and forward it to the routees. 
- Broadcast Messages ^^^^^^^^^^^^^^^^^^ @@ -193,7 +186,7 @@ We begin with defining the class: .. includecode:: ../../akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala#crRouter :exclude: crRoute -The next step is to implement the 'createRoute' method in the class just defined: +The next step is to implement the ``createRoute`` method in the class just defined: .. includecode:: ../../akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala#crRoute @@ -215,7 +208,7 @@ As you can see above what's returned in the partial function is a ``List`` of `` The sender is what "parent" the routee should see - changing this could be useful if you for example want another actor than the original sender to intermediate the result of the routee (if there is a result). For more information about how to alter the original sender we refer to the source code of -`ScatterGatherFirstCompletedRouter `_ +`ScatterGatherFirstCompletedRouter `_ All in all the custom router looks like this: From 8852128b930a568af398c983824a90f3781b6fe2 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 6 Jan 2012 23:16:59 +0100 Subject: [PATCH 12/84] #1598 - tryComplete == false for KeptPromise --- akka-actor/src/main/scala/akka/dispatch/Future.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index fea97fbaf3..afa23ec728 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -796,7 +796,7 @@ class DefaultPromise[T](implicit val executor: ExecutionContext) extends Abstrac final class KeptPromise[T](suppliedValue: Either[Throwable, T])(implicit val executor: ExecutionContext) extends Promise[T] { val value = Some(resolve(suppliedValue)) - def tryComplete(value: Either[Throwable, T]): Boolean = true + def tryComplete(value: Either[Throwable, T]): Boolean = false def onComplete(func: Either[Throwable, T] ⇒ Unit): this.type = { val completedAs = value.get Future dispatchTask (() ⇒ func(completedAs)) From bd71ebb948f84908845a4452f796c32f41eba151 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 6 Jan 2012 23:47:50 +0100 Subject: [PATCH 13/84] #1604 - Switched to ThreadLocal.remove() and dropping Optionizing things --- .../src/main/scala/akka/dispatch/Future.scala | 23 ++++++++----------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index afa23ec728..933a263732 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -294,31 +294,28 @@ object Future { */ def blocking(implicit executor: ExecutionContext): Unit = _taskStack.get match { - case Some(taskStack) if taskStack.nonEmpty ⇒ - val tasks = taskStack.elems - taskStack.clear() - _taskStack set None - dispatchTask(() ⇒ _taskStack.get.get.elems = tasks, true) - case Some(_) ⇒ _taskStack set None - case _ ⇒ // already None + case stack if (stack ne null) && stack.nonEmpty ⇒ + val tasks = stack.elems + stack.clear() + _taskStack.remove() + dispatchTask(() ⇒ _taskStack.get.elems = tasks, true) + case _ ⇒ _taskStack.remove() } - private val _taskStack = new ThreadLocal[Option[Stack[() ⇒ Unit]]]() { - override def initialValue = None - } + private val _taskStack = new ThreadLocal[Stack[() ⇒ Unit]]() /** * Internal API, do not call */ private[akka] def dispatchTask(task: () ⇒ 
Unit, force: Boolean = false)(implicit executor: ExecutionContext): Unit = _taskStack.get match { - case Some(taskStack) if !force ⇒ taskStack push task + case stack if (stack ne null) && !force ⇒ stack push task case _ ⇒ executor.execute( new Runnable { def run = try { val taskStack = Stack[() ⇒ Unit](task) - _taskStack set Some(taskStack) + _taskStack set taskStack while (taskStack.nonEmpty) { val next = taskStack.pop() try { @@ -334,7 +331,7 @@ object Future { } } } - } finally { _taskStack set None } + } finally { _taskStack.remove() } }) } } From 1f8f3d3e4202b3c282d05de26354e9a309eec5d9 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sat, 7 Jan 2012 00:48:41 +0100 Subject: [PATCH 14/84] Removing trailing dot madness --- akka-actor/src/main/scala/akka/util/Duration.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index d404300a49..52a7211ef2 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -325,7 +325,7 @@ class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration { case Duration(x, NANOSECONDS) ⇒ x + " nanoseconds" } - def printHMS = "%02d:%02d:%06.3f".format(toHours, toMinutes % 60, toMillis / 1000. % 60) + def printHMS = "%02d:%02d:%06.3f".format(toHours, toMinutes % 60, toMillis / 1000f % 60) def compare(other: Duration) = if (other.finite_?) { From feccfa0e2fe623d8a6c3a6fe18a741b011470b82 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sat, 7 Jan 2012 00:49:35 +0100 Subject: [PATCH 15/84] Removing trailing dot madness --- akka-actor/src/main/scala/akka/util/Duration.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index 52a7211ef2..1406ad8564 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -325,7 +325,7 @@ class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration { case Duration(x, NANOSECONDS) ⇒ x + " nanoseconds" } - def printHMS = "%02d:%02d:%06.3f".format(toHours, toMinutes % 60, toMillis / 1000f % 60) + def printHMS = "%02d:%02d:%06.3f".format(toHours, toMinutes % 60, toMillis / 1000d % 60) def compare(other: Duration) = if (other.finite_?) { From 3f8f8632ccf87e4d2fbbe498f1770ff1b4031664 Mon Sep 17 00:00:00 2001 From: RayRoestenburg Date: Sat, 7 Jan 2012 16:53:44 +0100 Subject: [PATCH 16/84] Added docs to registerOnTermination, that you can add multiple code blocks --- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 0e95325ec3..aafbe1f0e3 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -234,13 +234,15 @@ abstract class ActorSystem extends ActorRefFactory { /** * Register a block of code to run after all actors in this actor system have - * been stopped. + * been stopped. Multiple code blocks may be registered by calling this method multiple times; there is no + * guarantee that they will be executed in a particular order. 
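+   * As an illustrative sketch (the printed messages are hypothetical, not part of the API):
+   * {{{
+   * system.registerOnTermination(println("first cleanup block"))
+   * system.registerOnTermination(println("second cleanup block"))
+   * // both blocks run after all actors have stopped, in no guaranteed order
+   * }}}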
*/ def registerOnTermination[T](code: ⇒ T) /** * Register a block of code to run after all actors in this actor system have - * been stopped (Java API). + * been stopped. Multiple code blocks may be registered by calling this method multiple times; there is no + * guarantee that they will be executed in a particular order (Java API). */ def registerOnTermination(code: Runnable) From 1c6761c811c7717a7f558fdafda0e161fa25ef2d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 9 Jan 2012 09:25:02 +0100 Subject: [PATCH 17/84] Added explicit type of def context in TypedActor --- akka-actor/src/main/scala/akka/actor/TypedActor.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index 3dc5d4c000..c1cefd8153 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -259,7 +259,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi /** * Returns the ActorContext (for a TypedActor) when inside a method call in a TypedActor. */ - def context = currentContext.get match { + def context: ActorContext = currentContext.get match { case null ⇒ throw new IllegalStateException("Calling TypedActor.context outside of a TypedActor implementation method!") case some ⇒ some } From 07b27ba3de4386f0e918eaabee51cd40548f9ffe Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 9 Jan 2012 13:50:54 +0100 Subject: [PATCH 18/84] Fixing a logic error inte the ActiveRemoteClient --- .../scala/akka/remote/netty/NettyRemoteSupport.scala | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 24ae131a29..dafb703e41 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -162,15 +162,9 @@ class ActiveRemoteClient private[akka] ( connection.getChannel.write(remoteSupport.createControlEnvelope(handshake.build)) } - def closeChannel(connection: ChannelFuture) = { - val channel = connection.getChannel - openChannels.remove(channel) - channel.close() - } - def attemptReconnect(): Boolean = { log.debug("Remote client reconnecting to [{}]", remoteAddress) - val connection = bootstrap.connect(new InetSocketAddress(remoteAddress.ip.get, remoteAddress.port)) + connection = bootstrap.connect(new InetSocketAddress(remoteAddress.ip.get, remoteAddress.port)) openChannels.add(connection.awaitUninterruptibly.getChannel) // Wait until the connection attempt succeeds or fails. 
if (!connection.isSuccess) { @@ -210,7 +204,8 @@ class ActiveRemoteClient private[akka] ( } match { case true ⇒ true case false if reconnectIfAlreadyConnected ⇒ - closeChannel(connection) + openChannels.remove(connection.getChannel) + connection.getChannel.close() log.debug("Remote client reconnecting to [{}]", remoteAddress) attemptReconnect() From 6ba26f5f86b09c9ee2600d4b42fa43901d706b29 Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 9 Jan 2012 14:09:24 +0100 Subject: [PATCH 19/84] fix trivial off-by-one aesthetic buglet: start random names at $a instead of $b --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index c9133e2235..eaad8d0610 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -268,8 +268,8 @@ private[akka] class ActorCell( //Not thread safe, so should only be used inside the actor that inhabits this ActorCell final protected def randomName(): String = { - val n = nextNameSequence + 1 - nextNameSequence = n + val n = nextNameSequence + nextNameSequence = n + 1 Helpers.base64(n) } From 602a036194a81fb17aa0f0735933c1e9a7b564ce Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 9 Jan 2012 14:13:52 +0100 Subject: [PATCH 20/84] Removing the ChannelGroup from the ActiveRemoteClient since it's only going to have one channel at a time anyway --- .../remote/netty/NettyRemoteSupport.scala | 24 +++++-------------- 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index dafb703e41..98c6c99695 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -132,8 +132,6 @@ class ActiveRemoteClient private[akka] ( private var bootstrap: ClientBootstrap = _ @volatile private[remote] var connection: ChannelFuture = _ - @volatile - private[remote] var openChannels: DefaultChannelGroup = _ @volatile private var reconnectionTimeWindowStart = 0L @@ -165,7 +163,7 @@ class ActiveRemoteClient private[akka] ( def attemptReconnect(): Boolean = { log.debug("Remote client reconnecting to [{}]", remoteAddress) connection = bootstrap.connect(new InetSocketAddress(remoteAddress.ip.get, remoteAddress.port)) - openChannels.add(connection.awaitUninterruptibly.getChannel) // Wait until the connection attempt succeeds or fails. + connection.awaitUninterruptibly.getChannel // Wait until the connection attempt succeeds or fails. 
if (!connection.isSuccess) { notifyListeners(RemoteClientError(connection.getCause, remoteSupport, remoteAddress)) @@ -177,8 +175,6 @@ class ActiveRemoteClient private[akka] ( } runSwitch switchOn { - openChannels = new DefaultDisposableChannelGroup(classOf[RemoteClient].getName) - executionHandler = new ExecutionHandler(remoteSupport.executor) bootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(Executors.newCachedThreadPool, Executors.newCachedThreadPool)) @@ -189,9 +185,7 @@ class ActiveRemoteClient private[akka] ( log.debug("Starting remote client connection to [{}]", remoteAddress) connection = bootstrap.connect(new InetSocketAddress(remoteAddress.ip.get, remoteAddress.port)) - - val channel = connection.awaitUninterruptibly.getChannel - openChannels.add(channel) + connection.awaitUninterruptibly.getChannel // Wait until the connection attempt succeeds or fails. if (!connection.isSuccess) { notifyListeners(RemoteClientError(connection.getCause, remoteSupport, remoteAddress)) @@ -204,7 +198,6 @@ class ActiveRemoteClient private[akka] ( } match { case true ⇒ true case false if reconnectIfAlreadyConnected ⇒ - openChannels.remove(connection.getChannel) connection.getChannel.close() log.debug("Remote client reconnecting to [{}]", remoteAddress) @@ -219,13 +212,12 @@ class ActiveRemoteClient private[akka] ( log.debug("Shutting down remote client [{}]", name) notifyListeners(RemoteClientShutdown(remoteSupport, remoteAddress)) - openChannels.close.awaitUninterruptibly - openChannels = null + connection.getChannel.close() + connection = null executionHandler.releaseExternalResources() executionHandler = null bootstrap.releaseExternalResources() bootstrap = null - connection = null log.debug("[{}] has been shut down", name) } @@ -321,12 +313,8 @@ class ActiveRemoteClientHandler( override def channelClosed(ctx: ChannelHandlerContext, event: ChannelStateEvent) = client.runSwitch ifOn { if (client.isWithinReconnectionTimeWindow) { timer.newTimeout(new TimerTask() { - def run(timeout: Timeout) = { - if (client.isRunning) { - client.openChannels.remove(event.getChannel) - client.connect(reconnectIfAlreadyConnected = true) - } - } + def run(timeout: Timeout) = + if (client.isRunning) client.connect(reconnectIfAlreadyConnected = true) }, client.remoteSupport.clientSettings.ReconnectDelay.toMillis, TimeUnit.MILLISECONDS) } else runOnceNow { client.remoteSupport.shutdownClientConnection(remoteAddress) // spawn in another thread From 442d597b81e70539ab2167bf25de52160b5d2c36 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 9 Jan 2012 14:31:24 +0100 Subject: [PATCH 21/84] Avoiding to releaseExternalResources on the ExecutionHandler --- .../remote/netty/NettyRemoteSupport.scala | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 98c6c99695..2ebe26db50 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -212,12 +212,19 @@ class ActiveRemoteClient private[akka] ( log.debug("Shutting down remote client [{}]", name) notifyListeners(RemoteClientShutdown(remoteSupport, remoteAddress)) - connection.getChannel.close() - connection = null - executionHandler.releaseExternalResources() - executionHandler = null - bootstrap.releaseExternalResources() - bootstrap = null + try { + if (connection.getChannel ne null) 
+ connection.getChannel.close() + } finally { + connection = null + executionHandler = null + //Do not do this: executionHandler.releaseExternalResources(), since it's shutting down the shared threadpool + try { + bootstrap.releaseExternalResources() + } finally { + bootstrap = null + } + } log.debug("[{}] has been shut down", name) } From f029b8dc914f746b5415cc08220bad9722ce7f8b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 9 Jan 2012 14:40:57 +0100 Subject: [PATCH 22/84] Adding null-checks in shutdown of ActiveRemoteClient --- .../src/main/scala/akka/remote/netty/NettyRemoteSupport.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 2ebe26db50..d6d2698526 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -213,7 +213,7 @@ class ActiveRemoteClient private[akka] ( notifyListeners(RemoteClientShutdown(remoteSupport, remoteAddress)) try { - if (connection.getChannel ne null) + if ((connection ne null) && (connection.getChannel ne null)) connection.getChannel.close() } finally { connection = null From 9c6a856df2e67ce4a400a1bb85867832f094c048 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 9 Jan 2012 14:45:29 +0100 Subject: [PATCH 23/84] Avoiding 'releaseExternalResources' for the Remote Server's executionHandler --- .../src/main/scala/akka/remote/netty/NettyRemoteSupport.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index d6d2698526..4624c9dc73 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -565,7 +565,6 @@ class NettyRemoteServer( openChannels.disconnect openChannels.close.awaitUninterruptibly bootstrap.releaseExternalResources() - executionHandler.releaseExternalResources() remoteSupport.notifyListeners(RemoteServerShutdown(remoteSupport)) } catch { case e: Exception ⇒ remoteSupport.notifyListeners(RemoteServerError(e, remoteSupport)) From 8abcf8ce2a1ca080dcb316dbc7adc6c149ef71d6 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 9 Jan 2012 20:50:18 +0100 Subject: [PATCH 24/84] Avoid AtomicInteger overflow in RoundRobinRouter. 
See #1610 --- akka-actor/src/main/scala/akka/routing/Routing.scala | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index d1cf7d664f..5ee7ca76d1 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -4,7 +4,7 @@ package akka.routing import akka.actor._ -import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.atomic.AtomicLong import akka.util.{ Duration, Timeout } import akka.config.ConfigurationException import scala.collection.JavaConversions.iterableAsScalaIterable @@ -264,10 +264,11 @@ trait RoundRobinLike { this: RouterConfig ⇒ def createRoute(props: Props, context: ActorContext, ref: RoutedActorRef): Route = { createAndRegisterRoutees(props, context, nrOfInstances, routees) - val next = new AtomicInteger(0) + val next = new AtomicLong(0) def getNext(): ActorRef = { - ref.routees(next.getAndIncrement % ref.routees.size) + val _routees = ref.routees + _routees((next.getAndIncrement % _routees.size).asInstanceOf[Int]) } { From 8b71bf5bea435cc0ef2caa68a3e6afa4ac01da77 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 9 Jan 2012 20:25:24 +0100 Subject: [PATCH 25/84] Implemented dynamically resizable routers. See #1557 --- .../test/scala/akka/actor/DeployerSpec.scala | 26 +- .../scala/akka/routing/RouterPoolSpec.scala | 93 ++++++ akka-actor/src/main/resources/reference.conf | 12 + .../src/main/scala/akka/actor/Deployer.scala | 22 +- .../src/main/scala/akka/routing/Routing.scala | 268 ++++++++++++++++-- 5 files changed, 392 insertions(+), 29 deletions(-) create mode 100644 akka-actor-tests/src/test/scala/akka/routing/RouterPoolSpec.scala diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala index 92e1f4f3ab..faae7350b6 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala @@ -38,6 +38,13 @@ object DeployerSpec { router = scatter-gather within = 2 seconds } + /service-pool { + router = round-robin + pool { + lower-bound = 1 + upper-bound = 10 + } + } } """, ConfigParseOptions.defaults) @@ -121,18 +128,19 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { assertRouting(ScatterGatherFirstCompletedRouter(nrOfInstances = 1, within = 2 seconds), "/service-scatter-gather") } + "be able to parse 'akka.actor.deployment._' with router pool" in { + val pool = DefaultRouterPool() + assertRouting(RoundRobinRouter(pool = Some(pool)), "/service-pool") + } + def assertRouting(expected: RouterConfig, service: String) { val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service) deployment must be('defined) - - deployment must be(Some( - Deploy( - service, - deployment.get.config, - None, - expected, - LocalScope))) - + deployment.get.path must be(service) + deployment.get.recipe must be(None) + deployment.get.routing.getClass must be(expected.getClass) + deployment.get.routing.pool must be(expected.pool) + deployment.get.scope must be(LocalScope) } } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RouterPoolSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RouterPoolSpec.scala new file mode 100644 index 0000000000..3ca71e3180 --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/routing/RouterPoolSpec.scala @@ -0,0 +1,93 @@ +/** + * 
Copyright (C) 2009-2011 Typesafe Inc. + */ +package akka.routing + +import akka.actor.Actor +import akka.testkit.AkkaSpec +import akka.testkit.DefaultTimeout +import akka.testkit.ImplicitSender +import akka.testkit.TestLatch +import akka.actor.Props +import akka.dispatch.Await +import akka.util.duration._ +import akka.actor.ActorRef + +object RouterPoolSpec { + + val config = """ + akka.actor.deployment { + /router1 { + router = round-robin + pool { + lower-bound = 2 + upper-bound = 3 + } + } + } + """ + + class TestActor extends Actor { + def receive = { + case latch: TestLatch ⇒ latch.countDown() + } + } + +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class RouterPoolSpec extends AkkaSpec(RouterPoolSpec.config) with DefaultTimeout with ImplicitSender { + + import akka.routing.RouterPoolSpec._ + + "DefaultRouterPool" must { + + "use settings to evaluate capacity" in { + val pool = DefaultRouterPool( + lowerBound = 2, + upperBound = 3) + + val c1 = pool.capacity(IndexedSeq.empty[ActorRef]) + c1 must be(2) + + val current = IndexedSeq(system.actorOf(Props[TestActor]), system.actorOf(Props[TestActor])) + val c2 = pool.capacity(current) + c2 must be(0) + } + + "be possible to define programatically" in { + val latch = new TestLatch(3) + + val pool = DefaultRouterPool( + lowerBound = 2, + upperBound = 3) + val router = system.actorOf(Props[TestActor].withRouter(RoundRobinRouter(pool = Some(pool)))) + + router ! latch + router ! latch + router ! latch + + Await.ready(latch, 5 seconds) + + val current = Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees] + current.routees.size must be(2) + } + + "be possible to define in configuration" in { + val latch = new TestLatch(3) + + val router = system.actorOf(Props[TestActor].withRouter(FromConfig()), "router1") + + router ! latch + router ! latch + router ! latch + + Await.ready(latch, 5 seconds) + + val current = Await.result(router ? 
CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees] + current.routees.size must be(2) + } + + } + +} diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index 236eadc579..7d4800e807 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -97,6 +97,18 @@ akka { # precedence over nr-of-instances paths = [] } + + # FIXME document pool settings + pool { + lower-bound = 1 + upper-bound = 10 + pressure-threshold = 3 + rampup-rate = 0.2 + backoff-threshold = 0.7 + backoff-rate = 0.1 + # When the pool shrink the abandoned actors are stopped with PoisonPill after this delay + stop-delay = 1 second + } } } diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 988d6bf126..48475169ee 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -55,12 +55,26 @@ class Deployer(val settings: ActorSystem.Settings) { val within = Duration(deployment.getMilliseconds("within"), TimeUnit.MILLISECONDS) + val pool: Option[RouterPool] = if (config.hasPath("pool")) { + val poolConfig = deployment.getConfig("pool") + Some(DefaultRouterPool( + lowerBound = poolConfig.getInt("lower-bound"), + upperBound = poolConfig.getInt("upper-bound"), + pressureThreshold = poolConfig.getInt("pressure-threshold"), + rampupRate = poolConfig.getDouble("rampup-rate"), + backoffThreshold = poolConfig.getDouble("backoff-threshold"), + backoffRate = poolConfig.getDouble("backoff-rate"), + stopDelay = Duration(poolConfig.getMilliseconds("stop-delay"), TimeUnit.MILLISECONDS))) + } else { + None + } + val router: RouterConfig = deployment.getString("router") match { case "from-code" ⇒ NoRouter - case "round-robin" ⇒ RoundRobinRouter(nrOfInstances, routees) - case "random" ⇒ RandomRouter(nrOfInstances, routees) - case "scatter-gather" ⇒ ScatterGatherFirstCompletedRouter(nrOfInstances, routees, within) - case "broadcast" ⇒ BroadcastRouter(nrOfInstances, routees) + case "round-robin" ⇒ RoundRobinRouter(nrOfInstances, routees, pool) + case "random" ⇒ RandomRouter(nrOfInstances, routees, pool) + case "scatter-gather" ⇒ ScatterGatherFirstCompletedRouter(nrOfInstances, routees, within, pool) + case "broadcast" ⇒ BroadcastRouter(nrOfInstances, routees, pool) case x ⇒ throw new ConfigurationException("unknown router type " + x + " for path " + key) } diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 5ee7ca76d1..2e4e01992e 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -4,8 +4,11 @@ package akka.routing import akka.actor._ +import akka.dispatch.Future import java.util.concurrent.atomic.AtomicLong +import java.util.concurrent.atomic.AtomicBoolean import akka.util.{ Duration, Timeout } +import akka.util.duration._ import akka.config.ConfigurationException import scala.collection.JavaConversions.iterableAsScalaIterable @@ -20,11 +23,24 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup _supervisor, _path) { + private val routeeProps = _props.copy(routerConfig = NoRouter) + @volatile - private[akka] var _routees: IndexedSeq[ActorRef] = _ // this MUST be initialized during createRoute + private var _routees: IndexedSeq[ActorRef] = IndexedSeq.empty[ActorRef] // this MUST be initialized during createRoute def routees = 
_routees - val route = _props.routerConfig.createRoute(_props.copy(routerConfig = NoRouter), actorContext, this) + def addRoutees(newRoutees: IndexedSeq[ActorRef]) { + _routees = _routees ++ newRoutees + // subscribe to Terminated messages for all route destinations, to be handled by Router actor + newRoutees foreach underlying.watch + } + + def removeRoutees(abandonedRoutees: IndexedSeq[ActorRef]) { + _routees = _routees filterNot (x ⇒ abandonedRoutees.contains(x)) + abandonedRoutees foreach underlying.unwatch + } + + val route = _props.routerConfig.createRoute(routeeProps, actorContext, this) def applyRoute(sender: ActorRef, message: Any): Iterable[Destination] = message match { case _: AutoReceivedMessage ⇒ Nil @@ -37,15 +53,16 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup else Nil } + if (_props.routerConfig.pool.isEmpty && _routees.isEmpty) + throw new ActorInitializationException("router " + _props.routerConfig + " did not register routees!") + _routees match { - case null ⇒ throw new ActorInitializationException("router " + _props.routerConfig + " did not register routees!") - case x ⇒ - _routees = x // volatile write to publish the route before sending messages - // subscribe to Terminated messages for all route destinations, to be handled by Router actor - _routees foreach underlying.watch + case x ⇒ _routees = x // volatile write to publish the route before sending messages } override def !(message: Any)(implicit sender: ActorRef = null): Unit = { + _props.routerConfig.resizePool(routeeProps, actorContext, routees) + val s = if (sender eq null) underlying.system.deadLetters else sender val msg = message match { @@ -58,6 +75,11 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup case refs ⇒ refs foreach (p ⇒ p.recipient.!(msg)(p.sender)) } } + + override def ?(message: Any)(implicit timeout: Timeout): Future[Any] = { + _props.routerConfig.resizePool(routeeProps, actorContext, routees) + super.?(message)(timeout) + } } /** @@ -94,18 +116,47 @@ trait RouterConfig { protected def toAll(sender: ActorRef, routees: Iterable[ActorRef]): Iterable[Destination] = routees.map(Destination(sender, _)) - protected def createRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees) match { + def createRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees) match { case (0, Nil) ⇒ throw new IllegalArgumentException("Insufficient information - missing configuration.") case (x, Nil) ⇒ (1 to x).map(_ ⇒ context.actorOf(props))(scala.collection.breakOut) case (_, xs) ⇒ xs.map(context.actorFor(_))(scala.collection.breakOut) } protected def createAndRegisterRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): Unit = { - registerRoutees(context, createRoutees(props, context, nrOfInstances, routees)) + pool match { + case None ⇒ registerRoutees(context, createRoutees(props, context, nrOfInstances, routees)) + case Some(p) ⇒ resizePool(props, context, context.self.asInstanceOf[RoutedActorRef].routees) + } } - protected def registerRoutees(context: ActorContext, routees: IndexedSeq[ActorRef]): Unit = { - context.self.asInstanceOf[RoutedActorRef]._routees = routees + /** + * Adds new routees to the router. 
+ */ + def registerRoutees(context: ActorContext, routees: IndexedSeq[ActorRef]): Unit = { + context.self.asInstanceOf[RoutedActorRef].addRoutees(routees) + } + + /** + * Removes routees from the router. This method doesn't stop the routees. + */ + def unregisterRoutees(context: ActorContext, routees: IndexedSeq[ActorRef]): Unit = { + context.self.asInstanceOf[RoutedActorRef].removeRoutees(routees) + } + + def pool: Option[RouterPool] = None + + private val resizePoolInProgress = new AtomicBoolean + + def resizePool(props: Props, context: ActorContext, currentRoutees: IndexedSeq[ActorRef]) { + for (p ← pool) { + if (resizePoolInProgress.compareAndSet(false, true)) { + try { + p.resize(props, context, currentRoutees, this) + } finally { + resizePoolInProgress.set(false) + } + } + } } } @@ -151,7 +202,7 @@ trait Router extends Actor { final def receive = ({ case Terminated(child) ⇒ - ref._routees = ref._routees filterNot (_ == child) + ref.removeRoutees(IndexedSeq(child)) if (ref.routees.isEmpty) context.stop(self) }: Receive) orElse routerReceive @@ -236,7 +287,8 @@ object RoundRobinRouter { * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ -case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil) extends RouterConfig with RoundRobinLike { +case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val pool: Option[RouterPool] = None) + extends RouterConfig with RoundRobinLike { /** * Constructor that sets nrOfInstances to be created. @@ -253,6 +305,12 @@ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = def this(t: java.lang.Iterable[String]) = { this(routees = iterableAsScalaIterable(t)) } + + /** + * Constructor that sets the pool to be used. + * Java API + */ + def this(pool: RouterPool) = this(pool = Some(pool)) } trait RoundRobinLike { this: RouterConfig ⇒ @@ -303,7 +361,8 @@ object RandomRouter { * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ -case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil) extends RouterConfig with RandomLike { +case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val pool: Option[RouterPool] = None) + extends RouterConfig with RandomLike { /** * Constructor that sets nrOfInstances to be created. @@ -320,6 +379,12 @@ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil) def this(t: java.lang.Iterable[String]) = { this(routees = iterableAsScalaIterable(t)) } + + /** + * Constructor that sets the pool to be used. + * Java API + */ + def this(pool: RouterPool) = this(pool = Some(pool)) } trait RandomLike { this: RouterConfig ⇒ @@ -373,7 +438,8 @@ object BroadcastRouter { * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. 
*/ -case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil) extends RouterConfig with BroadcastLike { +case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val pool: Option[RouterPool] = None) + extends RouterConfig with BroadcastLike { /** * Constructor that sets nrOfInstances to be created. @@ -390,6 +456,13 @@ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = N def this(t: java.lang.Iterable[String]) = { this(routees = iterableAsScalaIterable(t)) } + + /** + * Constructor that sets the pool to be used. + * Java API + */ + def this(pool: RouterPool) = this(pool = Some(pool)) + } trait BroadcastLike { this: RouterConfig ⇒ @@ -432,7 +505,8 @@ object ScatterGatherFirstCompletedRouter { * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ -case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, within: Duration) +case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, within: Duration, + override val pool: Option[RouterPool] = None) extends RouterConfig with ScatterGatherFirstCompletedLike { /** @@ -450,6 +524,12 @@ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: It def this(t: java.lang.Iterable[String], w: Duration) = { this(routees = iterableAsScalaIterable(t), within = w) } + + /** + * Constructor that sets the pool to be used. + * Java API + */ + def this(pool: RouterPool, w: Duration) = this(pool = Some(pool), within = w) } trait ScatterGatherFirstCompletedLike { this: RouterConfig ⇒ @@ -473,3 +553,159 @@ trait ScatterGatherFirstCompletedLike { this: RouterConfig ⇒ } } } + +/** + * Routers with dynamically resizable number of routees is implemented by providing a pool + * implementation in [[akka.routing.RouterConfig]]. When the resize method is invoked you can + * create and register more routees with `routerConfig.registerRoutees(actorContext, newRoutees) + * or remove routees with `routerConfig.unregisterRoutees(actorContext, abandonedRoutees)` and + * sending [[akka.actor.PoisonPill]] to them. + */ +trait RouterPool { + def resize(props: Props, actorContext: ActorContext, currentRoutees: IndexedSeq[ActorRef], routerConfig: RouterConfig) +} + +case class DefaultRouterPool( + /** + * The fewest number of routees the pool should ever have + */ + lowerBound: Int = 1, + /** + * The most number of routees the pool should ever have + */ + upperBound: Int = 10, + /** + * A routee is considered to be busy (under pressure) when + * it has at least this number of messages in its mailbox. + * When pressureThreshold is defined as 0 the routee + * is considered busy when it is currently processing a + * message. + */ + pressureThreshold: Int = 3, + /** + * Percentage to increase capacity whenever all routees are busy. + * For example, 0.2 would increase 20%, etc. + */ + rampupRate: Double = 0.2, + /** + * Fraction of capacity the pool has to fall below before backing off. + * For example, if this is 0.7, then we'll remove some routees when + * less than 70% of routees are busy. + * Use 0.0 to avoid removal of routees. + */ + backoffThreshold: Double = 0.7, + /** + * Fraction of routees to be removed when the pool reaches the + * backoffThreshold. + * Use 0.0 to avoid removal of routees. 
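+   * As a rough illustration (using the backoff formula defined further down in this class):
+   * with `backoffRate = 0.1` and 10 routees, of which fewer than `backoffThreshold` (70%)
+   * are busy, one routee (ceil(0.1 * 10)) is proposed for removal per resize.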
+ */ + backoffRate: Double = 0.1, + /** + * When the pool shrink the abandoned actors are stopped with PoisonPill after this delay + */ + stopDelay: Duration = 1.second) extends RouterPool { + + def resize(props: Props, actorContext: ActorContext, currentRoutees: IndexedSeq[ActorRef], routerConfig: RouterConfig) { + val requestedCapacity = capacity(currentRoutees) + + if (requestedCapacity > 0) { + val newRoutees = routerConfig.createRoutees(props, actorContext, requestedCapacity, Nil) + routerConfig.registerRoutees(actorContext, newRoutees) + } else if (requestedCapacity < 0) { + val (keep, abandon) = currentRoutees.splitAt(currentRoutees.length + requestedCapacity) + routerConfig.unregisterRoutees(actorContext, abandon) + delayedStop(actorContext.system.scheduler, abandon) + } + } + + /** + * Give concurrent messages a chance to be placed in mailbox before + * sending PoisonPill. + */ + protected def delayedStop(scheduler: Scheduler, abandon: IndexedSeq[ActorRef]) { + scheduler.scheduleOnce(stopDelay) { + abandon foreach (_ ! PoisonPill) + } + } + + /** + * Returns the overall desired change in pool capacity. Positive value will + * add routees to the pool. Negative value will remove routees from the + * pool. + * @param routees The current actor in the pool + * @return the number of routees by which the pool should be adjusted (positive, negative or zero) + */ + def capacity(routees: IndexedSeq[ActorRef]): Int = { + val currentSize = routees.size + val delta = filter(pressure(routees), currentSize) + val proposed = currentSize + delta + + if (proposed < lowerBound) delta + (lowerBound - proposed) + else if (proposed > upperBound) delta - (proposed - upperBound) + else delta + } + + /** + * Number of routees considered busy, or above 'pressure level'. + * + * Default implementation: + * When `pressureThreshold` > 0 the number of routees with at least + * the configured `pressureThreshold` messages in their mailbox, + * otherwise number of routees currently processing a + * message. + * + * @param routees the current pool of routees + * @return number of busy routees, between 0 and routees.size + */ + def pressure(routees: Seq[ActorRef]): Int = { + if (pressureThreshold > 0) { + routees count { + case a: LocalActorRef ⇒ a.underlying.mailbox.numberOfMessages >= pressureThreshold + case _ ⇒ false + } + } else { + routees count { + case a: LocalActorRef ⇒ + val cell = a.underlying + cell.mailbox.isScheduled && cell.currentMessage != null + case _ ⇒ false + } + } + } + + /** + * This method can be used to smooth the capacity delta by considering + * the current pressure and current capacity. + * + * @param pressure current number of busy routees + * @param capacity current number of routees + * @return proposed change in the capacity + */ + def filter(pressure: Int, capacity: Int): Int = { + rampup(pressure, capacity) + backoff(pressure, capacity) + } + + /** + * Computes a proposed positive (or zero) capacity delta using + * the configured `rampupRate`. 
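+   * For example, with the default `rampupRate = 0.2` and a fully busy pool of 5 routees
+   * (pressure == capacity), the proposed increase is ceil(0.2 * 5) = 1 routee.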
+ * @param pressure the current number of busy routees + * @param capacity the current number of total routees + * @return proposed increase in capacity + */ + def rampup(pressure: Int, capacity: Int): Int = + if (pressure < capacity) 0 else math.ceil(rampupRate * capacity) toInt + + /** + * Computes a proposed negative (or zero) capacity delta using + * the configured `backoffThreshold` and `backoffRate` + * @param pressure the current number of busy routees + * @param capacity the current number of total routees + * @return proposed decrease in capacity (as a negative number) + */ + def backoff(pressure: Int, capacity: Int): Int = + if (backoffThreshold > 0.0 && backoffRate > 0.0 && capacity > 0 && pressure.toDouble / capacity < backoffThreshold) + math.ceil(-1.0 * backoffRate * capacity) toInt + else 0 + +} + From 6fddb87fcaf38d8fa30bbf31c1d1496617436af3 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 10 Jan 2012 13:33:57 +0100 Subject: [PATCH 26/84] fix one spurious buglet in CallingThreadDispatcher, see #1375 --- .../main/scala/akka/dispatch/Mailbox.scala | 35 +++++++++--------- .../src/main/scala/akka/util/LockUtil.scala | 5 +++ .../testkit/CallingThreadDispatcher.scala | 36 +++++++++++-------- 3 files changed, 45 insertions(+), 31 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index bb0f845aba..3abd961d0f 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -227,27 +227,28 @@ private[akka] abstract class Mailbox(val actor: ActorCell) extends MessageQueue * called when an actor is unregistered. * By default it dequeues all system messages + messages and ships them to the owning actors' systems' DeadLetterMailbox */ - protected[dispatch] def cleanUp(): Unit = if (actor ne null) { - val dlq = actor.systemImpl.deadLetterMailbox - if (hasSystemMessages) { - var message = systemDrain() - while (message ne null) { - // message must be “virgin” before being able to systemEnqueue again - val next = message.next - message.next = null - dlq.systemEnqueue(actor.self, message) - message = next + protected[dispatch] def cleanUp(): Unit = + if (actor ne null) { // actor is null for the deadLetterMailbox + val dlq = actor.systemImpl.deadLetterMailbox + if (hasSystemMessages) { + var message = systemDrain() + while (message ne null) { + // message must be “virgin” before being able to systemEnqueue again + val next = message.next + message.next = null + dlq.systemEnqueue(actor.self, message) + message = next + } } - } - if (hasMessages) { - var envelope = dequeue - while (envelope ne null) { - dlq.enqueue(actor.self, envelope) - envelope = dequeue + if (hasMessages) { + var envelope = dequeue + while (envelope ne null) { + dlq.enqueue(actor.self, envelope) + envelope = dequeue + } } } - } } trait MessageQueue { diff --git a/akka-actor/src/main/scala/akka/util/LockUtil.scala b/akka-actor/src/main/scala/akka/util/LockUtil.scala index e17507d427..65bcf563fc 100644 --- a/akka-actor/src/main/scala/akka/util/LockUtil.scala +++ b/akka-actor/src/main/scala/akka/util/LockUtil.scala @@ -148,6 +148,11 @@ class Switch(startAsOn: Boolean = false) { if (switch.get) on else off } + /** + * Executes the given code while holding this switch’s lock, i.e. protected from concurrent modification of the switch status. 
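+   * A minimal usage sketch (`initializeSharedState` is a hypothetical helper):
+   * {{{
+   * val switch = new Switch(startAsOn = false)
+   * switch.locked {
+   *   // no other thread can flip the switch on or off while this block runs
+   *   initializeSharedState()
+   * }
+   * }}}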
+ */ + def locked[T](code: ⇒ T) = synchronized { code } + /** * Returns whether the switch is IMMEDIATELY on (no locking) */ diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala index 947ae4e262..191901b4ee 100644 --- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala +++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala @@ -133,11 +133,6 @@ class CallingThreadDispatcher( protected[akka] override def createMailbox(actor: ActorCell) = new CallingThreadMailbox(actor) - private def getMailbox(actor: ActorCell): Option[CallingThreadMailbox] = actor.mailbox match { - case m: CallingThreadMailbox ⇒ Some(m) - case _ ⇒ None - } - protected[akka] override def shutdown() {} protected[akka] override def throughput = 0 @@ -147,7 +142,10 @@ class CallingThreadDispatcher( protected[akka] override def shutdownTimeout = 1 second override def suspend(actor: ActorCell) { - getMailbox(actor) foreach (_.suspendSwitch.switchOn) + actor.mailbox match { + case m: CallingThreadMailbox ⇒ m.suspendSwitch.switchOn + case m ⇒ m.systemEnqueue(actor.self, Suspend()) + } } override def resume(actor: ActorCell) { @@ -187,12 +185,10 @@ class CallingThreadDispatcher( false } { queue.push(handle) - if (queue.isActive) - false - else { + if (!queue.isActive) { queue.enter true - } + } else false } if (execute) runQueue(mbox, queue) case m ⇒ m.enqueue(receiver.self, handle) @@ -214,14 +210,14 @@ class CallingThreadDispatcher( private def runQueue(mbox: CallingThreadMailbox, queue: NestingQueue, interruptedex: InterruptedException = null) { var intex = interruptedex; assert(queue.isActive) - mbox.lock.lock + mbox.ctdLock.lock val recurse = try { mbox.processAllSystemMessages() val handle = mbox.suspendSwitch.fold[Envelope] { queue.leave null } { - val ret = queue.pop + val ret = if (mbox.isClosed) null else queue.pop if (ret eq null) queue.leave ret } @@ -248,7 +244,7 @@ class CallingThreadDispatcher( } catch { case e ⇒ queue.leave; throw e } finally { - mbox.lock.unlock + mbox.ctdLock.unlock } if (recurse) { runQueue(mbox, queue, intex) @@ -295,11 +291,23 @@ class CallingThreadMailbox(_receiver: ActorCell) extends Mailbox(_receiver) with def queue = q.get - val lock = new ReentrantLock + val ctdLock = new ReentrantLock val suspendSwitch = new Switch override def enqueue(receiver: ActorRef, msg: Envelope) {} override def dequeue() = null override def hasMessages = queue.isEmpty override def numberOfMessages = queue.size + + override def cleanUp(): Unit = { + /* + * This is called from dispatcher.unregister, i.e. under this.lock. If + * another thread obtained a reference to this mailbox and enqueues after + * the gather operation, tough luck: no guaranteed delivery to deadLetters. + */ + suspendSwitch.locked { + CallingThreadDispatcherQueues(actor.system).gatherFromAllOtherQueues(this, queue) + super.cleanUp() + } + } } From 19845d93e8e244cb07d27c40ad61e9d6ebd59b43 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 10 Jan 2012 15:53:27 +0100 Subject: [PATCH 27/84] Improvements and finalization of dynamically resizable routers, replaces ActorPool. 
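
The resizer introduced above grows a router by `rampupRate` (rounded up) only when every routee is busy, shrinks it by `backoffRate` when the busy fraction falls below `backoffThreshold`, and clamps the proposed size to the configured bounds. A minimal standalone sketch of that arithmetic, using illustrative names and sample settings rather than code taken from the patch itself, might look like this:

    // Illustrative sketch only: mirrors the rampup/backoff/clamping formulas of the
    // resizer described above; the object name and sample settings are made up here.
    object ResizerArithmeticSketch extends App {
      val lowerBound = 2
      val upperBound = 10
      val rampupRate = 0.2        // grow 20% (rounded up) when all routees are busy
      val backoffThreshold = 0.3  // shrink only when fewer than 30% of routees are busy
      val backoffRate = 0.1       // shrink by 10% of current capacity at a time

      // Positive (or zero) delta: only kicks in once pressure has reached capacity.
      def rampup(pressure: Int, capacity: Int): Int =
        if (pressure < capacity) 0 else math.ceil(rampupRate * capacity).toInt

      // Negative (or zero) delta: only kicks in when the busy fraction is below the threshold.
      def backoff(pressure: Int, capacity: Int): Int =
        if (backoffThreshold > 0.0 && backoffRate > 0.0 && capacity > 0 &&
          pressure.toDouble / capacity < backoffThreshold)
          math.ceil(-1.0 * backoffRate * capacity).toInt
        else 0

      // Combined proposal, clamped so the resulting size stays within [lowerBound, upperBound].
      def delta(pressure: Int, currentSize: Int): Int = {
        val d = rampup(pressure, currentSize) + backoff(pressure, currentSize)
        val proposed = currentSize + d
        if (proposed < lowerBound) d + (lowerBound - proposed)
        else if (proposed > upperBound) d - (proposed - upperBound)
        else d
      }

      println(delta(pressure = 6, currentSize = 6))   // +2: all busy, ceil(0.2 * 6)
      println(delta(pressure = 2, currentSize = 10))  // -1: 20% busy, ceil(0.1 * 10) removed
      println(delta(pressure = 5, currentSize = 10))  //  0: between the two thresholds
    }

With these sample settings a fully busy router of 6 routees grows by 2, a router of 10 with only 2 busy routees shrinks by 1, and anything between the two thresholds is left unchanged.
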
See 1557 * resize on nth message instead of always each message * improved pressure evaluation * more tests * documentation * removed ActorPool --- .../test/scala/akka/actor/DeployerSpec.scala | 12 +- .../scala/akka/routing/ActorPoolSpec.scala | 361 ------------- .../test/scala/akka/routing/ResizerSpec.scala | 249 +++++++++ .../scala/akka/routing/RouterPoolSpec.scala | 93 ---- .../scala/akka/ticket/Ticket703Spec.scala | 34 -- akka-actor/src/main/resources/reference.conf | 50 +- .../src/main/scala/akka/actor/Deployer.scala | 29 +- .../src/main/scala/akka/routing/Pool.scala | 492 ------------------ .../src/main/scala/akka/routing/Routing.scala | 205 +++++--- .../docs/jrouting/RouterViaConfigExample.java | 7 + .../jrouting/RouterViaProgramExample.java | 11 + akka-docs/java/routing.rst | 25 + .../project/migration-guide-1.3.x-2.0.x.rst | 5 + .../akka/docs/routing/ActorPoolExample.scala | 24 - .../routing/BoundedCapacitorExample.scala | 26 - .../routing/CapacityStrategyExample.scala | 19 - .../docs/routing/RouterViaConfigExample.scala | 17 + .../routing/RouterViaProgramExample.scala | 9 + akka-docs/scala/routing.rst | 93 +--- .../scala/akka/remote/RemoteDeployer.scala | 8 +- .../scala/akka/routing/RemoteRouters.scala | 48 +- 21 files changed, 591 insertions(+), 1226 deletions(-) delete mode 100644 akka-actor-tests/src/test/scala/akka/routing/ActorPoolSpec.scala create mode 100644 akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala delete mode 100644 akka-actor-tests/src/test/scala/akka/routing/RouterPoolSpec.scala delete mode 100644 akka-actor-tests/src/test/scala/akka/ticket/Ticket703Spec.scala delete mode 100644 akka-actor/src/main/scala/akka/routing/Pool.scala delete mode 100644 akka-docs/scala/code/akka/docs/routing/ActorPoolExample.scala delete mode 100644 akka-docs/scala/code/akka/docs/routing/BoundedCapacitorExample.scala delete mode 100644 akka-docs/scala/code/akka/docs/routing/CapacityStrategyExample.scala diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala index faae7350b6..404fcf5acb 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala @@ -38,9 +38,9 @@ object DeployerSpec { router = scatter-gather within = 2 seconds } - /service-pool { + /service-resizer { router = round-robin - pool { + resizer { lower-bound = 1 upper-bound = 10 } @@ -128,9 +128,9 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { assertRouting(ScatterGatherFirstCompletedRouter(nrOfInstances = 1, within = 2 seconds), "/service-scatter-gather") } - "be able to parse 'akka.actor.deployment._' with router pool" in { - val pool = DefaultRouterPool() - assertRouting(RoundRobinRouter(pool = Some(pool)), "/service-pool") + "be able to parse 'akka.actor.deployment._' with router resizer" in { + val resizer = DefaultResizer() + assertRouting(RoundRobinRouter(resizer = Some(resizer)), "/service-resizer") } def assertRouting(expected: RouterConfig, service: String) { @@ -139,7 +139,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { deployment.get.path must be(service) deployment.get.recipe must be(None) deployment.get.routing.getClass must be(expected.getClass) - deployment.get.routing.pool must be(expected.pool) + deployment.get.routing.resizer must be(expected.resizer) deployment.get.scope must be(LocalScope) } diff --git a/akka-actor-tests/src/test/scala/akka/routing/ActorPoolSpec.scala 
b/akka-actor-tests/src/test/scala/akka/routing/ActorPoolSpec.scala deleted file mode 100644 index f18fd2e5e1..0000000000 --- a/akka-actor-tests/src/test/scala/akka/routing/ActorPoolSpec.scala +++ /dev/null @@ -1,361 +0,0 @@ -package akka.routing - -import akka.actor._ -import akka.testkit._ -import akka.util.duration._ -import java.util.concurrent.atomic.{ AtomicBoolean, AtomicInteger } -import akka.testkit.AkkaSpec -import akka.dispatch.{ Await, Promise, Future } - -object ActorPoolSpec { - - trait Foo { - def sq(x: Int, sleep: Long): Future[Int] - } - - class FooImpl extends Foo { - import TypedActor.dispatcher - def sq(x: Int, sleep: Long): Future[Int] = { - if (sleep > 0) Thread.sleep(sleep) - Promise.successful(x * x) - } - } - - val faultHandler = OneForOneStrategy(List(classOf[Exception]), 5, 1000) -} - -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class TypedActorPoolSpec extends AkkaSpec with DefaultTimeout { - import ActorPoolSpec._ - "Actor Pool (2)" must { - "support typed actors" in { - val ta = TypedActor(system) - val pool = ta.createProxy[Foo](new Actor with DefaultActorPool with BoundedCapacityStrategy with MailboxPressureCapacitor with SmallestMailboxSelector with Filter with RunningMeanBackoff with BasicRampup { - val typedActor = TypedActor(context) - def lowerBound = 1 - def upperBound = 5 - def pressureThreshold = 1 - def partialFill = true - def selectionCount = 1 - def rampupRate = 0.1 - def backoffRate = 0.50 - def backoffThreshold = 0.50 - def instance(p: Props) = typedActor.getActorRefFor(typedActor.typedActorOf[Foo, FooImpl](props = p.withTimeout(10 seconds))) - def receive = _route - }, Props().withTimeout(10 seconds).withFaultHandler(faultHandler)) - - val results = for (i ← 1 to 100) yield (i, pool.sq(i, 0)) - - for ((i, r) ← results) - Await.result(r, timeout.duration) must equal(i * i) - - ta.stop(pool) - } - } -} - -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class ActorPoolSpec extends AkkaSpec with DefaultTimeout { - import ActorPoolSpec._ - - "Actor Pool" must { - - "have expected capacity" in { - val latch = TestLatch(2) - val count = new AtomicInteger(0) - - val pool = system.actorOf( - Props(new Actor with DefaultActorPool with FixedCapacityStrategy with SmallestMailboxSelector { - def instance(p: Props) = system.actorOf(p.withCreator(new Actor { - def receive = { - case _ ⇒ - count.incrementAndGet - latch.countDown() - sender.tell("success") - } - })) - - def limit = 2 - def selectionCount = 1 - def partialFill = true - def receive = _route - }).withFaultHandler(faultHandler)) - - val successes = TestLatch(2) - val successCounter = system.actorOf(Props(new Actor { - def receive = { - case "success" ⇒ successes.countDown() - } - })) - - implicit val replyTo = successCounter - pool ! "a" - pool ! "b" - - Await.ready(latch, TestLatch.DefaultTimeout) - Await.ready(successes, TestLatch.DefaultTimeout) - - count.get must be(2) - - Await.result((pool ? 
ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be(2) - - system.stop(pool) - } - - "pass ticket #705" in { - val pool = system.actorOf( - Props(new Actor with DefaultActorPool with BoundedCapacityStrategy with MailboxPressureCapacitor with SmallestMailboxSelector with BasicFilter { - def lowerBound = 2 - def upperBound = 20 - def rampupRate = 0.1 - def backoffRate = 0.1 - def backoffThreshold = 0.5 - def partialFill = true - def selectionCount = 1 - def receive = _route - def pressureThreshold = 1 - def instance(p: Props) = system.actorOf(p.withCreator(new Actor { - def receive = { - case req: String ⇒ { - (10 millis).dilated.sleep - sender.tell("Response") - } - } - })) - }).withFaultHandler(faultHandler)) - - try { - (for (count ← 1 to 500) yield pool.?("Test", 20 seconds)) foreach { - Await.result(_, 20 seconds) must be("Response") - } - } finally { - system.stop(pool) - } - } - - "grow as needed under pressure" in { - // make sure the pool starts at the expected lower limit and grows to the upper as needed - // as influenced by the backlog of blocking pooled actors - - var latch = TestLatch(3) - val count = new AtomicInteger(0) - - val pool = system.actorOf( - Props(new Actor with DefaultActorPool with BoundedCapacityStrategy with ActiveActorsPressureCapacitor with SmallestMailboxSelector with BasicNoBackoffFilter { - def instance(p: Props) = system.actorOf(p.withCreator(new Actor { - def receive = { - case n: Int ⇒ - (n millis).dilated.sleep - count.incrementAndGet - latch.countDown() - } - })) - - def lowerBound = 2 - def upperBound = 4 - def rampupRate = 0.1 - def partialFill = true - def selectionCount = 1 - def receive = _route - }).withFaultHandler(faultHandler)) - - // first message should create the minimum number of delgates - - pool ! 1 - - Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be(2) - - var loops = 0 - def loop(t: Int) = { - latch = TestLatch(loops) - count.set(0) - for (m ← 0 until loops) { - pool ? t - (50 millis).dilated.sleep - } - } - - // 2 more should go thru without triggering more - - loops = 2 - - loop(500) - Await.ready(latch, TestLatch.DefaultTimeout) - count.get must be(loops) - - Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be(2) - - // a whole bunch should max it out - - loops = 10 - loop(500) - Await.ready(latch, TestLatch.DefaultTimeout) - count.get must be(loops) - - Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be(4) - - system.stop(pool) - } - - "grow as needed under mailbox pressure" in { - // make sure the pool starts at the expected lower limit and grows to the upper as needed - // as influenced by the backlog of messages in the delegate mailboxes - - var latch = TestLatch(3) - val count = new AtomicInteger(0) - - val pool = system.actorOf( - Props(new Actor with DefaultActorPool with BoundedCapacityStrategy with MailboxPressureCapacitor with SmallestMailboxSelector with BasicNoBackoffFilter { - def instance(p: Props) = system.actorOf(p.withCreator(new Actor { - def receive = { - case n: Int ⇒ - (n millis).dilated.sleep - count.incrementAndGet - latch.countDown() - } - })) - - def lowerBound = 2 - def upperBound = 4 - def pressureThreshold = 3 - def rampupRate = 0.1 - def partialFill = true - def selectionCount = 1 - def receive = _route - }).withFaultHandler(faultHandler)) - - var loops = 0 - def loop(t: Int) = { - latch = TestLatch(loops) - count.set(0) - for (m ← 0 until loops) { - pool ! 
t - } - } - - // send a few messages and observe pool at its lower bound - loops = 3 - loop(500) - Await.ready(latch, TestLatch.DefaultTimeout) - count.get must be(loops) - - Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be(2) - - // send a bunch over the threshold and observe an increment - loops = 15 - loop(500) - - Await.ready(latch, 10 seconds) - count.get must be(loops) - - Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be >= (3) - - system.stop(pool) - } - - "round robin" in { - val latch1 = TestLatch(2) - val delegates = new java.util.concurrent.ConcurrentHashMap[String, String] - - val pool1 = system.actorOf( - Props(new Actor with DefaultActorPool with FixedCapacityStrategy with RoundRobinSelector with BasicNoBackoffFilter { - - def instance(p: Props): ActorRef = system.actorOf(p.withCreator(new Actor { - def receive = { - case _ ⇒ - delegates put (self.path.toString, "") - latch1.countDown() - } - })) - - def limit = 1 - def selectionCount = 1 - def rampupRate = 0.1 - def partialFill = true - def receive = _route - }).withFaultHandler(faultHandler)) - - pool1 ! "a" - pool1 ! "b" - - Await.ready(latch1, TestLatch.DefaultTimeout) - delegates.size must be(1) - - system.stop(pool1) - - val latch2 = TestLatch(2) - delegates.clear() - - val pool2 = system.actorOf( - Props(new Actor with DefaultActorPool with FixedCapacityStrategy with RoundRobinSelector with BasicNoBackoffFilter { - def instance(p: Props) = system.actorOf(p.withCreator(new Actor { - def receive = { - case _ ⇒ - delegates put (self.path.toString, "") - latch2.countDown() - } - })) - - def limit = 2 - def selectionCount = 1 - def rampupRate = 0.1 - def partialFill = false - def receive = _route - }).withFaultHandler(faultHandler)) - - pool2 ! "a" - pool2 ! "b" - - Await.ready(latch2, TestLatch.DefaultTimeout) - delegates.size must be(2) - - system.stop(pool2) - } - - "backoff" in { - val latch = TestLatch(10) - - val pool = system.actorOf( - Props(new Actor with DefaultActorPool with BoundedCapacityStrategy with MailboxPressureCapacitor with SmallestMailboxSelector with Filter with RunningMeanBackoff with BasicRampup { - def instance(p: Props) = system.actorOf(p.withCreator(new Actor { - def receive = { - case n: Int ⇒ - (n millis).dilated.sleep - latch.countDown() - } - })) - - def lowerBound = 1 - def upperBound = 5 - def pressureThreshold = 1 - def partialFill = true - def selectionCount = 1 - def rampupRate = 0.1 - def backoffRate = 0.50 - def backoffThreshold = 0.50 - def receive = _route - }).withFaultHandler(faultHandler)) - - // put some pressure on the pool - - for (m ← 0 to 10) pool ! 250 - - (5 millis).dilated.sleep - - val z = Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size - - z must be >= (2) - - // let it cool down - - for (m ← 0 to 3) { - pool ! 1 - (500 millis).dilated.sleep - } - - Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be <= (z) - - system.stop(pool) - } - } -} diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala new file mode 100644 index 0000000000..16ac1e0c86 --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -0,0 +1,249 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. 
+ */ +package akka.routing + +import akka.actor.Actor +import akka.testkit._ +import akka.actor.Props +import akka.dispatch.Await +import akka.util.duration._ +import akka.actor.ActorRef +import java.util.concurrent.atomic.AtomicInteger + +object ResizerSpec { + + val config = """ + akka.actor.deployment { + /router1 { + router = round-robin + resizer { + lower-bound = 2 + upper-bound = 3 + } + } + } + """ + + class TestActor extends Actor { + def receive = { + case latch: TestLatch ⇒ latch.countDown() + } + } + + class BusyActor extends Actor { + def receive = { + case (latch: TestLatch, busy: TestLatch) ⇒ + latch.countDown() + Await.ready(busy, 5 seconds) + } + } + +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with ImplicitSender { + + import akka.routing.ResizerSpec._ + + "DefaultResizer" must { + + "use settings to evaluate capacity" in { + val resizer = DefaultResizer( + lowerBound = 2, + upperBound = 3) + + val c1 = resizer.capacity(IndexedSeq.empty[ActorRef]) + c1 must be(2) + + val current = IndexedSeq(system.actorOf(Props[TestActor]), system.actorOf(Props[TestActor])) + val c2 = resizer.capacity(current) + c2 must be(0) + } + + "use settings to evaluate rampUp" in { + val resizer = DefaultResizer( + lowerBound = 2, + upperBound = 10, + rampupRate = 0.2) + + resizer.rampup(pressure = 9, capacity = 10) must be(0) + resizer.rampup(pressure = 5, capacity = 5) must be(1) + resizer.rampup(pressure = 6, capacity = 6) must be(2) + } + + "use settings to evaluate backoff" in { + val resizer = DefaultResizer( + lowerBound = 2, + upperBound = 10, + backoffThreshold = 0.3, + backoffRate = 0.1) + + resizer.backoff(pressure = 10, capacity = 10) must be(0) + resizer.backoff(pressure = 4, capacity = 10) must be(0) + resizer.backoff(pressure = 3, capacity = 10) must be(0) + resizer.backoff(pressure = 2, capacity = 10) must be(-1) + resizer.backoff(pressure = 0, capacity = 10) must be(-1) + resizer.backoff(pressure = 1, capacity = 9) must be(-1) + resizer.backoff(pressure = 0, capacity = 9) must be(-1) + } + + "be possible to define programatically" in { + + val latch = new TestLatch(3) + + val resizer = DefaultResizer( + lowerBound = 2, + upperBound = 3) + val router = system.actorOf(Props[TestActor].withRouter(RoundRobinRouter(resizer = Some(resizer)))) + + router ! latch + router ! latch + router ! latch + + Await.ready(latch, 5 seconds) + + val current = Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees] + current.routees.size must be(2) + } + + "be possible to define in configuration" in { + val latch = new TestLatch(3) + + val router = system.actorOf(Props[TestActor].withRouter(FromConfig()), "router1") + + router ! latch + router ! latch + router ! latch + + Await.ready(latch, 5 seconds) + + val current = Await.result(router ? 
CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees] + current.routees.size must be(2) + } + + "resize when busy" in { + + val busy = new TestLatch(1) + + val resizer = DefaultResizer( + lowerBound = 1, + upperBound = 3, + pressureThreshold = 0, + resizeOnNthMessage = 1) + + val router = system.actorOf(Props[BusyActor].withRouter(RoundRobinRouter(resizer = Some(resizer)))) + + val latch1 = new TestLatch(1) + router.!((latch1, busy)) + Await.ready(latch1, 2 seconds) + + val latch2 = new TestLatch(1) + router.!((latch2, busy)) + Await.ready(latch2, 2 seconds) + + val latch3 = new TestLatch(1) + router.!((latch3, busy)) + Await.ready(latch3, 2 seconds) + + Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(3) + + busy.countDown() + } + + "grow as needed under pressure" in { + // make sure the pool starts at the expected lower limit and grows to the upper as needed + // as influenced by the backlog of blocking pooled actors + + var latch = TestLatch(3) + val count = new AtomicInteger(0) + + val resizer = DefaultResizer( + lowerBound = 2, + upperBound = 4, + rampupRate = 0.1, + pressureThreshold = 1, + resizeOnNthMessage = 1, + backoffThreshold = 0.0) + + val router = system.actorOf(Props(new Actor { + def receive = { + case n: Int ⇒ + (n millis).dilated.sleep + count.incrementAndGet + latch.countDown() + } + }).withRouter(RoundRobinRouter(resizer = Some(resizer)))) + + // first message should create the minimum number of routees + router ! 1 + + Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(2) + + def loop(loops: Int, t: Int) = { + latch = TestLatch(loops) + count.set(0) + for (m ← 0 until loops) { + router ! t + (10 millis).dilated.sleep + } + } + + // 2 more should go thru without triggering more + loop(2, 200) + Await.ready(latch, TestLatch.DefaultTimeout) + count.get must be(2) + + Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(2) + + // a whole bunch should max it out + loop(10, 200) + Await.ready(latch, TestLatch.DefaultTimeout) + count.get must be(10) + + Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(4) + + } + + "backoff" in { + + val resizer = DefaultResizer( + lowerBound = 1, + upperBound = 5, + rampupRate = 1.0, + backoffRate = 1.0, + backoffThreshold = 0.20, + pressureThreshold = 1, + resizeOnNthMessage = 1) + + val router = system.actorOf(Props(new Actor { + def receive = { + case n: Int ⇒ + (n millis).dilated.sleep + } + }).withRouter(RoundRobinRouter(resizer = Some(resizer)))) + + // put some pressure on the router + for (m ← 0 to 5) { + router ! 100 + (5 millis).dilated.sleep + } + + val z = Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size + z must be >= (2) + + (300 millis).dilated.sleep + + // let it cool down + for (m ← 0 to 3) { + router ! 1 + (200 millis).dilated.sleep + } + + Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be < (z) + + } + + } + +} diff --git a/akka-actor-tests/src/test/scala/akka/routing/RouterPoolSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RouterPoolSpec.scala deleted file mode 100644 index 3ca71e3180..0000000000 --- a/akka-actor-tests/src/test/scala/akka/routing/RouterPoolSpec.scala +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Copyright (C) 2009-2011 Typesafe Inc. 
- */ -package akka.routing - -import akka.actor.Actor -import akka.testkit.AkkaSpec -import akka.testkit.DefaultTimeout -import akka.testkit.ImplicitSender -import akka.testkit.TestLatch -import akka.actor.Props -import akka.dispatch.Await -import akka.util.duration._ -import akka.actor.ActorRef - -object RouterPoolSpec { - - val config = """ - akka.actor.deployment { - /router1 { - router = round-robin - pool { - lower-bound = 2 - upper-bound = 3 - } - } - } - """ - - class TestActor extends Actor { - def receive = { - case latch: TestLatch ⇒ latch.countDown() - } - } - -} - -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class RouterPoolSpec extends AkkaSpec(RouterPoolSpec.config) with DefaultTimeout with ImplicitSender { - - import akka.routing.RouterPoolSpec._ - - "DefaultRouterPool" must { - - "use settings to evaluate capacity" in { - val pool = DefaultRouterPool( - lowerBound = 2, - upperBound = 3) - - val c1 = pool.capacity(IndexedSeq.empty[ActorRef]) - c1 must be(2) - - val current = IndexedSeq(system.actorOf(Props[TestActor]), system.actorOf(Props[TestActor])) - val c2 = pool.capacity(current) - c2 must be(0) - } - - "be possible to define programatically" in { - val latch = new TestLatch(3) - - val pool = DefaultRouterPool( - lowerBound = 2, - upperBound = 3) - val router = system.actorOf(Props[TestActor].withRouter(RoundRobinRouter(pool = Some(pool)))) - - router ! latch - router ! latch - router ! latch - - Await.ready(latch, 5 seconds) - - val current = Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees] - current.routees.size must be(2) - } - - "be possible to define in configuration" in { - val latch = new TestLatch(3) - - val router = system.actorOf(Props[TestActor].withRouter(FromConfig()), "router1") - - router ! latch - router ! latch - router ! latch - - Await.ready(latch, 5 seconds) - - val current = Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees] - current.routees.size must be(2) - } - - } - -} diff --git a/akka-actor-tests/src/test/scala/akka/ticket/Ticket703Spec.scala b/akka-actor-tests/src/test/scala/akka/ticket/Ticket703Spec.scala deleted file mode 100644 index f51beb7617..0000000000 --- a/akka-actor-tests/src/test/scala/akka/ticket/Ticket703Spec.scala +++ /dev/null @@ -1,34 +0,0 @@ -package akka.ticket - -import akka.actor._ -import akka.routing._ -import akka.testkit.AkkaSpec -import akka.dispatch.Await -import akka.util.duration._ - -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class Ticket703Spec extends AkkaSpec { - - "A ? 
call to an actor pool" should { - "reuse the proper timeout" in { - val actorPool = system.actorOf( - Props(new Actor with DefaultActorPool with BoundedCapacityStrategy with MailboxPressureCapacitor with SmallestMailboxSelector with BasicNoBackoffFilter { - def lowerBound = 2 - def upperBound = 20 - def rampupRate = 0.1 - def partialFill = true - def selectionCount = 1 - def receive = _route - def pressureThreshold = 1 - def instance(p: Props) = system.actorOf(p.withCreator(new Actor { - def receive = { - case req: String ⇒ - Thread.sleep(6000L) - sender.tell("Response") - } - })) - }).withFaultHandler(OneForOneStrategy(List(classOf[Exception]), 5, 1000))) - Await.result(actorPool.?("Ping", 10000), 10 seconds) must be === "Response" - } - } -} diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index 7d4800e807..5b74a24c8f 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -98,16 +98,54 @@ akka { paths = [] } - # FIXME document pool settings - pool { + # Routers with dynamically resizable number of routees + resizer { + + # The fewest number of routees the router should ever have. lower-bound = 1 + + # The most number of routees the router should ever have. + # Must be greater than or equal to lower-bound. upper-bound = 10 - pressure-threshold = 3 + + # Threshold to evaluate if routee is considered to be busy (under pressure). + # Implementation depends on this value (default is 1). + # 0: number of routees currently processing a message. + # 1: number of routees currently processing a message has + # some messages in mailbox. + # > 1: number of routees with at least the configured pressure-threshold + # messages in their mailbox. Note that estimating mailbox size of + # default UnboundedMailbox is O(N) operation. + pressure-threshold = 1 + + # Percentage to increase capacity whenever all routees are busy. + # For example, 0.2 would increase 20% (rounded up), i.e. if current + # capacity is 6 it will request an increase of 2 more routees. rampup-rate = 0.2 - backoff-threshold = 0.7 + + # Minimum fraction of busy routees before backing off. + # For example, if this is 0.3, then we'll remove some routees only when + # less than 30% of routees are busy, i.e. if current capacity is 10 and + # 3 are busy then the capacity is unchanged, but if 2 or less are busy + # the capacity is decreased. + # Use 0.0 or negative to avoid removal of routees. + backoff-threshold = 0.3 + + # Fraction of routees to be removed when the resizer reaches the + # backoffThreshold. + # For example, 0.1 would decrease 10% (rounded up), i.e. if current + # capacity is 9 it will request an decrease of 1 routee. backoff-rate = 0.1 - # When the pool shrink the abandoned actors are stopped with PoisonPill after this delay - stop-delay = 1 second + + # When the resizer reduce the capacity the abandoned routee actors are stopped + # with PoisonPill after this delay. The reason for the delay is to give concurrent + # messages a chance to be placed in mailbox before sending PoisonPill. + # Use 0s to skip delay. + stop-delay = 1s + + # Number of messages between resize operation. + # Use 1 to resize before each message. 
+ resize-on-nth-message = 10 } } } diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 48475169ee..f32bb6fea8 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -55,26 +55,27 @@ class Deployer(val settings: ActorSystem.Settings) { val within = Duration(deployment.getMilliseconds("within"), TimeUnit.MILLISECONDS) - val pool: Option[RouterPool] = if (config.hasPath("pool")) { - val poolConfig = deployment.getConfig("pool") - Some(DefaultRouterPool( - lowerBound = poolConfig.getInt("lower-bound"), - upperBound = poolConfig.getInt("upper-bound"), - pressureThreshold = poolConfig.getInt("pressure-threshold"), - rampupRate = poolConfig.getDouble("rampup-rate"), - backoffThreshold = poolConfig.getDouble("backoff-threshold"), - backoffRate = poolConfig.getDouble("backoff-rate"), - stopDelay = Duration(poolConfig.getMilliseconds("stop-delay"), TimeUnit.MILLISECONDS))) + val resizer: Option[Resizer] = if (config.hasPath("resizer")) { + val resizerConfig = deployment.getConfig("resizer") + Some(DefaultResizer( + lowerBound = resizerConfig.getInt("lower-bound"), + upperBound = resizerConfig.getInt("upper-bound"), + pressureThreshold = resizerConfig.getInt("pressure-threshold"), + rampupRate = resizerConfig.getDouble("rampup-rate"), + backoffThreshold = resizerConfig.getDouble("backoff-threshold"), + backoffRate = resizerConfig.getDouble("backoff-rate"), + stopDelay = Duration(resizerConfig.getMilliseconds("stop-delay"), TimeUnit.MILLISECONDS), + resizeOnNthMessage = resizerConfig.getInt("resize-on-nth-message"))) } else { None } val router: RouterConfig = deployment.getString("router") match { case "from-code" ⇒ NoRouter - case "round-robin" ⇒ RoundRobinRouter(nrOfInstances, routees, pool) - case "random" ⇒ RandomRouter(nrOfInstances, routees, pool) - case "scatter-gather" ⇒ ScatterGatherFirstCompletedRouter(nrOfInstances, routees, within, pool) - case "broadcast" ⇒ BroadcastRouter(nrOfInstances, routees, pool) + case "round-robin" ⇒ RoundRobinRouter(nrOfInstances, routees, resizer) + case "random" ⇒ RandomRouter(nrOfInstances, routees, resizer) + case "scatter-gather" ⇒ ScatterGatherFirstCompletedRouter(nrOfInstances, routees, within, resizer) + case "broadcast" ⇒ BroadcastRouter(nrOfInstances, routees, resizer) case x ⇒ throw new ConfigurationException("unknown router type " + x + " for path " + key) } diff --git a/akka-actor/src/main/scala/akka/routing/Pool.scala b/akka-actor/src/main/scala/akka/routing/Pool.scala deleted file mode 100644 index 988820cf18..0000000000 --- a/akka-actor/src/main/scala/akka/routing/Pool.scala +++ /dev/null @@ -1,492 +0,0 @@ -/** - * Copyright (C) 2009-2011 Typesafe Inc. - */ - -package akka.routing - -import akka.dispatch.{ Promise } -import akka.actor._ - -/** - * Actor pooling - * - * An actor pool is an message router for a set of delegate actors. The pool is an actor itself. - * There are a handful of basic concepts that need to be understood when working with and defining your pool. - * - * Selectors - A selector is a trait that determines how and how many pooled actors will receive an incoming message. - * Capacitors - A capacitor is a trait that influences the size of pool. There are effectively two types. - * The first determines the size itself - either fixed or bounded. - * The second determines how to adjust of the pool according to some internal pressure characteristic. 
- * Filters - A filter can be used to refine the raw pressure value returned from a capacitor. - * - * It should be pointed out that all actors in the pool are treated as essentially equivalent. This is not to say - * that one couldn't instance different classes within the pool, only that the pool, when selecting and routing, - * will not take any type information into consideration. - * - * @author Garrick Evans - */ - -object ActorPool { - case object Stat - case class Stats(size: Int) -} - -/** - * Defines the nature of an actor pool. - */ -trait ActorPool { - /** - * Adds a new actor to the pool. The DefaultActorPool implementation will start and link (supervise) this actor. - * This method is invoked whenever the pool determines it must boost capacity. - * @return A new actor for the pool - */ - def instance(defaults: Props): ActorRef - - /** - * This method gets called when a delegate is to be evicted, by default it sends a PoisonPill to the delegate - */ - def evict(delegate: ActorRef): Unit = delegate ! PoisonPill - - /** - * Returns the overall desired change in pool capacity. This method is used by non-static pools as the means - * for the capacity strategy to influence the pool. - * @param _delegates The current sequence of pooled actors - * @return the number of delegates by which the pool should be adjusted (positive, negative or zero) - */ - def capacity(delegates: Seq[ActorRef]): Int - /** - * Provides the results of the selector, one or more actors, to which an incoming message is forwarded. - * This method returns an iterator since a selector might return more than one actor to handle the message. - * You might want to do this to perform redundant processing of particularly error-prone messages. - * @param _delegates The current sequence of pooled actors - * @return a list of actors to which the message will be delivered - */ - def select(delegates: Seq[ActorRef]): Seq[ActorRef] -} - -/** - * A default implementation of a pool that: - * First, invokes the pool's capacitor that tells it, based on the current delegate count - * and its own heuristic by how many delegates the pool should be resized. Resizing can - * can be incremental, decremental or flat. If there is a change to capacity, new delegates - * are added or existing ones are removed. Removed actors are sent the PoisonPill message. - * New actors are automatically started and linked. The pool supervises the actors and will - * use the fault handling strategy specified by the mixed-in ActorPoolSupervisionConfig. - * Pooled actors may be any lifecycle. If you're testing pool sizes during runtime, take a - * look at the unit tests... Any delegate with a Permanent lifecycle will be - * restarted and the pool size will be level with what it was prior to the fault. In just - * about every other case, e.g. the delegates are Temporary or the delegate cannot be - * restarted within the time interval specified in the fault handling strategy, the pool will - * be temporarily shy by that actor (it will have been removed by not back-filled). The - * back-fill if any is required, will occur on the next message [as usual]. - * - * Second, invokes the pool's selector that returns a list of delegates that are to receive - * the incoming message. Selectors may return more than one actor. If partialFill - * is true then it might also the case that fewer than number of desired actors will be - * returned. If partialFill is false, the selector may return duplicate actors to - * reach the desired selectionCount. 
- * - * Lastly, routes by forwarding the incoming message to each delegate in the selected set. - */ -trait DefaultActorPool extends ActorPool { this: Actor ⇒ - import ActorPool._ - - protected[akka] var _delegates = Vector[ActorRef]() - - val defaultProps: Props = Props.default.withDispatcher(this.context.dispatcher.id) - - override def preStart() { - resizeIfAppropriate() - } - - override def postStop() { - _delegates foreach evict - _delegates = Vector.empty - } - - protected def _route(): Actor.Receive = { - // for testing... - case Stat ⇒ - sender ! Stats(_delegates length) - case Terminated(victim) ⇒ - _delegates = _delegates filterNot { victim == } - case msg ⇒ - resizeIfAppropriate() - - select(_delegates) foreach { _ forward msg } - } - - protected def resizeIfAppropriate() { - val requestedCapacity = capacity(_delegates) - val newDelegates = requestedCapacity match { - case qty if qty > 0 ⇒ - _delegates ++ Vector.fill(requestedCapacity)(context.watch(instance(defaultProps))) - - case qty if qty < 0 ⇒ - _delegates.splitAt(_delegates.length + requestedCapacity) match { - case (keep, abandon) ⇒ - abandon foreach evict - keep - } - case _ ⇒ _delegates //No change - } - - _delegates = newDelegates - } -} - -/** - * Selectors - * - * These traits define how, when a message needs to be routed, delegate(s) are chosen from the pool. - * Note that it's acceptable to return more than one actor to handle a given message. - */ - -/** - * Returns the set of delegates with the least amount of message backlog. - */ -trait SmallestMailboxSelector { - /** - * @return the number of delegates that will receive each message - */ - def selectionCount: Int - /** - * If there aren't enough delegates to provide the selectionCount, either - * send the message to fewer, or send the message selectionCount times - * including more than once to some of the delegates. This setting does - * not matter if you configure selectionCount to always be less than or - * equal to the number of delegates in the pool. - * @return true to send to fewer delegates or false to send to duplicate delegates - */ - def partialFill: Boolean - - def select(delegates: Seq[ActorRef]): Seq[ActorRef] = { - var set: Seq[ActorRef] = Nil - var take = if (partialFill) math.min(selectionCount, delegates.length) else selectionCount - - def mailboxSize(a: ActorRef): Int = a match { - case l: LocalActorRef ⇒ l.underlying.mailbox.numberOfMessages - case _ ⇒ Int.MaxValue //Non-local actors mailbox size is unknown, so consider them lowest priority - } - - while (take > 0) { - set = delegates.sortWith((a, b) ⇒ mailboxSize(a) < mailboxSize(b)).take(take) ++ set //Question, doesn't this risk selecting the same actor multiple times? - take -= set.size - } - - set - } -} - -/** - * Returns the set of delegates that occur sequentially 'after' the last delegate from the previous selection - */ -trait RoundRobinSelector { - private var _last: Int = -1; - /** - * @return the number of delegates that will receive each message - */ - def selectionCount: Int - /** - * If there aren't enough delegates to provide the selectionCount, either - * send the message to fewer, or send the message selectionCount times - * including more than once to some of the delegates. This setting does - * not matter if you configure selectionCount to always be less than or - * equal to the number of delegates in the pool. 
- * @return true to send to fewer delegates or false to send to duplicate delegates - */ - def partialFill: Boolean - - def select(delegates: Seq[ActorRef]): Seq[ActorRef] = { - val length = delegates.length - val take = if (partialFill) math.min(selectionCount, length) - else selectionCount - - val set = - for (i ← 0 until take) yield { - _last = (_last + 1) % length - delegates(_last) - } - - set - } -} - -/** - * Capacitors - * - * These traits define how to alter the size of the pool according to some desired behavior. - * Capacitors are required (minimally) by the pool to establish bounds on the number of delegates - * that may exist in the pool. - */ - -/** - * Ensures a fixed number of delegates in the pool - */ -trait FixedSizeCapacitor { - /** - * @return the fixed number of delegates the pool should have - */ - def limit: Int - def capacity(delegates: Seq[ActorRef]): Int = (limit - delegates.size) max 0 -} - -/** - * Constrains the number of delegates to a bounded range. - * You probably don't want to use this trait directly, - * instead look at [[akka.routing.CapacityStrategy]] and [[akka.routing.BoundedCapacityStrategy]]. - * To use this trait you have to implement _eval() which is provided by - * [[akka.routing.BoundedCapacityStrategy]] in terms of pressure() and filter() - * methods. - */ -trait BoundedCapacitor { - /** - * @return the fewest delegates the pool should ever have - */ - def lowerBound: Int - /** - * @return the most delegates the pool should ever have - */ - def upperBound: Int - - def capacity(delegates: Seq[ActorRef]): Int = { - val current = delegates length - val delta = _eval(delegates) - val proposed = current + delta - - if (proposed < lowerBound) delta + (lowerBound - proposed) - else if (proposed > upperBound) delta - (proposed - upperBound) - else delta - } - - /** - * This method is defined when you mix in [[akka.routing.CapacityStrategy]]; it - * returns the "raw" proposed delta which is then clamped by - * lowerBound and upperBound. - * @return proposed delta ignoring bounds - */ - protected def _eval(delegates: Seq[ActorRef]): Int -} - -/** - * Implements pressure() to return the number of delegates with overly-full mailboxes, - * where the pressureThreshold method defines what counts as overly-full. - */ -trait MailboxPressureCapacitor { - - /** - * The pressure will be the number of delegates with at least - * pressureThreshold messages in their mailbox. - * @return mailbox size that counts as pressure - */ - def pressureThreshold: Int - def pressure(delegates: Seq[ActorRef]): Int = - delegates count { - case a: LocalActorRef ⇒ a.underlying.mailbox.numberOfMessages > pressureThreshold - case _ ⇒ false - } -} - -/** - * Implements pressure() to return the number of actors currently processing a - * message. - * In other words, this capacitor counts how many - * delegates are tied up actively processing a message - */ -trait ActiveActorsPressureCapacitor { - def pressure(delegates: Seq[ActorRef]): Int = - delegates count { - case a: LocalActorRef ⇒ - val cell = a.underlying - cell.mailbox.isScheduled && cell.currentMessage != null - case _ ⇒ false - } -} - -/** - * A [[akka.routing.CapacityStrategy]] implements methods pressure() and filter(), where - * pressure() returns the number of "busy" delegates, and filter() computes - * a proposed delta (positive, negative, or zero) in the size of the delegate - * pool. 
- */ -trait CapacityStrategy { - import ActorPool._ - - /** - * This method returns the number of delegates considered busy, or 'pressure level', - * which will be fed into the capacitor and evaluated against the established threshhold. - * For instance, in general, if the current pressure level exceeds the capacity of the - * pool, new delegates will be added. - * @param delegates the current pool of delegates - * @return number of busy delegates, between 0 and delegates.length - */ - def pressure(delegates: Seq[ActorRef]): Int - /** - * This method can be used to smooth the response of the capacitor by considering - * the current pressure and current capacity. - * - * @param pressure current number of busy delegates - * @param capacity current number of delegates - * @return proposed change in the capacity - */ - def filter(pressure: Int, capacity: Int): Int - - /** - * Overrides the _eval() method in [[akka.routing.BoundedCapacity]], - * using filter and pressure to compute a proposed delta. - * @param delegates current delegates - * @return proposed delta in capacity - */ - protected def _eval(delegates: Seq[ActorRef]): Int = filter(pressure(delegates), delegates.size) -} - -/** - * Use this trait to setup a pool that uses a fixed delegate count. - */ -trait FixedCapacityStrategy extends FixedSizeCapacitor - -/** - * Use this trait to setup a pool that may have a variable number of - * delegates but always within an established upper and lower limit. - * - * If mix this into your pool implementation, you must also provide a - * PressureCapacitor and a Filter. - */ -trait BoundedCapacityStrategy extends CapacityStrategy with BoundedCapacitor - -/** - * Filters - * These traits compute a proposed capacity delta from the pressure (pressure - * is the number of busy delegates) and the current capacity. - */ - -/** - * The basic filter trait that composes ramp-up and and back-off subfiltering. - * filter() is defined to be the sum of rampup() and backoff(). - */ -trait Filter { - /** - * Computes a proposed positive (or zero) capacity delta. - * @param pressure the current number of busy delegates - * @param capacity the current number of total delegates - * @return proposed increase in capacity - */ - def rampup(pressure: Int, capacity: Int): Int - /** - * Computes a proposed negative (or zero) capacity delta. - * @param pressure the current number of busy delegates - * @param capacity the current number of total delegates - * @return proposed decrease in capacity (as a negative number) - */ - def backoff(pressure: Int, capacity: Int): Int - - // pass through both filters just to be sure any internal counters - // are updated consistently. ramping up is always + and backing off - // is always - and each should return 0 otherwise... - def filter(pressure: Int, capacity: Int): Int = - rampup(pressure, capacity) + backoff(pressure, capacity) -} - -/** - * This trait is a convenient shorthand to use the [[akka.routing.BasicRampup]] - * and [[akka.routing.BasicBackoff]] subfilters together. - */ -trait BasicFilter extends Filter with BasicRampup with BasicBackoff - -/** - * Filter performs steady incremental growth using only the basic ramp-up subfilter. - * The pool of delegates never gets smaller, only larger. - */ -trait BasicNoBackoffFilter extends BasicRampup { - def filter(pressure: Int, capacity: Int): Int = rampup(pressure, capacity) -} - -/** - * Basic incremental growth as a percentage of the current pool capacity. - * Whenever pressure reaches capacity (i.e. 
all delegates are busy), - * the capacity is increased by a percentage. - */ -trait BasicRampup { - /** - * Percentage to increase capacity whenever all delegates are busy. - * For example, 0.2 would increase 20%, etc. - * @return percentage increase in capacity when delegates are all busy. - */ - def rampupRate: Double - - def rampup(pressure: Int, capacity: Int): Int = - if (pressure < capacity) 0 else math.ceil(rampupRate * capacity) toInt -} - -/** - * Basic decrement as a percentage of the current pool capacity. - * Whenever pressure as a percentage of capacity falls below the - * backoffThreshold, capacity is reduced by the backoffRate. - */ -trait BasicBackoff { - /** - * Fraction of capacity the pool has to fall below before backing off. - * For example, if this is 0.7, then we'll remove some delegates when - * less than 70% of delegates are busy. - * @return fraction of busy delegates where we start to backoff - */ - def backoffThreshold: Double - /** - * Fraction of delegates to be removed when the pool reaches the - * backoffThreshold. - * @return percentage of delegates to remove - */ - def backoffRate: Double - - def backoff(pressure: Int, capacity: Int): Int = - if (capacity > 0 && pressure / capacity < backoffThreshold) math.ceil(-1.0 * backoffRate * capacity) toInt else 0 -} -/** - * This filter tracks the average pressure over the lifetime of the pool (or since last reset) and - * will begin to reduce capacity once this value drops below the provided threshold. The number of - * delegates to cull from the pool is determined by some scaling factor (the backoffRate) multiplied - * by the difference in capacity and pressure. - * - * In essence, [[akka.routing.RunningMeanBackoff]] works the same way as [[akka.routing.BasicBackoff]] - * except that it uses - * a running mean pressure and capacity rather than the current pressure and capacity. - */ -trait RunningMeanBackoff { - /** - * Fraction of mean capacity the pool has to fall below before backing off. - * For example, if this is 0.7, then we'll remove some delegates when - * less than 70% of delegates are busy on average. - * @return fraction of busy delegates where we start to backoff - */ - def backoffThreshold: Double - /** - * The fraction of delegates to be removed when the running mean reaches the - * backoffThreshold. - * @return percentage reduction in capacity - */ - def backoffRate: Double - - private var _pressure: Double = 0.0 - private var _capacity: Double = 0.0 - - def backoff(pressure: Int, capacity: Int): Int = { - _pressure += pressure - _capacity += capacity - - if (capacity > 0 && pressure / capacity < backoffThreshold - && _capacity > 0 && _pressure / _capacity < backoffThreshold) //Why does the entire clause need to be true? - math.floor(-1.0 * backoffRate * (capacity - pressure)).toInt - else 0 - } - - /** - * Resets the running mean pressure and capacity. - * This is never invoked by the library, you have to do - * it by hand if there are points in time where it makes - * sense. 
- */ - def backoffReset { - _pressure = 0.0 - _capacity = 0.0 - } -} diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 2e4e01992e..f920eb3df0 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -36,7 +36,7 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup } def removeRoutees(abandonedRoutees: IndexedSeq[ActorRef]) { - _routees = _routees filterNot (x ⇒ abandonedRoutees.contains(x)) + _routees = _routees diff abandonedRoutees abandonedRoutees foreach underlying.unwatch } @@ -53,7 +53,7 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup else Nil } - if (_props.routerConfig.pool.isEmpty && _routees.isEmpty) + if (_props.routerConfig.resizer.isEmpty && _routees.isEmpty) throw new ActorInitializationException("router " + _props.routerConfig + " did not register routees!") _routees match { @@ -61,7 +61,7 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup } override def !(message: Any)(implicit sender: ActorRef = null): Unit = { - _props.routerConfig.resizePool(routeeProps, actorContext, routees) + _props.routerConfig.resize(routeeProps, actorContext, routees) val s = if (sender eq null) underlying.system.deadLetters else sender @@ -77,7 +77,7 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup } override def ?(message: Any)(implicit timeout: Timeout): Future[Any] = { - _props.routerConfig.resizePool(routeeProps, actorContext, routees) + _props.routerConfig.resize(routeeProps, actorContext, routees) super.?(message)(timeout) } } @@ -123,9 +123,9 @@ trait RouterConfig { } protected def createAndRegisterRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): Unit = { - pool match { + resizer match { case None ⇒ registerRoutees(context, createRoutees(props, context, nrOfInstances, routees)) - case Some(p) ⇒ resizePool(props, context, context.self.asInstanceOf[RoutedActorRef].routees) + case Some(p) ⇒ resize(props, context, context.self.asInstanceOf[RoutedActorRef].routees) } } @@ -143,17 +143,18 @@ trait RouterConfig { context.self.asInstanceOf[RoutedActorRef].removeRoutees(routees) } - def pool: Option[RouterPool] = None + def resizer: Option[Resizer] = None - private val resizePoolInProgress = new AtomicBoolean + private val resizeProgress = new AtomicBoolean + private val resizeCounter = new AtomicLong - def resizePool(props: Props, context: ActorContext, currentRoutees: IndexedSeq[ActorRef]) { - for (p ← pool) { - if (resizePoolInProgress.compareAndSet(false, true)) { + def resize(props: Props, context: ActorContext, currentRoutees: IndexedSeq[ActorRef]) { + for (r ← resizer) { + if (r.isTimeForResize(resizeCounter.getAndIncrement()) && resizeProgress.compareAndSet(false, true)) { try { - p.resize(props, context, currentRoutees, this) + r.resize(props, context, currentRoutees, this) } finally { - resizePoolInProgress.set(false) + resizeProgress.set(false) } } } @@ -287,7 +288,7 @@ object RoundRobinRouter { * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. 
*/ -case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val pool: Option[RouterPool] = None) +case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None) extends RouterConfig with RoundRobinLike { /** @@ -307,10 +308,10 @@ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = } /** - * Constructor that sets the pool to be used. + * Constructor that sets the resizer to be used. * Java API */ - def this(pool: RouterPool) = this(pool = Some(pool)) + def this(resizer: Resizer) = this(resizer = Some(resizer)) } trait RoundRobinLike { this: RouterConfig ⇒ @@ -361,7 +362,7 @@ object RandomRouter { * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ -case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val pool: Option[RouterPool] = None) +case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None) extends RouterConfig with RandomLike { /** @@ -381,10 +382,10 @@ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, } /** - * Constructor that sets the pool to be used. + * Constructor that sets the resizer to be used. * Java API */ - def this(pool: RouterPool) = this(pool = Some(pool)) + def this(resizer: Resizer) = this(resizer = Some(resizer)) } trait RandomLike { this: RouterConfig ⇒ @@ -438,7 +439,7 @@ object BroadcastRouter { * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ -case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val pool: Option[RouterPool] = None) +case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None) extends RouterConfig with BroadcastLike { /** @@ -458,10 +459,10 @@ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = N } /** - * Constructor that sets the pool to be used. + * Constructor that sets the resizer to be used. * Java API */ - def this(pool: RouterPool) = this(pool = Some(pool)) + def this(resizer: Resizer) = this(resizer = Some(resizer)) } @@ -506,7 +507,7 @@ object ScatterGatherFirstCompletedRouter { * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, within: Duration, - override val pool: Option[RouterPool] = None) + override val resizer: Option[Resizer] = None) extends RouterConfig with ScatterGatherFirstCompletedLike { /** @@ -526,10 +527,10 @@ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: It } /** - * Constructor that sets the pool to be used. + * Constructor that sets the resizer to be used. 
* Java API */ - def this(pool: RouterPool, w: Duration) = this(pool = Some(pool), within = w) + def this(resizer: Resizer, w: Duration) = this(resizer = Some(resizer), within = w) } trait ScatterGatherFirstCompletedLike { this: RouterConfig ⇒ @@ -555,55 +556,101 @@ trait ScatterGatherFirstCompletedLike { this: RouterConfig ⇒ } /** - * Routers with dynamically resizable number of routees is implemented by providing a pool - * implementation in [[akka.routing.RouterConfig]]. When the resize method is invoked you can - * create and register more routees with `routerConfig.registerRoutees(actorContext, newRoutees) - * or remove routees with `routerConfig.unregisterRoutees(actorContext, abandonedRoutees)` and - * sending [[akka.actor.PoisonPill]] to them. + * Routers with dynamically resizable number of routees is implemented by providing a Resizer + * implementation in [[akka.routing.RouterConfig]]. */ -trait RouterPool { +trait Resizer { + /** + * Is it time for resizing. Typically implemented with modulo of nth message, but + * could be based on elapsed time or something else. The messageCounter starts with 0 + * for the initial resize and continues with 1 for the first message. Make sure to perform + * initial resize before first message (messageCounter == 0), because there is no guarantee + * that resize will be done when concurrent messages are in play. + */ + def isTimeForResize(messageCounter: Long): Boolean + /** + * Decide if the capacity of the router need to be changed. Will be invoked when `isTimeForResize` + * returns true and no other resize is in progress. + * Create and register more routees with `routerConfig.registerRoutees(actorContext, newRoutees) + * or remove routees with `routerConfig.unregisterRoutees(actorContext, abandonedRoutees)` and + * sending [[akka.actor.PoisonPill]] to them. + */ def resize(props: Props, actorContext: ActorContext, currentRoutees: IndexedSeq[ActorRef], routerConfig: RouterConfig) } -case class DefaultRouterPool( +case class DefaultResizer( /** - * The fewest number of routees the pool should ever have + * The fewest number of routees the router should ever have. */ lowerBound: Int = 1, /** - * The most number of routees the pool should ever have + * The most number of routees the router should ever have. + * Must be greater than or equal to `lowerBound`. */ upperBound: Int = 10, /** - * A routee is considered to be busy (under pressure) when - * it has at least this number of messages in its mailbox. - * When pressureThreshold is defined as 0 the routee - * is considered busy when it is currently processing a - * message. + * Threshold to evaluate if routee is considered to be busy (under pressure). + * Implementation depends on this value (default is 1). + *
    + *
+ * <ul>
+ * <li> 0: number of routees currently processing a message.</li>
+ * <li> 1: number of routees currently processing a message has
+ *      some messages in mailbox.</li>
+ * <li> > 1: number of routees with at least the configured `pressureThreshold`
+ *      messages in their mailbox. Note that estimating mailbox size of
+ *      default UnboundedMailbox is O(N) operation.</li>
+ * </ul>
*/ - pressureThreshold: Int = 3, + pressureThreshold: Int = 1, /** * Percentage to increase capacity whenever all routees are busy. - * For example, 0.2 would increase 20%, etc. + * For example, 0.2 would increase 20% (rounded up), i.e. if current + * capacity is 6 it will request an increase of 2 more routees. */ rampupRate: Double = 0.2, /** - * Fraction of capacity the pool has to fall below before backing off. - * For example, if this is 0.7, then we'll remove some routees when - * less than 70% of routees are busy. - * Use 0.0 to avoid removal of routees. + * Minimum fraction of busy routees before backing off. + * For example, if this is 0.3, then we'll remove some routees only when + * less than 30% of routees are busy, i.e. if current capacity is 10 and + * 3 are busy then the capacity is unchanged, but if 2 or less are busy + * the capacity is decreased. + * + * Use 0.0 or negative to avoid removal of routees. */ - backoffThreshold: Double = 0.7, + backoffThreshold: Double = 0.3, /** - * Fraction of routees to be removed when the pool reaches the + * Fraction of routees to be removed when the resizer reaches the * backoffThreshold. - * Use 0.0 to avoid removal of routees. + * For example, 0.1 would decrease 10% (rounded up), i.e. if current + * capacity is 9 it will request an decrease of 1 routee. */ backoffRate: Double = 0.1, /** - * When the pool shrink the abandoned actors are stopped with PoisonPill after this delay + * When the resizer reduce the capacity the abandoned routee actors are stopped + * with PoisonPill after this delay. The reason for the delay is to give concurrent + * messages a chance to be placed in mailbox before sending PoisonPill. + * Use 0 seconds to skip delay. */ - stopDelay: Duration = 1.second) extends RouterPool { + stopDelay: Duration = 1.second, + /** + * Number of messages between resize operation. + * Use 1 to resize before each message. + */ + resizeOnNthMessage: Int = 10) extends Resizer { + + /** + * Java API constructor for default values except bounds. + */ + def this(lower: Int, upper: Int) = this(lowerBound = lower, upperBound = upper) + + if (lowerBound < 0) throw new IllegalArgumentException("lowerBound must be >= 0, was: [%s]".format(lowerBound)) + if (upperBound < 0) throw new IllegalArgumentException("upperBound must be >= 0, was: [%s]".format(upperBound)) + if (upperBound < lowerBound) throw new IllegalArgumentException("upperBound must be >= lowerBound, was: [%s] < [%s]".format(upperBound, lowerBound)) + if (rampupRate < 0.0) throw new IllegalArgumentException("rampupRate must be >= 0.0, was [%s]".format(rampupRate)) + if (backoffThreshold > 1.0) throw new IllegalArgumentException("backoffThreshold must be <= 1.0, was [%s]".format(backoffThreshold)) + if (backoffRate < 0.0) throw new IllegalArgumentException("backoffRate must be >= 0.0, was [%s]".format(backoffRate)) + if (resizeOnNthMessage <= 0) throw new IllegalArgumentException("resizeOnNthMessage must be > 0, was [%s]".format(resizeOnNthMessage)) + + def isTimeForResize(messageCounter: Long): Boolean = (messageCounter % resizeOnNthMessage == 0) def resize(props: Props, actorContext: ActorContext, currentRoutees: IndexedSeq[ActorRef], routerConfig: RouterConfig) { val requestedCapacity = capacity(currentRoutees) @@ -623,17 +670,23 @@ case class DefaultRouterPool( * sending PoisonPill. */ protected def delayedStop(scheduler: Scheduler, abandon: IndexedSeq[ActorRef]) { - scheduler.scheduleOnce(stopDelay) { - abandon foreach (_ ! 
PoisonPill) + if (abandon.nonEmpty) { + if (stopDelay <= Duration.Zero) { + abandon foreach (_ ! PoisonPill) + } else { + scheduler.scheduleOnce(stopDelay) { + abandon foreach (_ ! PoisonPill) + } + } } } /** - * Returns the overall desired change in pool capacity. Positive value will - * add routees to the pool. Negative value will remove routees from the - * pool. - * @param routees The current actor in the pool - * @return the number of routees by which the pool should be adjusted (positive, negative or zero) + * Returns the overall desired change in resizer capacity. Positive value will + * add routees to the resizer. Negative value will remove routees from the + * resizer. + * @param routees The current actor in the resizer + * @return the number of routees by which the resizer should be adjusted (positive, negative or zero) */ def capacity(routees: IndexedSeq[ActorRef]): Int = { val currentSize = routees.size @@ -648,27 +701,41 @@ case class DefaultRouterPool( /** * Number of routees considered busy, or above 'pressure level'. * - * Default implementation: - * When `pressureThreshold` > 0 the number of routees with at least - * the configured `pressureThreshold` messages in their mailbox, - * otherwise number of routees currently processing a - * message. + * Implementation depends on the value of `pressureThreshold` + * (default is 1). + *
    + *
+ * <ul>
+ * <li> 0: number of routees currently processing a message.</li>
+ * <li> 1: number of routees currently processing a message has
+ *      some messages in mailbox.</li>
+ * <li> > 1: number of routees with at least the configured `pressureThreshold`
+ *      messages in their mailbox. Note that estimating mailbox size of
+ *      default UnboundedMailbox is O(N) operation.</li>
+ * </ul>
* - * @param routees the current pool of routees + * @param routees the current resizer of routees * @return number of busy routees, between 0 and routees.size */ - def pressure(routees: Seq[ActorRef]): Int = { - if (pressureThreshold > 0) { + def pressure(routees: IndexedSeq[ActorRef]): Int = { + if (pressureThreshold == 1) { + routees count { + case a: LocalActorRef ⇒ + val cell = a.underlying + a.underlying.mailbox.isScheduled && cell.currentMessage != null && a.underlying.mailbox.hasMessages + case x ⇒ + false + } + } else if (pressureThreshold > 1) { routees count { case a: LocalActorRef ⇒ a.underlying.mailbox.numberOfMessages >= pressureThreshold - case _ ⇒ false + case x ⇒ false } } else { routees count { case a: LocalActorRef ⇒ val cell = a.underlying - cell.mailbox.isScheduled && cell.currentMessage != null - case _ ⇒ false + a.underlying.mailbox.isScheduled && cell.currentMessage != null + case x ⇒ + false } } } @@ -704,7 +771,7 @@ case class DefaultRouterPool( */ def backoff(pressure: Int, capacity: Int): Int = if (backoffThreshold > 0.0 && backoffRate > 0.0 && capacity > 0 && pressure.toDouble / capacity < backoffThreshold) - math.ceil(-1.0 * backoffRate * capacity) toInt + math.floor(-1.0 * backoffRate * capacity) toInt else 0 } diff --git a/akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java b/akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java index c33a22667b..61b9a573d7 100644 --- a/akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java +++ b/akka-docs/java/code/akka/docs/jrouting/RouterViaConfigExample.java @@ -47,5 +47,12 @@ public class RouterViaConfigExample { for (int i = 1; i <= 10; i++) { router.tell(new ExampleActor.Message(i)); } + + //#configurableRoutingWithResizer + ActorRef router2 = system.actorOf(new Props(ExampleActor.class).withRouter(new FromConfig()), "router2"); + //#configurableRoutingWithResizer + for (int i = 1; i <= 10; i++) { + router2.tell(new ExampleActor.Message(i)); + } } } \ No newline at end of file diff --git a/akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java b/akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java index 094ac8361f..cc3c45169e 100644 --- a/akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java +++ b/akka-docs/java/code/akka/docs/jrouting/RouterViaProgramExample.java @@ -4,6 +4,7 @@ package akka.docs.jrouting; import akka.routing.RoundRobinRouter; +import akka.routing.DefaultResizer; import akka.actor.ActorRef; import akka.actor.Props; import akka.actor.UntypedActor; @@ -56,5 +57,15 @@ public class RouterViaProgramExample { for (int i = 1; i <= 6; i++) { router2.tell(new ExampleActor.Message(i)); } + + //#programmaticRoutingWithResizer + int lowerBound = 2; + int upperBound = 15; + DefaultResizer resizer = new DefaultResizer(lowerBound, upperBound); + ActorRef router3 = system.actorOf(new Props(ExampleActor.class).withRouter(new RoundRobinRouter(nrOfInstances))); + //#programmaticRoutingWithResizer + for (int i = 1; i <= 6; i++) { + router3.tell(new ExampleActor.Message(i)); + } } } \ No newline at end of file diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index 16c2a0864d..3544c95a51 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -170,6 +170,25 @@ This message is called ``Broadcast`` and is used in the following manner: Only the actual message is forwarded to the routees, i.e. "Watch out for Davy Jones' locker" in the example above. 
It is up to the routee implementation whether to handle the broadcast message or not. +Dynamically Resizable Routers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +All routers can be used with a fixed number of routees or with a resize strategy to adjust the number +of routees dynamically. + +This is an example of how to create a resizable router that is defined in configuration: + +.. includecode:: ../scala/code/akka/docs/routing/RouterViaConfigExample.scala#config-resize + +.. includecode:: code/akka/docs/jrouting/RouterViaConfigExample.java#configurableRoutingWithResizer + +Several more configuration options are availble and described in ``akka.actor.deployment.default.resizer`` +section of the reference :ref:`configuration`. + +This is an example of how to programatically create a resizable router: + +.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingWithResizer + Custom Router ^^^^^^^^^^^^^ @@ -218,4 +237,10 @@ If you are interested in how to use the VoteCountRouter it looks like this: .. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crTest +Custom Resizer +************** + +A router with dynamically resizable number of routees is implemented by providing a ``akka.routing.Resizer`` +in ``resizer`` method of the ``RouterConfig``. See ``akka.routing.DefaultResizer`` for inspiration +of how to write your own resize strategy. diff --git a/akka-docs/project/migration-guide-1.3.x-2.0.x.rst b/akka-docs/project/migration-guide-1.3.x-2.0.x.rst index af66da471c..44fd51884c 100644 --- a/akka-docs/project/migration-guide-1.3.x-2.0.x.rst +++ b/akka-docs/project/migration-guide-1.3.x-2.0.x.rst @@ -27,3 +27,8 @@ determines when the actor will stop itself and hence closes the window for a reply to be received; it is independent of the timeout applied when awaiting completion of the :class:`Future`, however, the actor will complete the :class:`Future` with an :class:`AskTimeoutException` when it stops itself. + +ActorPool +--------- + +The ActorPool has been replaced by dynamically resizable routers. diff --git a/akka-docs/scala/code/akka/docs/routing/ActorPoolExample.scala b/akka-docs/scala/code/akka/docs/routing/ActorPoolExample.scala deleted file mode 100644 index 02ee349c22..0000000000 --- a/akka-docs/scala/code/akka/docs/routing/ActorPoolExample.scala +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Copyright (C) 2009-2011 Typesafe Inc. - */ -package akka.docs.routing - -import akka.routing.{ BasicNoBackoffFilter, SmallestMailboxSelector, DefaultActorPool } -import akka.actor.{ ActorRef, Props, Actor } - -//#testPool -class TestPool extends Actor with DefaultActorPool with SmallestMailboxSelector with BasicNoBackoffFilter { - - def capacity(delegates: Seq[ActorRef]) = 5 - protected def receive = _route - def rampupRate = 0.1 - def selectionCount = 1 - def partialFill = true - - def instance(defaults: Props) = context.actorOf(defaults.withCreator(new Actor { - def receive = { - case _ ⇒ // do something - } - })) -} -//#testPool diff --git a/akka-docs/scala/code/akka/docs/routing/BoundedCapacitorExample.scala b/akka-docs/scala/code/akka/docs/routing/BoundedCapacitorExample.scala deleted file mode 100644 index 680cb5fdfe..0000000000 --- a/akka-docs/scala/code/akka/docs/routing/BoundedCapacitorExample.scala +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Copyright (C) 2009-2011 Typesafe Inc. 
- */ -package akka.docs.routing - -import akka.actor.ActorRef - -//#boundedCapacitor -trait BoundedCapacitor { - def lowerBound: Int - def upperBound: Int - - def capacity(delegates: Seq[ActorRef]): Int = { - val current = delegates length - var delta = _eval(delegates) - val proposed = current + delta - - if (proposed < lowerBound) delta += (lowerBound - proposed) - else if (proposed > upperBound) delta -= (proposed - upperBound) - - delta - } - - protected def _eval(delegates: Seq[ActorRef]): Int -} -//#boundedCapacitor diff --git a/akka-docs/scala/code/akka/docs/routing/CapacityStrategyExample.scala b/akka-docs/scala/code/akka/docs/routing/CapacityStrategyExample.scala deleted file mode 100644 index b80fa28560..0000000000 --- a/akka-docs/scala/code/akka/docs/routing/CapacityStrategyExample.scala +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Copyright (C) 2009-2011 Typesafe Inc. - */ -package akka.docs.routing - -import akka.routing.ActorPool -import akka.actor.ActorRef - -//#capacityStrategy -trait CapacityStrategy { - import ActorPool._ - - def pressure(delegates: Seq[ActorRef]): Int - def filter(pressure: Int, capacity: Int): Int - - protected def _eval(delegates: Seq[ActorRef]): Int = - filter(pressure(delegates), delegates.size) -} -//#capacityStrategy \ No newline at end of file diff --git a/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala b/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala index d3c3e848c2..3f06e93b28 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala +++ b/akka-docs/scala/code/akka/docs/routing/RouterViaConfigExample.scala @@ -25,6 +25,17 @@ object RouterWithConfigExample extends App { } } //#config + //#config-resize + akka.actor.deployment { + /router2 { + router = round-robin + resizer { + lower-bound = 2 + upper-bound = 15 + } + } + } + //#config-resize """) val system = ActorSystem("Example", config) //#configurableRouting @@ -32,4 +43,10 @@ object RouterWithConfigExample extends App { "router") //#configurableRouting 1 to 10 foreach { i ⇒ router ! Message(i) } + + //#configurableRoutingWithResizer + val router2 = system.actorOf(Props[ExampleActor].withRouter(FromConfig()), + "router2") + //#configurableRoutingWithResizer + 1 to 10 foreach { i ⇒ router2 ! Message(i) } } \ No newline at end of file diff --git a/akka-docs/scala/code/akka/docs/routing/RouterViaProgramExample.scala b/akka-docs/scala/code/akka/docs/routing/RouterViaProgramExample.scala index 575c2b7b07..783a95d767 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterViaProgramExample.scala +++ b/akka-docs/scala/code/akka/docs/routing/RouterViaProgramExample.scala @@ -5,6 +5,7 @@ package akka.docs.routing import akka.routing.RoundRobinRouter import akka.actor.{ ActorRef, Props, Actor, ActorSystem } +import akka.routing.DefaultResizer case class Message1(nbr: Int) @@ -31,4 +32,12 @@ object RoutingProgrammaticallyExample extends App { RoundRobinRouter(routees = routees))) //#programmaticRoutingRoutees 1 to 6 foreach { i ⇒ router2 ! Message1(i) } + + //#programmaticRoutingWithResizer + val resizer = DefaultResizer(lowerBound = 2, upperBound = 15) + val router3 = system.actorOf(Props[ExampleActor1].withRouter( + RoundRobinRouter(resizer = Some(resizer)))) + //#programmaticRoutingWithResizer + 1 to 6 foreach { i ⇒ router3 ! 
Message1(i) } + } \ No newline at end of file diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index f68b2400a6..1ba967f772 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -171,6 +171,26 @@ This message is called ``Broadcast`` and is used in the following manner: Only the actual message is forwarded to the routees, i.e. "Watch out for Davy Jones' locker" in the example above. It is up to the routee implementation whether to handle the broadcast message or not. +Dynamically Resizable Routers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +All routers can be used with a fixed number of routees or with a resize strategy to adjust the number +of routees dynamically. + +This is an example of how to create a resizable router that is defined in configuration: + +.. includecode:: code/akka/docs/routing/RouterViaConfigExample.scala#config-resize + +.. includecode:: code/akka/docs/routing/RouterViaConfigExample.scala#configurableRoutingWithResizer + +Several more configuration options are availble and described in ``akka.actor.deployment.default.resizer`` +section of the reference :ref:`configuration`. + +This is an example of how to programatically create a resizable router: + +.. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#programmaticRoutingWithResizer + + Custom Router ^^^^^^^^^^^^^ @@ -217,73 +237,10 @@ All in all the custom router looks like this: If you are interested in how to use the VoteCountRouter you can have a look at the test class `RoutingSpec `_ -Actor Pool ----------- +Custom Resizer +************** -An actor pool routes incoming messages to other actors. It has different semantics however when it comes to how those -actors are managed and selected for dispatch. Therein lies the difference. The pool manages, from start to shutdown, -the lifecycle of all delegated actors. The number of actors in a pool can be fixed or grow and shrink over time. -Also, messages can be routed to more than one actor in the pool if so desired. This is a useful little feature for -accounting for expected failure - especially with remoting - where you can invoke the same request of multiple -actors and just take the first, best response. +A router with dynamically resizable number of routees is implemented by providing a ``akka.routing.Resizer`` +in ``resizer`` method of the ``RouterConfig``. See ``akka.routing.DefaultResizer`` for inspiration +of how to write your own resize strategy. -The actor pool is built around three concepts: capacity, filtering and selection. - -Selection -^^^^^^^^^ - -All pools require a ``Selector`` to be mixed-in. This trait controls how and how many actors in the pool will -receive the incoming message. Define *selectionCount* to some positive number greater than one to route to -multiple actors. Currently two are provided: - -* `SmallestMailboxSelector `_ - Using the exact same logic as the iterator of the same name, the pooled actor with the fewest number of pending messages will be chosen. -* `RoundRobinSelector `_ - Performs a very simple index-based selection, wrapping around the end of the list, very much like the CyclicIterator does. - -Partial Fills -************* - -When selecting more than one pooled actor, its possible that in order to fulfill the requested amount, -the selection set must contain duplicates. By setting ``partialFill`` to ``true``, you instruct the selector to -return only unique actors from the pool. 
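
The ``Custom Resizer`` section introduced above points at ``akka.routing.DefaultResizer`` for inspiration. As a rough, hedged sketch of what such a strategy can look like (the ``GrowOnlyResizer`` name, its ``cap`` parameter and the resize interval are invented for illustration, and it assumes ``registerRoutees`` is callable from a resizer as described in the ``Resizer`` scaladoc):

.. code-block:: scala

  import akka.actor.{ ActorContext, ActorRef, Props }
  import akka.routing.{ Resizer, RouterConfig }

  // Illustrative resizer that only ever grows the router, one routee at a time, up to a fixed cap.
  class GrowOnlyResizer(cap: Int) extends Resizer {

    // Resize before the first message (messageCounter == 0) and then on every 10th message.
    def isTimeForResize(messageCounter: Long): Boolean = messageCounter % 10 == 0

    def resize(props: Props, actorContext: ActorContext,
               currentRoutees: IndexedSeq[ActorRef], routerConfig: RouterConfig) {
      if (currentRoutees.size < cap) {
        // Create one more routee and hand it over to the router, as the Resizer scaladoc describes.
        val newRoutee = actorContext.actorOf(props)
        routerConfig.registerRoutees(actorContext, IndexedSeq(newRoutee))
      }
    }
  }

Such a resizer would be plugged in the same way as ``DefaultResizer``, e.g. ``RoundRobinRouter(resizer = Some(new GrowOnlyResizer(10)))``.
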
- -Capacity -^^^^^^^^ - -As you'd expect, capacity traits determine how the pool is funded with actors. There are two types of strategies that can be employed: - -* `FixedCapacityStrategy `_ - When you mix this into your actor pool, you define a pool size and when the pool is started, it will have that number of actors within to which messages will be delegated. -* `BoundedCapacityStrategy `_ - When you mix this into your actor pool, you define upper and lower bounds, and when the pool is started, it will have the minimum number of actors in place to handle messages. You must also mix-in a Capacitor and a Filter when using this strategy (see below). - -The *BoundedCapacityStrategy* requires additional logic to function. Specifically it requires a *Capacitor* and a *Filter*. -Capacitors are used to determine the pressure that the pool is under and provide a (usually) raw reading of this information. -Currently we provide for the use of either mailbox backlog or active futures count as a means of evaluating pool pressure. -Each expresses itself as a simple number - a reading of the number of actors either with mailbox sizes over a certain threshold -or blocking a thread waiting on a future to complete or expire. - -Filtering -^^^^^^^^^ - -A *Filter* is a trait that modifies the raw pressure reading returned from a Capacitor such that it drives the -adjustment of the pool capacity to a desired end. More simply, if we just used the pressure reading alone, -we might only ever increase the size of the pool (to respond to overload) or we might only have a single -mechanism for reducing the pool size when/if it became necessary. This behavior is fully under your control -through the use of *Filters*. Let's take a look at some code to see how this works: - -.. includecode:: code/akka/docs/routing/BoundedCapacitorExample.scala#boundedCapacitor - -.. includecode:: code/akka/docs/routing/CapacityStrategyExample.scala#capacityStrategy - -Here we see how the filter function will have the chance to modify the pressure reading to influence the capacity change. -You are free to implement filter() however you like. We provide a -`Filter `_ trait that -evaluates both a rampup and a backoff subfilter to determine how to use the pressure reading to alter the pool capacity. -There are several sub filters available to use, though again you may create whatever makes the most sense for you pool: - -* `BasicRampup `_ - When pressure exceeds current capacity, increase the number of actors in the pool by some factor (*rampupRate*) of the current pool size. -* `BasicBackoff `_ - When the pressure ratio falls under some predefined amount (*backoffThreshold*), decrease the number of actors in the pool by some factor of the current pool size. -* `RunningMeanBackoff `_ - This filter tracks the average pressure-to-capacity over the lifetime of the pool (or since the last time the filter was reset) and will begin to reduce capacity once this mean falls below some predefined amount. The number of actors that will be stopped is determined by some factor of the difference between the current capacity and pressure. The idea behind this filter is to reduce the likelihood of "thrashing" (removing then immediately creating...) pool actors by delaying the backoff until some quiescent stage of the pool. Put another way, use this subfilter to allow quick rampup to handle load and more subtle backoff as that decreases over time. - -Example Usage -^^^^^^^^^^^^^ - -.. 
includecode:: code/akka/docs/routing/ActorPoolExample.scala#testPool diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index a7836c187d..d8f466c9d2 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -26,10 +26,10 @@ class RemoteDeployer(_settings: ActorSystem.Settings) extends Deployer(_settings if (nodes.isEmpty || deploy.routing == NoRouter) d else { val r = deploy.routing match { - case RoundRobinRouter(x, _) ⇒ RemoteRoundRobinRouter(x, nodes) - case RandomRouter(x, _) ⇒ RemoteRandomRouter(x, nodes) - case BroadcastRouter(x, _) ⇒ RemoteBroadcastRouter(x, nodes) - case ScatterGatherFirstCompletedRouter(x, _, w) ⇒ RemoteScatterGatherFirstCompletedRouter(x, nodes, w) + case RoundRobinRouter(x, _, resizer) ⇒ RemoteRoundRobinRouter(x, nodes, resizer) + case RandomRouter(x, _, resizer) ⇒ RemoteRandomRouter(x, nodes, resizer) + case BroadcastRouter(x, _, resizer) ⇒ RemoteBroadcastRouter(x, nodes, resizer) + case ScatterGatherFirstCompletedRouter(x, _, w, resizer) ⇒ RemoteScatterGatherFirstCompletedRouter(x, nodes, w, resizer) } Some(deploy.copy(routing = r)) } diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala index 748e3694bb..52b2d05618 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala @@ -11,7 +11,7 @@ import akka.config.ConfigurationException import akka.util.Duration trait RemoteRouterConfig extends RouterConfig { - override protected def createRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): Vector[ActorRef] = (nrOfInstances, routees) match { + override def createRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees) match { case (_, Nil) ⇒ throw new ConfigurationException("must specify list of remote nodes") case (n, xs) ⇒ val nodes = routees map { @@ -20,7 +20,7 @@ trait RemoteRouterConfig extends RouterConfig { } val node = Stream.continually(nodes).flatten.iterator val impl = context.system.asInstanceOf[ActorSystemImpl] //FIXME should we rely on this cast to work here? - Vector.empty[ActorRef] ++ (for (i ← 1 to nrOfInstances) yield { + IndexedSeq.empty[ActorRef] ++ (for (i ← 1 to nrOfInstances) yield { val name = "c" + i val deploy = Deploy("", ConfigFactory.empty(), None, props.routerConfig, RemoteScope(node.next)) impl.provider.actorOf(impl, props, context.self.asInstanceOf[InternalActorRef], context.self.path / name, false, Some(deploy)) @@ -39,13 +39,20 @@ trait RemoteRouterConfig extends RouterConfig { * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ -case class RemoteRoundRobinRouter(nrOfInstances: Int, routees: Iterable[String]) extends RemoteRouterConfig with RoundRobinLike { +case class RemoteRoundRobinRouter(nrOfInstances: Int, routees: Iterable[String], override val resizer: Option[Resizer] = None) + extends RemoteRouterConfig with RoundRobinLike { /** * Constructor that sets the routees to be used. 
* Java API */ - def this(n: Int, t: java.util.Collection[String]) = this(n, t.asScala) + def this(n: Int, t: java.lang.Iterable[String]) = this(n, t.asScala) + + /** + * Constructor that sets the resizer to be used. + * Java API + */ + def this(resizer: Resizer) = this(0, Nil, Some(resizer)) } /** @@ -59,13 +66,20 @@ case class RemoteRoundRobinRouter(nrOfInstances: Int, routees: Iterable[String]) * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ -case class RemoteRandomRouter(nrOfInstances: Int, routees: Iterable[String]) extends RemoteRouterConfig with RandomLike { +case class RemoteRandomRouter(nrOfInstances: Int, routees: Iterable[String], override val resizer: Option[Resizer] = None) + extends RemoteRouterConfig with RandomLike { /** * Constructor that sets the routees to be used. * Java API */ - def this(n: Int, t: java.util.Collection[String]) = this(n, t.asScala) + def this(n: Int, t: java.lang.Iterable[String]) = this(n, t.asScala) + + /** + * Constructor that sets the resizer to be used. + * Java API + */ + def this(resizer: Resizer) = this(0, Nil, Some(resizer)) } /** @@ -79,13 +93,20 @@ case class RemoteRandomRouter(nrOfInstances: Int, routees: Iterable[String]) ext * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ -case class RemoteBroadcastRouter(nrOfInstances: Int, routees: Iterable[String]) extends RemoteRouterConfig with BroadcastLike { +case class RemoteBroadcastRouter(nrOfInstances: Int, routees: Iterable[String], override val resizer: Option[Resizer] = None) + extends RemoteRouterConfig with BroadcastLike { /** * Constructor that sets the routees to be used. * Java API */ - def this(n: Int, t: java.util.Collection[String]) = this(n, t.asScala) + def this(n: Int, t: java.lang.Iterable[String]) = this(n, t.asScala) + + /** + * Constructor that sets the resizer to be used. + * Java API + */ + def this(resizer: Resizer) = this(0, Nil, Some(resizer)) } /** @@ -99,12 +120,19 @@ case class RemoteBroadcastRouter(nrOfInstances: Int, routees: Iterable[String]) * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ -case class RemoteScatterGatherFirstCompletedRouter(nrOfInstances: Int, routees: Iterable[String], within: Duration) +case class RemoteScatterGatherFirstCompletedRouter(nrOfInstances: Int, routees: Iterable[String], within: Duration, + override val resizer: Option[Resizer] = None) extends RemoteRouterConfig with ScatterGatherFirstCompletedLike { /** * Constructor that sets the routees to be used. * Java API */ - def this(n: Int, t: java.util.Collection[String], w: Duration) = this(n, t.asScala, w) + def this(n: Int, t: java.lang.Iterable[String], w: Duration) = this(n, t.asScala, w) + + /** + * Constructor that sets the resizer to be used. + * Java API + */ + def this(resizer: Resizer, w: Duration) = this(0, Nil, w, Some(resizer)) } From d3eb9a29a67c053c24d5bc790972dd1f0f717bcd Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 10 Jan 2012 16:29:14 +0100 Subject: [PATCH 28/84] Directing the Http docs to Play! 
mini --- akka-docs/modules/http.rst | 47 +++----------------------------------- 1 file changed, 3 insertions(+), 44 deletions(-) diff --git a/akka-docs/modules/http.rst b/akka-docs/modules/http.rst index a18b182f0b..8388d65702 100644 --- a/akka-docs/modules/http.rst +++ b/akka-docs/modules/http.rst @@ -7,49 +7,8 @@ HTTP .. contents:: :local: -When deploying in a servlet container: --------------------------------------------- - -If you deploy Akka in a JEE container, don't forget to create an Akka initialization and cleanup hook: - -.. code-block:: scala - - package com.my //<--- your own package - import akka.util.AkkaLoader - import akka.cluster.BootableRemoteActorService - import akka.actor.BootableActorLoaderService - import javax.servlet.{ServletContextListener, ServletContextEvent} - - /** - * This class can be added to web.xml mappings as a listener to start and postStop Akka. - * - * ... - * - * com.my.Initializer - * - * ... - * - */ - class Initializer extends ServletContextListener { - lazy val loader = new AkkaLoader - def contextDestroyed(e: ServletContextEvent): Unit = loader.shutdown - def contextInitialized(e: ServletContextEvent): Unit = - loader.boot(true, new BootableActorLoaderService with BootableRemoteActorService) //<--- Important - // loader.boot(true, new BootableActorLoaderService {}) // If you don't need akka-remote - } - -For Java users, it's currently only possible to use BootableActorLoaderService, but you'll need to use: akka.actor.DefaultBootableActorLoaderService +Play! +----- -Then you just declare it in your web.xml: - -.. code-block:: xml - - - ... - - your.package.Initializer - - ... - - +Akka will recommend using `Play! Mini `_ From 0d4763c3b0d913dba72ff724dffa3035ade8c3d3 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 10 Jan 2012 17:50:07 +0100 Subject: [PATCH 29/84] Fixed racy test, which was ported from ActorPoolSpec. See #1557 --- .../test/scala/akka/routing/ResizerSpec.scala | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index 16ac1e0c86..f6116835b1 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -155,9 +155,6 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with // make sure the pool starts at the expected lower limit and grows to the upper as needed // as influenced by the backlog of blocking pooled actors - var latch = TestLatch(3) - val count = new AtomicInteger(0) - val resizer = DefaultResizer( lowerBound = 2, upperBound = 4, @@ -168,7 +165,7 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with val router = system.actorOf(Props(new Actor { def receive = { - case n: Int ⇒ + case (n: Int, latch: TestLatch, count: AtomicInteger) ⇒ (n millis).dilated.sleep count.incrementAndGet latch.countDown() @@ -180,26 +177,29 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(2) - def loop(loops: Int, t: Int) = { - latch = TestLatch(loops) + def loop(loops: Int, t: Int, latch: TestLatch, count: AtomicInteger) = { count.set(0) for (m ← 0 until loops) { - router ! 
t + router.!((t, latch, count)) (10 millis).dilated.sleep } } // 2 more should go thru without triggering more - loop(2, 200) - Await.ready(latch, TestLatch.DefaultTimeout) - count.get must be(2) + val count1 = new AtomicInteger + val latch1 = TestLatch(2) + loop(2, 200, latch1, count1) + Await.ready(latch1, TestLatch.DefaultTimeout) + count1.get must be(2) Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(2) // a whole bunch should max it out - loop(10, 200) - Await.ready(latch, TestLatch.DefaultTimeout) - count.get must be(10) + val count2 = new AtomicInteger + val latch2 = TestLatch(10) + loop(10, 200, latch2, count2) + Await.ready(latch2, TestLatch.DefaultTimeout) + count2.get must be(10) Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(4) From 762a6017e927f7e7f9ff5a469d0e858af79f1c4c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 10 Jan 2012 17:50:17 +0100 Subject: [PATCH 30/84] Verify config override of router nr-of-instances. See #1607 * It wasn't a bug. I think the confusion came from config without 'router' defined. * Added test * Added some clarification to docs --- .../test/scala/akka/routing/RoutingSpec.scala | 23 ++++++++++++++++++- akka-actor/src/main/resources/reference.conf | 1 + akka-docs/java/routing.rst | 8 +++++-- akka-docs/scala/routing.rst | 7 ++++-- 4 files changed, 34 insertions(+), 5 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 711bf04371..419d367d19 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -15,6 +15,15 @@ import com.typesafe.config.ConfigFactory object RoutingSpec { + val config = """ + akka.actor.deployment { + /router1 { + router = round-robin + nr-of-instances = 3 + } + } + """ + class TestActor extends Actor { def receive = { case _ ⇒ @@ -31,7 +40,7 @@ object RoutingSpec { } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class RoutingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { +class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with ImplicitSender { import akka.routing.RoutingSpec._ @@ -87,6 +96,18 @@ class RoutingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { Await.ready(doneLatch, 1 seconds) } + "use configured nr-of-instances when FromConfig" in { + val router = system.actorOf(Props[TestActor].withRouter(FromConfig), "router1") + Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(3) + system.stop(router) + } + + "use configured nr-of-instances when router is specified" in { + val router = system.actorOf(Props[TestActor].withRouter(RoundRobinRouter(nrOfInstances = 2)), "router1") + Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(3) + system.stop(router) + } + } "no router" must { diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index 5b74a24c8f..e016810d2c 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -76,6 +76,7 @@ akka { # supplied in the source code (overridable using create-as below) # - routees.paths: will look the paths up using actorFor and route to # them, i.e. 
will not create children + # - resizer: dynamically resizable number of routees as specified in resizer below router = "from-code" # number of children to create in case of a non-direct router; this setting diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index 3544c95a51..e80514a8fe 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -44,8 +44,9 @@ You can also give the router already created routees as in: When you create a router programatically you define the number of routees *or* you pass already created routees to it. If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded. -*It is also worth pointing out that if you define the number of routees in the configuration file then this -value will be used instead of any programmatically sent parameters.* +*It is also worth pointing out that if you define the number of routees (``nr-of-instances`` or ``routees``) in +the configuration file then this value will be used instead of any programmatically sent parameters, but you must +also define the ``router`` property in the configuration.* Once you have the router actor it is just to send messages to it as you would to any actor: @@ -189,6 +190,9 @@ This is an example of how to programatically create a resizable router: .. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingWithResizer +*It is also worth pointing out that if you define the ``router`` in the configuration file then this value +will be used instead of any programmatically sent parameters.* + Custom Router ^^^^^^^^^^^^^ diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index 1ba967f772..ad06b67b8b 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -44,8 +44,9 @@ You can also give the router already created routees as in: When you create a router programatically you define the number of routees *or* you pass already created routees to it. If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded. -*It is also worth pointing out that if you define the number of routees in the configuration file then this -value will be used instead of any programmatically sent parameters.* +*It is also worth pointing out that if you define the number of routees (``nr-of-instances`` or ``routees``) in +the configuration file then this value will be used instead of any programmatically sent parameters, but you must +also define the ``router`` property in the configuration.* Once you have the router actor it is just to send messages to it as you would to any actor: @@ -190,6 +191,8 @@ This is an example of how to programatically create a resizable router: .. includecode:: code/akka/docs/routing/RouterViaProgramExample.scala#programmaticRoutingWithResizer +*It is also worth pointing out that if you define the ``router`` in the configuration file then this value +will be used instead of any programmatically sent parameters.* Custom Router ^^^^^^^^^^^^^ From 85b673b63ffd61a02ae1393b8115160e4341a108 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 11 Jan 2012 11:13:03 +0100 Subject: [PATCH 31/84] Improvements based on feedback. 
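
The routing documentation changes above state that ``nr-of-instances`` from the deployment configuration takes precedence over programmatically supplied values, provided ``router`` is also defined there. A hedged sketch of what that looks like end to end (the ``Worker`` actor, the ``/service`` path and the system name are invented for illustration, mirroring the test added to RoutingSpec):

.. code-block:: scala

  import akka.actor.{ Actor, ActorSystem, Props }
  import akka.routing.{ FromConfig, RoundRobinRouter }
  import com.typesafe.config.ConfigFactory

  class Worker extends Actor {
    def receive = { case _ ⇒ // do something
    }
  }

  // 'router' and 'nr-of-instances' are both defined for this path, so the
  // configured value (3) takes precedence over anything supplied in code.
  val config = ConfigFactory.parseString("""
    akka.actor.deployment {
      /service {
        router = round-robin
        nr-of-instances = 3
      }
    }
    """)
  val system = ActorSystem("Example", config)

  // Ends up with 3 routees, taken entirely from configuration:
  val router = system.actorOf(Props[Worker].withRouter(FromConfig()), "service")
  // Creating the router at the same configured path with
  // RoundRobinRouter(nrOfInstances = 2) would likewise end up with 3 routees,
  // as the text above describes:
  // system.actorOf(Props[Worker].withRouter(RoundRobinRouter(nrOfInstances = 2)), "service")
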
See #1557 * Renamed resizeOnNthMessage to messagesPerResize * Moved resize state from RouterConfig to RoutedActorRef so that Props can be shared --- .../test/scala/akka/routing/ResizerSpec.scala | 6 +- akka-actor/src/main/resources/reference.conf | 2 +- .../src/main/scala/akka/actor/Deployer.scala | 2 +- .../src/main/scala/akka/routing/Routing.scala | 81 +++++++++---------- 4 files changed, 42 insertions(+), 49 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index f6116835b1..d87d688231 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -130,7 +130,7 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with lowerBound = 1, upperBound = 3, pressureThreshold = 0, - resizeOnNthMessage = 1) + messagesPerResize = 1) val router = system.actorOf(Props[BusyActor].withRouter(RoundRobinRouter(resizer = Some(resizer)))) @@ -160,7 +160,7 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with upperBound = 4, rampupRate = 0.1, pressureThreshold = 1, - resizeOnNthMessage = 1, + messagesPerResize = 1, backoffThreshold = 0.0) val router = system.actorOf(Props(new Actor { @@ -214,7 +214,7 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with backoffRate = 1.0, backoffThreshold = 0.20, pressureThreshold = 1, - resizeOnNthMessage = 1) + messagesPerResize = 1) val router = system.actorOf(Props(new Actor { def receive = { diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index e016810d2c..882fccea55 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -146,7 +146,7 @@ akka { # Number of messages between resize operation. # Use 1 to resize before each message. 
- resize-on-nth-message = 10 + messages-per-resize = 10 } } } diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index f32bb6fea8..35ec05432a 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -65,7 +65,7 @@ class Deployer(val settings: ActorSystem.Settings) { backoffThreshold = resizerConfig.getDouble("backoff-threshold"), backoffRate = resizerConfig.getDouble("backoff-rate"), stopDelay = Duration(resizerConfig.getMilliseconds("stop-delay"), TimeUnit.MILLISECONDS), - resizeOnNthMessage = resizerConfig.getInt("resize-on-nth-message"))) + messagesPerResize = resizerConfig.getInt("messages-per-resize"))) } else { None } diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index f920eb3df0..377fd2234d 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -24,6 +24,8 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup _path) { private val routeeProps = _props.copy(routerConfig = NoRouter) + private val resizeProgress = new AtomicBoolean + private val resizeCounter = new AtomicLong @volatile private var _routees: IndexedSeq[ActorRef] = IndexedSeq.empty[ActorRef] // this MUST be initialized during createRoute @@ -41,6 +43,8 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup } val route = _props.routerConfig.createRoute(routeeProps, actorContext, this) + // initial resize, before message send + resize() def applyRoute(sender: ActorRef, message: Any): Iterable[Destination] = message match { case _: AutoReceivedMessage ⇒ Nil @@ -61,7 +65,7 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup } override def !(message: Any)(implicit sender: ActorRef = null): Unit = { - _props.routerConfig.resize(routeeProps, actorContext, routees) + resize() val s = if (sender eq null) underlying.system.deadLetters else sender @@ -77,9 +81,21 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup } override def ?(message: Any)(implicit timeout: Timeout): Future[Any] = { - _props.routerConfig.resize(routeeProps, actorContext, routees) + resize() super.?(message)(timeout) } + + def resize() { + for (r ← _props.routerConfig.resizer) { + if (r.isTimeForResize(resizeCounter.getAndIncrement()) && resizeProgress.compareAndSet(false, true)) { + try { + r.resize(routeeProps, actorContext, routees, _props.routerConfig) + } finally { + resizeProgress.set(false) + } + } + } + } } /** @@ -123,9 +139,8 @@ trait RouterConfig { } protected def createAndRegisterRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): Unit = { - resizer match { - case None ⇒ registerRoutees(context, createRoutees(props, context, nrOfInstances, routees)) - case Some(p) ⇒ resize(props, context, context.self.asInstanceOf[RoutedActorRef].routees) + if (resizer.isEmpty) { + registerRoutees(context, createRoutees(props, context, nrOfInstances, routees)) } } @@ -143,23 +158,12 @@ trait RouterConfig { context.self.asInstanceOf[RoutedActorRef].removeRoutees(routees) } + /** + * Routers with dynamically resizable number of routees return the [[akka.routing.Resizer]] + * to use. 
+ */ def resizer: Option[Resizer] = None - private val resizeProgress = new AtomicBoolean - private val resizeCounter = new AtomicLong - - def resize(props: Props, context: ActorContext, currentRoutees: IndexedSeq[ActorRef]) { - for (r ← resizer) { - if (r.isTimeForResize(resizeCounter.getAndIncrement()) && resizeProgress.compareAndSet(false, true)) { - try { - r.resize(props, context, currentRoutees, this) - } finally { - resizeProgress.set(false) - } - } - } - } - } /** @@ -635,7 +639,7 @@ case class DefaultResizer( * Number of messages between resize operation. * Use 1 to resize before each message. */ - resizeOnNthMessage: Int = 10) extends Resizer { + messagesPerResize: Int = 10) extends Resizer { /** * Java API constructor for default values except bounds. @@ -648,9 +652,9 @@ case class DefaultResizer( if (rampupRate < 0.0) throw new IllegalArgumentException("rampupRate must be >= 0.0, was [%s]".format(rampupRate)) if (backoffThreshold > 1.0) throw new IllegalArgumentException("backoffThreshold must be <= 1.0, was [%s]".format(backoffThreshold)) if (backoffRate < 0.0) throw new IllegalArgumentException("backoffRate must be >= 0.0, was [%s]".format(backoffRate)) - if (resizeOnNthMessage <= 0) throw new IllegalArgumentException("resizeOnNthMessage must be > 0, was [%s]".format(resizeOnNthMessage)) + if (messagesPerResize <= 0) throw new IllegalArgumentException("messagesPerResize must be > 0, was [%s]".format(messagesPerResize)) - def isTimeForResize(messageCounter: Long): Boolean = (messageCounter % resizeOnNthMessage == 0) + def isTimeForResize(messageCounter: Long): Boolean = (messageCounter % messagesPerResize == 0) def resize(props: Props, actorContext: ActorContext, currentRoutees: IndexedSeq[ActorRef], routerConfig: RouterConfig) { val requestedCapacity = capacity(currentRoutees) @@ -716,27 +720,16 @@ case class DefaultResizer( * @return number of busy routees, between 0 and routees.size */ def pressure(routees: IndexedSeq[ActorRef]): Int = { - if (pressureThreshold == 1) { - routees count { - case a: LocalActorRef ⇒ - val cell = a.underlying - a.underlying.mailbox.isScheduled && cell.currentMessage != null && a.underlying.mailbox.hasMessages - case x ⇒ - false - } - } else if (pressureThreshold > 1) { - routees count { - case a: LocalActorRef ⇒ a.underlying.mailbox.numberOfMessages >= pressureThreshold - case x ⇒ false - } - } else { - routees count { - case a: LocalActorRef ⇒ - val cell = a.underlying - a.underlying.mailbox.isScheduled && cell.currentMessage != null - case x ⇒ - false - } + routees count { + case a: LocalActorRef ⇒ + val cell = a.underlying + pressureThreshold match { + case 1 ⇒ cell.mailbox.isScheduled && cell.currentMessage != null + case i if i < 1 ⇒ cell.mailbox.isScheduled && cell.currentMessage != null + case threshold ⇒ cell.mailbox.numberOfMessages >= threshold + } + case x ⇒ + false } } From 409cbaf8e2d9fc87797403f357ba00fef42ef35d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 11 Jan 2012 11:30:32 +0100 Subject: [PATCH 32/84] Removed exposure of RoutedActorRef in RouterConfig. 
See #1618 --- .../test/scala/akka/routing/RoutingSpec.scala | 10 +++--- .../src/main/scala/akka/routing/Routing.scala | 34 +++++++++++-------- .../jrouting/CustomRouterDocTestBase.java | 2 +- 3 files changed, 24 insertions(+), 22 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 419d367d19..12fada0880 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -413,12 +413,12 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with "custom router" must { "be started when constructed" in { - val routedActor = system.actorOf(Props[TestActor].withRouter(VoteCountRouter)) + val routedActor = system.actorOf(Props[TestActor].withRouter(VoteCountRouter())) routedActor.isTerminated must be(false) } "count votes as intended - not as in Florida" in { - val routedActor = system.actorOf(Props().withRouter(VoteCountRouter)) + val routedActor = system.actorOf(Props().withRouter(VoteCountRouter())) routedActor ! DemocratVote routedActor ! DemocratVote routedActor ! RepublicanVote @@ -462,12 +462,10 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with //#crActors //#crRouter - object VoteCountRouter extends RouterConfig { + case class VoteCountRouter() extends RouterConfig { //#crRoute - def createRoute(props: Props, - actorContext: ActorContext, - ref: RoutedActorRef): Route = { + def createRoute(routeeProps: Props, actorContext: ActorContext): Route = { val democratActor = actorContext.actorOf(Props(new DemocratActor()), "d") val republicanActor = actorContext.actorOf(Props(new RepublicanActor()), "r") val routees = Vector[ActorRef](democratActor, republicanActor) diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 377fd2234d..2a67e7637b 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -42,7 +42,7 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup abandonedRoutees foreach underlying.unwatch } - val route = _props.routerConfig.createRoute(routeeProps, actorContext, this) + val route = _props.routerConfig.createRoute(routeeProps, actorContext) // initial resize, before message send resize() @@ -111,14 +111,14 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup * do the locking yourself! * * '''Caution:''' Please note that the [[akka.routing.Router]] which needs to - * be returned by `apply()` should not send a message to itself in its + * be returned by `createActor()` should not send a message to itself in its * constructor or `preStart()` or publish its self reference from there: if * someone tries sending a message to that reference before the constructor of * RoutedActorRef has returned, there will be a `NullPointerException`! 
*/ trait RouterConfig { - def createRoute(props: Props, actorContext: ActorContext, ref: RoutedActorRef): Route + def createRoute(routeeProps: Props, actorContext: ActorContext): Route def createActor(): Router = new Router {} @@ -171,15 +171,15 @@ trait RouterConfig { * @see akka.routing.RouterConfig */ abstract class CustomRouterConfig extends RouterConfig { - override def createRoute(props: Props, context: ActorContext, ref: RoutedActorRef): Route = { - val customRoute = createCustomRoute(props, context, ref) + override def createRoute(props: Props, context: ActorContext): Route = { + val customRoute = createCustomRoute(props, context) { case (sender, message) ⇒ customRoute.destinationsFor(sender, message) } } - def createCustomRoute(props: Props, context: ActorContext, ref: RoutedActorRef): CustomRoute + def createCustomRoute(props: Props, context: ActorContext): CustomRoute protected def registerRoutees(context: ActorContext, routees: java.util.List[ActorRef]): Unit = { import scala.collection.JavaConverters._ @@ -251,23 +251,23 @@ case class Destination(sender: ActorRef, recipient: ActorRef) * Oxymoron style. */ case object NoRouter extends RouterConfig { - def createRoute(props: Props, actorContext: ActorContext, ref: RoutedActorRef): Route = null + def createRoute(props: Props, actorContext: ActorContext): Route = null } /** * Router configuration which has no default, i.e. external configuration is required. */ case object FromConfig extends RouterConfig { - def createRoute(props: Props, actorContext: ActorContext, ref: RoutedActorRef): Route = - throw new ConfigurationException("router " + ref + " needs external configuration from file (e.g. application.conf)") + def createRoute(props: Props, actorContext: ActorContext): Route = + throw new ConfigurationException("router " + actorContext.self + " needs external configuration from file (e.g. application.conf)") } /** * Java API: Router configuration which has no default, i.e. external configuration is required. */ case class FromConfig() extends RouterConfig { - def createRoute(props: Props, actorContext: ActorContext, ref: RoutedActorRef): Route = - throw new ConfigurationException("router " + ref + " needs external configuration from file (e.g. application.conf)") + def createRoute(props: Props, actorContext: ActorContext): Route = + throw new ConfigurationException("router " + actorContext.self + " needs external configuration from file (e.g. 
application.conf)") } object RoundRobinRouter { @@ -324,9 +324,10 @@ trait RoundRobinLike { this: RouterConfig ⇒ def routees: Iterable[String] - def createRoute(props: Props, context: ActorContext, ref: RoutedActorRef): Route = { + def createRoute(props: Props, context: ActorContext): Route = { createAndRegisterRoutees(props, context, nrOfInstances, routees) + val ref = context.self.asInstanceOf[RoutedActorRef] val next = new AtomicLong(0) def getNext(): ActorRef = { @@ -404,7 +405,8 @@ trait RandomLike { this: RouterConfig ⇒ override def initialValue = SecureRandom.getInstance("SHA1PRNG") } - def createRoute(props: Props, context: ActorContext, ref: RoutedActorRef): Route = { + def createRoute(props: Props, context: ActorContext): Route = { + val ref = context.self.asInstanceOf[RoutedActorRef] createAndRegisterRoutees(props, context, nrOfInstances, routees) def getNext(): ActorRef = { @@ -476,7 +478,8 @@ trait BroadcastLike { this: RouterConfig ⇒ def routees: Iterable[String] - def createRoute(props: Props, context: ActorContext, ref: RoutedActorRef): Route = { + def createRoute(props: Props, context: ActorContext): Route = { + val ref = context.self.asInstanceOf[RoutedActorRef] createAndRegisterRoutees(props, context, nrOfInstances, routees) { @@ -545,7 +548,8 @@ trait ScatterGatherFirstCompletedLike { this: RouterConfig ⇒ def within: Duration - def createRoute(props: Props, context: ActorContext, ref: RoutedActorRef): Route = { + def createRoute(props: Props, context: ActorContext): Route = { + val ref = context.self.asInstanceOf[RoutedActorRef] createAndRegisterRoutees(props, context, nrOfInstances, routees) { diff --git a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java b/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java index 8962b22c57..c89401e5cc 100644 --- a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java +++ b/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java @@ -107,7 +107,7 @@ public class CustomRouterDocTestBase { //#crRoute @Override - public CustomRoute createCustomRoute(Props props, ActorContext context, RoutedActorRef ref) { + public CustomRoute createCustomRoute(Props props, ActorContext context) { final ActorRef democratActor = context.actorOf(new Props(DemocratActor.class), "d"); final ActorRef republicanActor = context.actorOf(new Props(RepublicanActor.class), "r"); List routees = Arrays.asList(new ActorRef[] { democratActor, republicanActor }); From bc7b5c92a074da6c4d3f66ce71774e144e531fe8 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 11 Jan 2012 11:42:03 +0100 Subject: [PATCH 33/84] Added a comment in CustomRouterConfig as suggested --- .../src/main/scala/akka/routing/Routing.scala | 81 ++++++++++--------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 2a67e7637b..69589ae651 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -172,6 +172,7 @@ trait RouterConfig { */ abstract class CustomRouterConfig extends RouterConfig { override def createRoute(props: Props, context: ActorContext): Route = { + // as a bonus, this prevents closing of props and context in the returned Route PartialFunction val customRoute = createCustomRoute(props, context) { @@ -514,7 +515,7 @@ object ScatterGatherFirstCompletedRouter { * be ignored if the 'nrOfInstances' is defined in the configuration file for 
the actor being used. */ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, within: Duration, - override val resizer: Option[Resizer] = None) + override val resizer: Option[Resizer] = None) extends RouterConfig with ScatterGatherFirstCompletedLike { /** @@ -592,57 +593,57 @@ case class DefaultResizer( */ lowerBound: Int = 1, /** - * The most number of routees the router should ever have. - * Must be greater than or equal to `lowerBound`. - */ + * The most number of routees the router should ever have. + * Must be greater than or equal to `lowerBound`. + */ upperBound: Int = 10, /** - * Threshold to evaluate if routee is considered to be busy (under pressure). - * Implementation depends on this value (default is 1). - *
    - *
-   * <li> 0:   number of routees currently processing a message.</li>
-   * <li> 1:   number of routees currently processing a message has
-   *           some messages in mailbox.</li>
-   * <li> > 1: number of routees with at least the configured `pressureThreshold`
-   *           messages in their mailbox. Note that estimating mailbox size of
-   *           default UnboundedMailbox is O(N) operation.</li>
-   * </ul>
-   */
+     * Threshold to evaluate if routee is considered to be busy (under pressure).
+     * Implementation depends on this value (default is 1).
+     * <ul>
+     * <li> 0:   number of routees currently processing a message.</li>
+     * <li> 1:   number of routees currently processing a message has
+     *           some messages in mailbox.</li>
+     * <li> > 1: number of routees with at least the configured `pressureThreshold`
+     *           messages in their mailbox. Note that estimating mailbox size of
+     *           default UnboundedMailbox is O(N) operation.</li>
+     * </ul>
+ */ pressureThreshold: Int = 1, /** - * Percentage to increase capacity whenever all routees are busy. - * For example, 0.2 would increase 20% (rounded up), i.e. if current - * capacity is 6 it will request an increase of 2 more routees. - */ + * Percentage to increase capacity whenever all routees are busy. + * For example, 0.2 would increase 20% (rounded up), i.e. if current + * capacity is 6 it will request an increase of 2 more routees. + */ rampupRate: Double = 0.2, /** - * Minimum fraction of busy routees before backing off. - * For example, if this is 0.3, then we'll remove some routees only when - * less than 30% of routees are busy, i.e. if current capacity is 10 and - * 3 are busy then the capacity is unchanged, but if 2 or less are busy - * the capacity is decreased. - * - * Use 0.0 or negative to avoid removal of routees. - */ + * Minimum fraction of busy routees before backing off. + * For example, if this is 0.3, then we'll remove some routees only when + * less than 30% of routees are busy, i.e. if current capacity is 10 and + * 3 are busy then the capacity is unchanged, but if 2 or less are busy + * the capacity is decreased. + * + * Use 0.0 or negative to avoid removal of routees. + */ backoffThreshold: Double = 0.3, /** - * Fraction of routees to be removed when the resizer reaches the - * backoffThreshold. - * For example, 0.1 would decrease 10% (rounded up), i.e. if current - * capacity is 9 it will request an decrease of 1 routee. - */ + * Fraction of routees to be removed when the resizer reaches the + * backoffThreshold. + * For example, 0.1 would decrease 10% (rounded up), i.e. if current + * capacity is 9 it will request an decrease of 1 routee. + */ backoffRate: Double = 0.1, /** - * When the resizer reduce the capacity the abandoned routee actors are stopped - * with PoisonPill after this delay. The reason for the delay is to give concurrent - * messages a chance to be placed in mailbox before sending PoisonPill. - * Use 0 seconds to skip delay. - */ + * When the resizer reduce the capacity the abandoned routee actors are stopped + * with PoisonPill after this delay. The reason for the delay is to give concurrent + * messages a chance to be placed in mailbox before sending PoisonPill. + * Use 0 seconds to skip delay. + */ stopDelay: Duration = 1.second, /** - * Number of messages between resize operation. - * Use 1 to resize before each message. - */ + * Number of messages between resize operation. + * Use 1 to resize before each message. + */ messagesPerResize: Int = 10) extends Resizer { /** From 6a12fb7876b38e0235a7a185abb8149de2ac4c26 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 11 Jan 2012 13:56:38 +0100 Subject: [PATCH 34/84] Implemented SmallestMailboxRouter. 
See #1619 --- .../test/scala/akka/routing/RoutingSpec.scala | 51 ++++ akka-actor/src/main/resources/reference.conf | 2 +- .../src/main/scala/akka/actor/Deployer.scala | 13 +- .../src/main/scala/akka/routing/Routing.scala | 225 ++++++++++++++---- .../code/akka/docs/jrouting/ParentActor.java | 9 + akka-docs/java/routing.rst | 20 +- .../akka/docs/routing/RouterTypeExample.scala | 9 + akka-docs/scala/routing.rst | 20 +- .../scala/akka/remote/RemoteDeployer.scala | 1 + .../scala/akka/routing/RemoteRouters.scala | 27 +++ 10 files changed, 322 insertions(+), 55 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 12fada0880..9811313688 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -12,6 +12,7 @@ import akka.dispatch.Await import akka.util.Duration import akka.config.ConfigurationException import com.typesafe.config.ConfigFactory +import java.util.concurrent.ConcurrentHashMap object RoutingSpec { @@ -256,6 +257,56 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with } } + "smallest mailbox router" must { + "be started when constructed" in { + val routedActor = system.actorOf(Props[TestActor].withRouter(SmallestMailboxRouter(nrOfInstances = 1))) + routedActor.isTerminated must be(false) + } + + "deliver messages to idle actor" in { + val usedActors = new ConcurrentHashMap[Int, String]() + val router = system.actorOf(Props(new Actor { + def receive = { + case busy: TestLatch ⇒ + usedActors.put(0, self.path.toString) + Await.ready(busy, TestLatch.DefaultTimeout) + case (msg: Int, receivedLatch: TestLatch) ⇒ + usedActors.put(msg, self.path.toString) + receivedLatch.countDown() + } + }).withRouter(SmallestMailboxRouter(3))) + + val busy = TestLatch(1) + router ! busy + + val received1 = TestLatch(1) + router.!((1, received1)) + Await.ready(received1, TestLatch.DefaultTimeout) + + val received2 = TestLatch(1) + router.!((2, received2)) + Await.ready(received2, TestLatch.DefaultTimeout) + + val received3 = TestLatch(1) + router.!((3, received3)) + Await.ready(received3, TestLatch.DefaultTimeout) + + busy.countDown() + + val busyPath = usedActors.get(0) + busyPath must not be (null) + + val path1 = usedActors.get(1) + val path2 = usedActors.get(2) + val path3 = usedActors.get(3) + + path1 must not be (busyPath) + path2 must not be (busyPath) + path3 must not be (busyPath) + + } + } + "broadcast router" must { "be started when constructed" in { val routedActor = system.actorOf(Props[TestActor].withRouter(BroadcastRouter(nrOfInstances = 1))) diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index 882fccea55..07e363fca9 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -64,7 +64,7 @@ akka { default { # routing (load-balance) scheme to use - # available: "from-code", "round-robin", "random", "scatter-gather", "broadcast" + # available: "from-code", "round-robin", "random", "smallest-mailbox", "scatter-gather", "broadcast" # or: fully qualified class name of the router class # default is "from-code"; # Whether or not an actor is transformed to a Router is decided in code only (Props.withRouter). 
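A rough illustration of how the new "smallest-mailbox" entry is meant to be used from configuration (the ``Worker`` actor, the ``/workers`` path and the values are invented for this sketch; the string is resolved by the Deployer change below, and per the router scaladoc the configured router trumps the one given in code):

.. code-block:: scala

  import akka.actor.{ Actor, ActorSystem, Props }
  import akka.routing.RoundRobinRouter
  import com.typesafe.config.ConfigFactory

  class Worker extends Actor {
    def receive = { case _ ⇒ () }
  }

  // hypothetical deployment section for the actor created below at /user/workers
  val config = ConfigFactory.parseString("""
    akka.actor.deployment {
      /workers {
        router = "smallest-mailbox"
        nr-of-instances = 5
      }
    }
    """)

  val system = ActorSystem("Example", config)
  // the router defined in configuration trumps the RoundRobinRouter given here
  val workers = system.actorOf(
    Props[Worker].withRouter(RoundRobinRouter(nrOfInstances = 2)), "workers")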
diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 35ec05432a..23c6da6661 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -71,12 +71,13 @@ class Deployer(val settings: ActorSystem.Settings) { } val router: RouterConfig = deployment.getString("router") match { - case "from-code" ⇒ NoRouter - case "round-robin" ⇒ RoundRobinRouter(nrOfInstances, routees, resizer) - case "random" ⇒ RandomRouter(nrOfInstances, routees, resizer) - case "scatter-gather" ⇒ ScatterGatherFirstCompletedRouter(nrOfInstances, routees, within, resizer) - case "broadcast" ⇒ BroadcastRouter(nrOfInstances, routees, resizer) - case x ⇒ throw new ConfigurationException("unknown router type " + x + " for path " + key) + case "from-code" ⇒ NoRouter + case "round-robin" ⇒ RoundRobinRouter(nrOfInstances, routees, resizer) + case "random" ⇒ RandomRouter(nrOfInstances, routees, resizer) + case "smallest-mailbox" ⇒ SmallestMailboxRouter(nrOfInstances, routees, resizer) + case "scatter-gather" ⇒ ScatterGatherFirstCompletedRouter(nrOfInstances, routees, within, resizer) + case "broadcast" ⇒ BroadcastRouter(nrOfInstances, routees, resizer) + case x ⇒ throw new ConfigurationException("unknown router type " + x + " for path " + key) } val recipe: Option[ActorRecipe] = diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 69589ae651..f021eb867a 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -286,7 +286,7 @@ object RoundRobinRouter { * A Router that uses round-robin to select a connection. For concurrent calls, round robin is just a best effort. *
* Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means - * that the round robin should both create new actors and use the 'routees' actor(s). + * that the router should both create new actors and use the 'routees' actor(s). * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. *
* The configuration parameter trumps the constructor arguments. This means that @@ -361,7 +361,7 @@ object RandomRouter { * A Router that randomly selects one of the target connections to send a message to. *
* Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means - * that the random router should both create new actors and use the 'routees' actor(s). + * that the router should both create new actors and use the 'routees' actor(s). * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. *
* The configuration parameter trumps the constructor arguments. This means that @@ -424,6 +424,143 @@ trait RandomLike { this: RouterConfig ⇒ } } +object SmallestMailboxRouter { + def apply(routees: Iterable[ActorRef]) = new SmallestMailboxRouter(routees = routees map (_.path.toString)) + + /** + * Java API to create router with the supplied 'routees' actors. + */ + def create(routees: java.lang.Iterable[ActorRef]): SmallestMailboxRouter = { + import scala.collection.JavaConverters._ + apply(routees.asScala) + } +} +/** + * A Router that tries to send to the routee with fewest messages in mailbox. + * The selection is done in this order: + *
    + *
+ * <li>pick any idle routee (not processing message) with empty mailbox</li>
+ * <li>pick any routee with empty mailbox</li>
+ * <li>pick routee with fewest pending messages in mailbox</li>
+ * <li>pick any remote routee, remote actors are considered lowest priority,
+ * since their mailbox size is unknown</li>
+ * </ul>
+ * + *
+ * Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means + * that the router should both create new actors and use the 'routees' actor(s). + * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. + *
+ * The configuration parameter trumps the constructor arguments. This means that + * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will + * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. + */ +case class SmallestMailboxRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None) + extends RouterConfig with SmallestMailboxLike { + + /** + * Constructor that sets nrOfInstances to be created. + * Java API + */ + def this(nr: Int) = { + this(nrOfInstances = nr) + } + + /** + * Constructor that sets the routees to be used. + * Java API + */ + def this(t: java.lang.Iterable[String]) = { + this(routees = iterableAsScalaIterable(t)) + } + + /** + * Constructor that sets the resizer to be used. + * Java API + */ + def this(resizer: Resizer) = this(resizer = Some(resizer)) +} + +trait SmallestMailboxLike { this: RouterConfig ⇒ + + import java.security.SecureRandom + + def nrOfInstances: Int + + def routees: Iterable[String] + + private val random = new ThreadLocal[SecureRandom] { + override def initialValue = SecureRandom.getInstance("SHA1PRNG") + } + + /** + * Returns true if the actor is currently processing a message. + * It will always return false for remote actors. + * Method is exposed to subclasses to be able to implement custom + * routers based on mailbox and actor internal state. + */ + protected def isProcessingMessage(a: ActorRef): Boolean = a match { + case x: LocalActorRef ⇒ + val cell = x.underlying + cell.mailbox.isScheduled && cell.currentMessage != null + case _ ⇒ false + } + + /** + * Returns true if the actor currently has any pending messages + * in the mailbox, i.e. the mailbox is not empty. + * It will always return false for remote actors. + * Method is exposed to subclasses to be able to implement custom + * routers based on mailbox and actor internal state. + */ + protected def hasMessages(a: ActorRef): Boolean = a match { + case x: LocalActorRef ⇒ x.underlying.mailbox.hasMessages + case _ ⇒ false + } + + /** + * Returns the number of pending messages in the mailbox of the actor. + * It will always return 0 for remote actors. + * Method is exposed to subclasses to be able to implement custom + * routers based on mailbox and actor internal state. 
+ */ + protected def numberOfMessages(a: ActorRef): Int = a match { + case x: LocalActorRef ⇒ x.underlying.mailbox.numberOfMessages + case _ ⇒ 0 + } + + def createRoute(props: Props, context: ActorContext): Route = { + val ref = context.self.asInstanceOf[RoutedActorRef] + createAndRegisterRoutees(props, context, nrOfInstances, routees) + + def getNext(): ActorRef = { + // non-local actors mailbox size is unknown, so consider them lowest priority + val local: IndexedSeq[LocalActorRef] = for (a ← ref.routees if a.isInstanceOf[LocalActorRef]) yield a.asInstanceOf[LocalActorRef] + // anyone not processing message and with empty mailbox + val idle = local.find(a ⇒ !isProcessingMessage(a) && !hasMessages(a)) + idle getOrElse { + // anyone with empty mailbox + val emptyMailbox = local.find(a ⇒ !hasMessages(a)) + emptyMailbox getOrElse { + // sort on mailbox size + local.sortBy(a ⇒ numberOfMessages(a)).headOption getOrElse { + // no locals, just pick one, random + ref.routees(random.get.nextInt(ref.routees.size)) + } + } + } + } + + { + case (sender, message) ⇒ + message match { + case Broadcast(msg) ⇒ toAll(sender, ref.routees) + case msg ⇒ List(Destination(sender, getNext())) + } + } + } +} + object BroadcastRouter { def apply(routees: Iterable[ActorRef]) = new BroadcastRouter(routees = routees map (_.path.toString)) @@ -439,7 +576,7 @@ object BroadcastRouter { * A Router that uses broadcasts a message to all its connections. *
* Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means - * that the random router should both create new actors and use the 'routees' actor(s). + * that the router should both create new actors and use the 'routees' actor(s). * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. *
* The configuration parameter trumps the constructor arguments. This means that @@ -507,7 +644,7 @@ object ScatterGatherFirstCompletedRouter { * Simple router that broadcasts the message to all routees, and replies with the first response. *
* Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means - * that the random router should both create new actors and use the 'routees' actor(s). + * that the router should both create new actors and use the 'routees' actor(s). * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. *
* The configuration parameter trumps the constructor arguments. This means that @@ -515,7 +652,7 @@ object ScatterGatherFirstCompletedRouter { * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, within: Duration, - override val resizer: Option[Resizer] = None) + override val resizer: Option[Resizer] = None) extends RouterConfig with ScatterGatherFirstCompletedLike { /** @@ -593,57 +730,57 @@ case class DefaultResizer( */ lowerBound: Int = 1, /** - * The most number of routees the router should ever have. - * Must be greater than or equal to `lowerBound`. - */ + * The most number of routees the router should ever have. + * Must be greater than or equal to `lowerBound`. + */ upperBound: Int = 10, /** - * Threshold to evaluate if routee is considered to be busy (under pressure). - * Implementation depends on this value (default is 1). - *
    - *
-   * <li> 0:   number of routees currently processing a message.</li>
-   * <li> 1:   number of routees currently processing a message has
-   *           some messages in mailbox.</li>
-   * <li> > 1: number of routees with at least the configured `pressureThreshold`
-   *           messages in their mailbox. Note that estimating mailbox size of
-   *           default UnboundedMailbox is O(N) operation.</li>
-   * </ul>
-   */
+     * Threshold to evaluate if routee is considered to be busy (under pressure).
+     * Implementation depends on this value (default is 1).
+     * <ul>
+     * <li> 0:   number of routees currently processing a message.</li>
+     * <li> 1:   number of routees currently processing a message has
+     *           some messages in mailbox.</li>
+     * <li> > 1: number of routees with at least the configured `pressureThreshold`
+     *           messages in their mailbox. Note that estimating mailbox size of
+     *           default UnboundedMailbox is O(N) operation.</li>
+     * </ul>
+ */ pressureThreshold: Int = 1, /** - * Percentage to increase capacity whenever all routees are busy. - * For example, 0.2 would increase 20% (rounded up), i.e. if current - * capacity is 6 it will request an increase of 2 more routees. - */ + * Percentage to increase capacity whenever all routees are busy. + * For example, 0.2 would increase 20% (rounded up), i.e. if current + * capacity is 6 it will request an increase of 2 more routees. + */ rampupRate: Double = 0.2, /** - * Minimum fraction of busy routees before backing off. - * For example, if this is 0.3, then we'll remove some routees only when - * less than 30% of routees are busy, i.e. if current capacity is 10 and - * 3 are busy then the capacity is unchanged, but if 2 or less are busy - * the capacity is decreased. - * - * Use 0.0 or negative to avoid removal of routees. - */ + * Minimum fraction of busy routees before backing off. + * For example, if this is 0.3, then we'll remove some routees only when + * less than 30% of routees are busy, i.e. if current capacity is 10 and + * 3 are busy then the capacity is unchanged, but if 2 or less are busy + * the capacity is decreased. + * + * Use 0.0 or negative to avoid removal of routees. + */ backoffThreshold: Double = 0.3, /** - * Fraction of routees to be removed when the resizer reaches the - * backoffThreshold. - * For example, 0.1 would decrease 10% (rounded up), i.e. if current - * capacity is 9 it will request an decrease of 1 routee. - */ + * Fraction of routees to be removed when the resizer reaches the + * backoffThreshold. + * For example, 0.1 would decrease 10% (rounded up), i.e. if current + * capacity is 9 it will request an decrease of 1 routee. + */ backoffRate: Double = 0.1, /** - * When the resizer reduce the capacity the abandoned routee actors are stopped - * with PoisonPill after this delay. The reason for the delay is to give concurrent - * messages a chance to be placed in mailbox before sending PoisonPill. - * Use 0 seconds to skip delay. - */ + * When the resizer reduce the capacity the abandoned routee actors are stopped + * with PoisonPill after this delay. The reason for the delay is to give concurrent + * messages a chance to be placed in mailbox before sending PoisonPill. + * Use 0 seconds to skip delay. + */ stopDelay: Duration = 1.second, /** - * Number of messages between resize operation. - * Use 1 to resize before each message. - */ + * Number of messages between resize operation. + * Use 1 to resize before each message. 
+ */ messagesPerResize: Int = 10) extends Resizer { /** diff --git a/akka-docs/java/code/akka/docs/jrouting/ParentActor.java b/akka-docs/java/code/akka/docs/jrouting/ParentActor.java index c8d8b019bb..2125ae35a8 100644 --- a/akka-docs/java/code/akka/docs/jrouting/ParentActor.java +++ b/akka-docs/java/code/akka/docs/jrouting/ParentActor.java @@ -7,6 +7,7 @@ import akka.routing.ScatterGatherFirstCompletedRouter; import akka.routing.BroadcastRouter; import akka.routing.RandomRouter; import akka.routing.RoundRobinRouter; +import akka.routing.SmallestMailboxRouter; import akka.actor.UntypedActor; import akka.actor.ActorRef; import akka.actor.Props; @@ -34,6 +35,14 @@ public class ParentActor extends UntypedActor { randomRouter.tell(i, getSelf()); } //#randomRouter + } else if (msg.equals("smr")) { + //#smallestMailboxRouter + ActorRef smallestMailboxRouter = getContext().actorOf( + new Props(PrintlnActor.class).withRouter(new SmallestMailboxRouter(5)), "router"); + for (int i = 1; i <= 10; i++) { + smallestMailboxRouter.tell(i, getSelf()); + } + //#smallestMailboxRouter } else if (msg.equals("br")) { //#broadcastRouter ActorRef broadcastRouter = getContext().actorOf(new Props(PrintlnActor.class).withRouter(new BroadcastRouter(5)), diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index e80514a8fe..8cc5b94260 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -16,11 +16,12 @@ Router A Router is an actor that routes incoming messages to outbound actors. The router routes the messages sent to it to its underlying actors called 'routees'. -Akka comes with four defined routers out of the box, but as you will see in this chapter it -is really easy to create your own. The four routers shipped with Akka are: +Akka comes with some defined routers out of the box, but as you will see in this chapter it +is really easy to create your own. The routers shipped with Akka are: * ``akka.routing.RoundRobinRouter`` * ``akka.routing.RandomRouter`` +* ``akka.routing.SmallestMailboxRouter`` * ``akka.routing.BroadcastRouter`` * ``akka.routing.ScatterGatherFirstCompletedRouter`` @@ -122,6 +123,21 @@ When run you should see a similar output to this: The result from running the random router should be different, or at least random, every time you run it. Try to run it a couple of times to verify its behavior if you don't trust us. +SmallestMailboxRouter +********************* +A Router that tries to send to the routee with fewest messages in mailbox. +The selection is done in this order: + + * pick any idle routee (not processing message) with empty mailbox + * pick any routee with empty mailbox + * pick routee with fewest pending messages in mailbox + * pick any remote routee, remote actors are consider lowest priority, + since their mailbox size is unknown + +Code example: + +.. includecode:: code/akka/docs/jrouting/ParentActor.java#smallestMailboxRouter + BroadcastRouter *************** A broadcast router forwards the message it receives to *all* its routees. 
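The resizer settings documented at length in the Routing.scala hunks above are easier to read next to a usage sketch; this is only an illustration (the ``Worker`` actor and the system name are made up, and the values are simply the documented defaults written out):

.. code-block:: scala

  import akka.actor.{ Actor, ActorSystem, Props }
  import akka.routing.{ DefaultResizer, SmallestMailboxRouter }

  class Worker extends Actor {
    def receive = { case _ ⇒ () }
  }

  val system = ActorSystem("ResizerExample")

  // when a resizer is given, it controls the number of routees instead of nrOfInstances
  val resizer = DefaultResizer(
    lowerBound = 1,          // never fewer than 1 routee
    upperBound = 10,         // never more than 10 routees
    pressureThreshold = 1,   // "busy" = processing a message and having messages queued
    rampupRate = 0.2,        // grow by 20% (rounded up) when all routees are busy
    backoffThreshold = 0.3,  // shrink only when less than 30% of the routees are busy
    backoffRate = 0.1,       // then remove 10% (rounded up) of the routees
    messagesPerResize = 10)  // evaluate capacity every 10 messages

  val pool = system.actorOf(
    Props[Worker].withRouter(SmallestMailboxRouter(resizer = Some(resizer))), "pool")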
diff --git a/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala b/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala index d688da6544..3a9f566ed8 100644 --- a/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala +++ b/akka-docs/scala/code/akka/docs/routing/RouterTypeExample.scala @@ -8,6 +8,7 @@ import annotation.tailrec import akka.actor.{ Props, Actor } import akka.util.duration._ import akka.dispatch.Await +import akka.routing.SmallestMailboxRouter case class FibonacciNumber(nbr: Int) @@ -59,6 +60,14 @@ class ParentActor extends Actor { i ⇒ randomRouter ! i } //#randomRouter + case "smr" ⇒ + //#smallestMailboxRouter + val smallestMailboxRouter = + context.actorOf(Props[PrintlnActor].withRouter(SmallestMailboxRouter(5)), "router") + 1 to 10 foreach { + i ⇒ smallestMailboxRouter ! i + } + //#smallestMailboxRouter case "br" ⇒ //#broadcastRouter val broadcastRouter = diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index ad06b67b8b..8c0e0f7366 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -16,11 +16,12 @@ Router A Router is an actor that routes incoming messages to outbound actors. The router routes the messages sent to it to its underlying actors called 'routees'. -Akka comes with four defined routers out of the box, but as you will see in this chapter it -is really easy to create your own. The four routers shipped with Akka are: +Akka comes with some defined routers out of the box, but as you will see in this chapter it +is really easy to create your own. The routers shipped with Akka are: * ``akka.routing.RoundRobinRouter`` * ``akka.routing.RandomRouter`` +* ``akka.routing.SmallestMailboxRouter`` * ``akka.routing.BroadcastRouter`` * ``akka.routing.ScatterGatherFirstCompletedRouter`` @@ -123,6 +124,21 @@ When run you should see a similar output to this: The result from running the random router should be different, or at least random, every time you run it. Try to run it a couple of times to verify its behavior if you don't trust us. +SmallestMailboxRouter +********************* +A Router that tries to send to the routee with fewest messages in mailbox. +The selection is done in this order: + + * pick any idle routee (not processing message) with empty mailbox + * pick any routee with empty mailbox + * pick routee with fewest pending messages in mailbox + * pick any remote routee, remote actors are consider lowest priority, + since their mailbox size is unknown + +Code example: + +.. includecode:: code/akka/docs/routing/RouterTypeExample.scala#smallestMailboxRouter + BroadcastRouter *************** A broadcast router forwards the message it receives to *all* its routees. 
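To mirror the Scala documentation just added, a self-contained version of the two ways of setting up a ``SmallestMailboxRouter`` might look as follows (actor and system names are invented; it relies only on the constructors introduced in this patch):

.. code-block:: scala

  import akka.actor.{ Actor, ActorSystem, Props }
  import akka.routing.SmallestMailboxRouter

  class PrintlnActor extends Actor {
    def receive = {
      case msg ⇒ println("%s received %s".format(self.path, msg))
    }
  }

  val system = ActorSystem("RouterExample")

  // either let the router create 5 routees of its own ...
  val created = system.actorOf(
    Props[PrintlnActor].withRouter(SmallestMailboxRouter(nrOfInstances = 5)), "created")

  // ... or route to already existing actors (nrOfInstances is then ignored)
  val a = system.actorOf(Props[PrintlnActor], "a")
  val b = system.actorOf(Props[PrintlnActor], "b")
  val existing = system.actorOf(
    Props[PrintlnActor].withRouter(SmallestMailboxRouter(Seq(a, b))), "existing")

  1 to 10 foreach { i ⇒ created ! i }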
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index d8f466c9d2..fe6844b8dc 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -28,6 +28,7 @@ class RemoteDeployer(_settings: ActorSystem.Settings) extends Deployer(_settings val r = deploy.routing match { case RoundRobinRouter(x, _, resizer) ⇒ RemoteRoundRobinRouter(x, nodes, resizer) case RandomRouter(x, _, resizer) ⇒ RemoteRandomRouter(x, nodes, resizer) + case SmallestMailboxRouter(x, _, resizer) ⇒ RemoteSmallestMailboxRouter(x, nodes, resizer) case BroadcastRouter(x, _, resizer) ⇒ RemoteBroadcastRouter(x, nodes, resizer) case ScatterGatherFirstCompletedRouter(x, _, w, resizer) ⇒ RemoteScatterGatherFirstCompletedRouter(x, nodes, w, resizer) } diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala index 52b2d05618..83a64d09a7 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala @@ -82,6 +82,33 @@ case class RemoteRandomRouter(nrOfInstances: Int, routees: Iterable[String], ove def this(resizer: Resizer) = this(0, Nil, Some(resizer)) } +/** + * A Router that tries to send to routee with fewest messages in mailbox. + *
+ * Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means
+ * that the router should both create new actors and use the 'routees' actor(s).
+ * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used.
+ *
+ * The configuration parameter trumps the constructor arguments. This means that + * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will + * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. + */ +case class RemoteSmallestMailboxRouter(nrOfInstances: Int, routees: Iterable[String], override val resizer: Option[Resizer] = None) + extends RemoteRouterConfig with SmallestMailboxLike { + + /** + * Constructor that sets the routees to be used. + * Java API + */ + def this(n: Int, t: java.lang.Iterable[String]) = this(n, t.asScala) + + /** + * Constructor that sets the resizer to be used. + * Java API + */ + def this(resizer: Resizer) = this(0, Nil, Some(resizer)) +} + /** * A Router that uses broadcasts a message to all its connections. *
From edc1d8046f1df851177d301813f3b879b07eabd1 Mon Sep 17 00:00:00 2001 From: Eugene Vigdorchik Date: Thu, 12 Jan 2012 12:47:11 +0400 Subject: [PATCH 35/84] Integrate schoir for distributed testing. --- .gitignore | 2 ++ akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala | 3 --- project/AkkaBuild.scala | 3 ++- project/plugins.sbt | 5 +++-- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index 8ec862d18c..b80964477d 100755 --- a/.gitignore +++ b/.gitignore @@ -61,3 +61,5 @@ akka.sublime-workspace .target .multi-jvm _mb +schoir.props +worker*.log \ No newline at end of file diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala b/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala index 52f5dc3b33..96bccb13fa 100755 --- a/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala @@ -28,7 +28,6 @@ object ZkClient extends Watcher { zk.exists("/", false); true } catch { case _: KeeperException.ConnectionLossException => - println("Server is not ready, sleeping...") Thread.sleep(10000) false } @@ -61,7 +60,6 @@ object ZkClient extends Watcher { } def enter() { - println("ZK creating " + root + "/" + name) zk.create(root + "/" + name, Array[Byte](), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL) @@ -69,7 +67,6 @@ object ZkClient extends Watcher { } final def leave() { - println("ZK leaving " + root + "/" + name) zk.create(root + "/" + name + ".leave", Array[Byte](), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 8f6e656e46..ada3dbcbd2 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -8,6 +8,7 @@ import sbt._ import sbt.Keys._ import com.typesafe.sbtmultijvm.MultiJvmPlugin import com.typesafe.sbtmultijvm.MultiJvmPlugin.{ MultiJvm, extraOptions, jvmOptions, scalatestOptions } +import com.typesafe.schoir.SchoirPlugin.schoirSettings import com.typesafe.sbtscalariform.ScalariformPlugin import com.typesafe.sbtscalariform.ScalariformPlugin.ScalariformKeys import java.lang.Boolean.getBoolean @@ -70,7 +71,7 @@ object AkkaBuild extends Build { id = "akka-remote", base = file("akka-remote"), dependencies = Seq(actor, actorTests % "test->test", testkit % "test->test"), - settings = defaultSettings ++ multiJvmSettings ++ Seq( + settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq( libraryDependencies ++= Dependencies.cluster, // disable parallel tests parallelExecution in Test := false, diff --git a/project/plugins.sbt b/project/plugins.sbt index b9dfcde215..024f70877c 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1,7 +1,9 @@ resolvers += Classpaths.typesafeResolver -addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.7") +addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.9") + +addSbtPlugin("com.typesafe.schoir" % "schoir" % "0.1.1") addSbtPlugin("com.typesafe.sbteclipse" % "sbteclipse" % "1.5.0") @@ -12,4 +14,3 @@ resolvers ++= Seq( "coda" at "http://repo.codahale.com") addSbtPlugin("me.lessis" % "ls-sbt" % "0.1.1") - From 2399f02531ce1d8fda33f5f65bbf951f2671976a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 12 Jan 2012 09:53:53 +0100 Subject: [PATCH 36/84] Improvements based on feedback. 
See #1619 --- .../test/scala/akka/routing/ResizerSpec.scala | 6 +- .../test/scala/akka/routing/RoutingSpec.scala | 15 ++- .../src/main/scala/akka/routing/Routing.scala | 98 +++++++++++++------ akka-docs/java/routing.rst | 7 +- akka-docs/scala/routing.rst | 7 +- 5 files changed, 86 insertions(+), 47 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index d87d688231..6ccad2a95f 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -135,15 +135,15 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with val router = system.actorOf(Props[BusyActor].withRouter(RoundRobinRouter(resizer = Some(resizer)))) val latch1 = new TestLatch(1) - router.!((latch1, busy)) + router ! (latch1, busy) Await.ready(latch1, 2 seconds) val latch2 = new TestLatch(1) - router.!((latch2, busy)) + router ! (latch2, busy) Await.ready(latch2, 2 seconds) val latch3 = new TestLatch(1) - router.!((latch3, busy)) + router ! (latch3, busy) Await.ready(latch3, 2 seconds) Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(3) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 9811313688..077e69e5d9 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -267,28 +267,33 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with val usedActors = new ConcurrentHashMap[Int, String]() val router = system.actorOf(Props(new Actor { def receive = { - case busy: TestLatch ⇒ + case (busy: TestLatch, receivedLatch: TestLatch) ⇒ usedActors.put(0, self.path.toString) + self ! "another in busy mailbox" + receivedLatch.countDown() Await.ready(busy, TestLatch.DefaultTimeout) case (msg: Int, receivedLatch: TestLatch) ⇒ usedActors.put(msg, self.path.toString) receivedLatch.countDown() + case s: String ⇒ } }).withRouter(SmallestMailboxRouter(3))) val busy = TestLatch(1) - router ! busy + val received0 = TestLatch(1) + router ! (busy, received0) + Await.ready(received0, TestLatch.DefaultTimeout) val received1 = TestLatch(1) - router.!((1, received1)) + router ! (1, received1) Await.ready(received1, TestLatch.DefaultTimeout) val received2 = TestLatch(1) - router.!((2, received2)) + router ! (2, received2) Await.ready(received2, TestLatch.DefaultTimeout) val received3 = TestLatch(1) - router.!((3, received3)) + router ! (3, received3) Await.ready(received3, TestLatch.DefaultTimeout) busy.countDown() diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index f021eb867a..f3065788ec 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -290,8 +290,11 @@ object RoundRobinRouter { * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. *
* The configuration parameter trumps the constructor arguments. This means that - * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will - * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. + * if you provide either 'nrOfInstances' or 'routees' during instantiation they will + * be ignored if the router is defined in the configuration file for the actor being used. + * + * @param routees string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] */ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None) extends RouterConfig with RoundRobinLike { @@ -307,9 +310,11 @@ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = /** * Constructor that sets the routees to be used. * Java API + * @param routeePaths string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(t: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(t)) + def this(routeePaths: java.lang.Iterable[String]) = { + this(routees = iterableAsScalaIterable(routeePaths)) } /** @@ -365,8 +370,11 @@ object RandomRouter { * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. *
* The configuration parameter trumps the constructor arguments. This means that - * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will - * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. + * if you provide either 'nrOfInstances' or 'routees' during instantiation they will + * be ignored if the router is defined in the configuration file for the actor being used. + * + * @param routees string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] */ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None) extends RouterConfig with RandomLike { @@ -382,9 +390,11 @@ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, /** * Constructor that sets the routees to be used. * Java API + * @param routeePaths string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(t: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(t)) + def this(routeePaths: java.lang.Iterable[String]) = { + this(routees = iterableAsScalaIterable(routeePaths)) } /** @@ -436,7 +446,7 @@ object SmallestMailboxRouter { } } /** - * A Router that tries to send to the routee with fewest messages in mailbox. + * A Router that tries to send to the non-suspended routee with fewest messages in mailbox. * The selection is done in this order: *
    *
 * <li>pick any idle routee (not processing message) with empty mailbox</li>
 *
    * The configuration parameter trumps the constructor arguments. This means that - * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will - * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. + * if you provide either 'nrOfInstances' or 'routees' during instantiation they will + * be ignored if the router is defined in the configuration file for the actor being used. + * + * @param routees string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] */ case class SmallestMailboxRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None) extends RouterConfig with SmallestMailboxLike { @@ -469,9 +482,11 @@ case class SmallestMailboxRouter(nrOfInstances: Int = 0, routees: Iterable[Strin /** * Constructor that sets the routees to be used. * Java API + * @param routeePaths string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(t: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(t)) + def this(routeePaths: java.lang.Iterable[String]) = { + this(routees = iterableAsScalaIterable(routeePaths)) } /** @@ -518,6 +533,19 @@ trait SmallestMailboxLike { this: RouterConfig ⇒ case _ ⇒ false } + /** + * Returns true if the actor is currently suspended. + * It will always return false for remote actors. + * Method is exposed to subclasses to be able to implement custom + * routers based on mailbox and actor internal state. + */ + protected def isSuspended(a: ActorRef): Boolean = a match { + case x: LocalActorRef ⇒ + val cell = x.underlying + cell.mailbox.isSuspended + case _ ⇒ false + } + /** * Returns the number of pending messages in the mailbox of the actor. * It will always return 0 for remote actors. @@ -535,16 +563,14 @@ trait SmallestMailboxLike { this: RouterConfig ⇒ def getNext(): ActorRef = { // non-local actors mailbox size is unknown, so consider them lowest priority - val local: IndexedSeq[LocalActorRef] = for (a ← ref.routees if a.isInstanceOf[LocalActorRef]) yield a.asInstanceOf[LocalActorRef] - // anyone not processing message and with empty mailbox - val idle = local.find(a ⇒ !isProcessingMessage(a) && !hasMessages(a)) - idle getOrElse { - // anyone with empty mailbox - val emptyMailbox = local.find(a ⇒ !hasMessages(a)) - emptyMailbox getOrElse { - // sort on mailbox size - local.sortBy(a ⇒ numberOfMessages(a)).headOption getOrElse { - // no locals, just pick one, random + val activeLocal = ref.routees collect { case l: LocalActorRef if !isSuspended(l) ⇒ l } + // 1. anyone not processing message and with empty mailbox + activeLocal.find(a ⇒ !isProcessingMessage(a) && !hasMessages(a)) getOrElse { + // 2. anyone with empty mailbox + activeLocal.find(a ⇒ !hasMessages(a)) getOrElse { + // 3. sort on mailbox size + activeLocal.sortBy(a ⇒ numberOfMessages(a)).headOption getOrElse { + // 4. no locals, just pick one, random ref.routees(random.get.nextInt(ref.routees.size)) } } @@ -580,8 +606,11 @@ object BroadcastRouter { * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. *
    * The configuration parameter trumps the constructor arguments. This means that - * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will - * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. + * if you provide either 'nrOfInstances' or 'routees' during instantiation they will + * be ignored if the router is defined in the configuration file for the actor being used. + * + * @param routees string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] */ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None) extends RouterConfig with BroadcastLike { @@ -597,9 +626,11 @@ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = N /** * Constructor that sets the routees to be used. * Java API + * @param routeePaths string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(t: java.lang.Iterable[String]) = { - this(routees = iterableAsScalaIterable(t)) + def this(routeePaths: java.lang.Iterable[String]) = { + this(routees = iterableAsScalaIterable(routeePaths)) } /** @@ -648,8 +679,11 @@ object ScatterGatherFirstCompletedRouter { * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. *
    * The configuration parameter trumps the constructor arguments. This means that - * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will - * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. + * if you provide either 'nrOfInstances' or 'routees' during instantiation they will + * be ignored if the router is defined in the configuration file for the actor being used. + * + * @param routees string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] */ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, within: Duration, override val resizer: Option[Resizer] = None) @@ -666,9 +700,11 @@ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: It /** * Constructor that sets the routees to be used. * Java API + * @param routeePaths string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] */ - def this(t: java.lang.Iterable[String], w: Duration) = { - this(routees = iterableAsScalaIterable(t), within = w) + def this(routeePaths: java.lang.Iterable[String], w: Duration) = { + this(routees = iterableAsScalaIterable(routeePaths), within = w) } /** diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index 8cc5b94260..cdcc869b2a 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -45,9 +45,8 @@ You can also give the router already created routees as in: When you create a router programatically you define the number of routees *or* you pass already created routees to it. If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded. -*It is also worth pointing out that if you define the number of routees (``nr-of-instances`` or ``routees``) in -the configuration file then this value will be used instead of any programmatically sent parameters, but you must -also define the ``router`` property in the configuration.* +*It is also worth pointing out that if you define the ``router`` in the configuration file then this value will be used +instead of any programmatically sent parameters.* Once you have the router actor it is just to send messages to it as you would to any actor: @@ -125,7 +124,7 @@ Try to run it a couple of times to verify its behavior if you don't trust us. SmallestMailboxRouter ********************* -A Router that tries to send to the routee with fewest messages in mailbox. +A Router that tries to send to the non-suspended routee with fewest messages in mailbox. The selection is done in this order: * pick any idle routee (not processing message) with empty mailbox diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index 8c0e0f7366..4e75be8798 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -45,9 +45,8 @@ You can also give the router already created routees as in: When you create a router programatically you define the number of routees *or* you pass already created routees to it. If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded. 
-*It is also worth pointing out that if you define the number of routees (``nr-of-instances`` or ``routees``) in -the configuration file then this value will be used instead of any programmatically sent parameters, but you must -also define the ``router`` property in the configuration.* +*It is also worth pointing out that if you define the ``router`` in the configuration file then this value will be used +instead of any programmatically sent parameters.* Once you have the router actor it is just to send messages to it as you would to any actor: @@ -126,7 +125,7 @@ Try to run it a couple of times to verify its behavior if you don't trust us. SmallestMailboxRouter ********************* -A Router that tries to send to the routee with fewest messages in mailbox. +A Router that tries to send to the non-suspended routee with fewest messages in mailbox. The selection is done in this order: * pick any idle routee (not processing message) with empty mailbox From e7262e1a699c5b4aa198ef3617635046dad83427 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 12 Jan 2012 12:43:27 +0100 Subject: [PATCH 37/84] Removing ListenerManagement and making FSM extend Listeners (but retaining its subscribe/unsubscribe messages --- .../src/main/scala/akka/actor/FSM.scala | 12 +- .../main/scala/akka/routing/Listeners.scala | 2 +- .../src/main/scala/akka/routing/Routing.scala | 80 +- .../scala/akka/util/ListenerManagement.scala | 74 - akka-docs/additional/benchmarks.rst | 22 - akka-docs/additional/index.rst | 1 - akka-docs/disabled/camel.rst | 2901 ----------------- akka-docs/disabled/http.rst | 105 - akka-docs/disabled/microkernel.rst | 40 - 9 files changed, 46 insertions(+), 3191 deletions(-) delete mode 100644 akka-actor/src/main/scala/akka/util/ListenerManagement.scala delete mode 100644 akka-docs/additional/benchmarks.rst delete mode 100644 akka-docs/disabled/camel.rst delete mode 100644 akka-docs/disabled/http.rst delete mode 100644 akka-docs/disabled/microkernel.rst diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 1f8f9cba70..681351dca3 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -8,6 +8,7 @@ import akka.util._ import scala.collection.mutable import akka.event.Logging import akka.util.Duration._ +import akka.routing.Listeners object FSM { @@ -179,7 +180,7 @@ object FSM { * timerActive_? ("tock") * */ -trait FSM[S, D] extends ListenerManagement { +trait FSM[S, D] extends Listeners { this: Actor ⇒ import FSM._ @@ -447,9 +448,6 @@ trait FSM[S, D] extends ListenerManagement { for (te ← transitionEvent) { if (te.isDefinedAt(tuple)) te(tuple) } } - // ListenerManagement shall not start() or stop() listener actors - override protected val manageLifeCycleOfListeners = false - /* * ******************************************* * Main actor receive() method @@ -474,11 +472,11 @@ trait FSM[S, D] extends ListenerManagement { } case SubscribeTransitionCallBack(actorRef) ⇒ // TODO use DeathWatch to clean up list - addListener(actorRef) + listeners.add(actorRef) // send current state back as reference point actorRef ! 
CurrentState(self, currentState.stateName) case UnsubscribeTransitionCallBack(actorRef) ⇒ - removeListener(actorRef) + listeners.remove(actorRef) case value ⇒ { if (timeoutFuture.isDefined) { timeoutFuture.get.cancel() @@ -523,7 +521,7 @@ trait FSM[S, D] extends ListenerManagement { if (currentState.stateName != nextState.stateName) { this.nextState = nextState handleTransition(currentState.stateName, nextState.stateName) - notifyListeners(Transition(self, currentState.stateName, nextState.stateName)) + gossip(Transition(self, currentState.stateName, nextState.stateName)) } currentState = nextState val timeout = if (currentState.timeout.isDefined) currentState.timeout else stateTimeouts(currentState.stateName) diff --git a/akka-actor/src/main/scala/akka/routing/Listeners.scala b/akka-actor/src/main/scala/akka/routing/Listeners.scala index 7ec56014f8..05f22f8b4a 100644 --- a/akka-actor/src/main/scala/akka/routing/Listeners.scala +++ b/akka-actor/src/main/scala/akka/routing/Listeners.scala @@ -25,7 +25,7 @@ case class WithListeners(f: (ActorRef) ⇒ Unit) extends ListenerMessage * Send WithListeners(fun) to traverse the current listeners. */ trait Listeners { self: Actor ⇒ - private val listeners = new ConcurrentSkipListSet[ActorRef] + protected val listeners = new ConcurrentSkipListSet[ActorRef] protected def listenerManagement: Actor.Receive = { case Listen(l) ⇒ listeners add l diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 69589ae651..5e7c9ae701 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -515,7 +515,7 @@ object ScatterGatherFirstCompletedRouter { * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, within: Duration, - override val resizer: Option[Resizer] = None) + override val resizer: Option[Resizer] = None) extends RouterConfig with ScatterGatherFirstCompletedLike { /** @@ -593,57 +593,57 @@ case class DefaultResizer( */ lowerBound: Int = 1, /** - * The most number of routees the router should ever have. - * Must be greater than or equal to `lowerBound`. - */ + * The most number of routees the router should ever have. + * Must be greater than or equal to `lowerBound`. + */ upperBound: Int = 10, /** - * Threshold to evaluate if routee is considered to be busy (under pressure). - * Implementation depends on this value (default is 1). - *
      - *
-     * <li> 0:   number of routees currently processing a message.</li>
-     * <li> 1:   number of routees currently processing a message has
-     *           some messages in mailbox.</li>
-     * <li> > 1: number of routees with at least the configured `pressureThreshold`
-     *           messages in their mailbox. Note that estimating mailbox size of
-     *           default UnboundedMailbox is O(N) operation.</li>
-     * </ul>
-     */
+   * Threshold to evaluate if routee is considered to be busy (under pressure).
+   * Implementation depends on this value (default is 1).
+   * <ul>
+   * <li> 0:   number of routees currently processing a message.</li>
+   * <li> 1:   number of routees currently processing a message has
+   *           some messages in mailbox.</li>
+   * <li> > 1: number of routees with at least the configured `pressureThreshold`
+   *           messages in their mailbox. Note that estimating mailbox size of
+   *           default UnboundedMailbox is O(N) operation.</li>
+   * </ul>
    + */ pressureThreshold: Int = 1, /** - * Percentage to increase capacity whenever all routees are busy. - * For example, 0.2 would increase 20% (rounded up), i.e. if current - * capacity is 6 it will request an increase of 2 more routees. - */ + * Percentage to increase capacity whenever all routees are busy. + * For example, 0.2 would increase 20% (rounded up), i.e. if current + * capacity is 6 it will request an increase of 2 more routees. + */ rampupRate: Double = 0.2, /** - * Minimum fraction of busy routees before backing off. - * For example, if this is 0.3, then we'll remove some routees only when - * less than 30% of routees are busy, i.e. if current capacity is 10 and - * 3 are busy then the capacity is unchanged, but if 2 or less are busy - * the capacity is decreased. - * - * Use 0.0 or negative to avoid removal of routees. - */ + * Minimum fraction of busy routees before backing off. + * For example, if this is 0.3, then we'll remove some routees only when + * less than 30% of routees are busy, i.e. if current capacity is 10 and + * 3 are busy then the capacity is unchanged, but if 2 or less are busy + * the capacity is decreased. + * + * Use 0.0 or negative to avoid removal of routees. + */ backoffThreshold: Double = 0.3, /** - * Fraction of routees to be removed when the resizer reaches the - * backoffThreshold. - * For example, 0.1 would decrease 10% (rounded up), i.e. if current - * capacity is 9 it will request an decrease of 1 routee. - */ + * Fraction of routees to be removed when the resizer reaches the + * backoffThreshold. + * For example, 0.1 would decrease 10% (rounded up), i.e. if current + * capacity is 9 it will request an decrease of 1 routee. + */ backoffRate: Double = 0.1, /** - * When the resizer reduce the capacity the abandoned routee actors are stopped - * with PoisonPill after this delay. The reason for the delay is to give concurrent - * messages a chance to be placed in mailbox before sending PoisonPill. - * Use 0 seconds to skip delay. - */ + * When the resizer reduce the capacity the abandoned routee actors are stopped + * with PoisonPill after this delay. The reason for the delay is to give concurrent + * messages a chance to be placed in mailbox before sending PoisonPill. + * Use 0 seconds to skip delay. + */ stopDelay: Duration = 1.second, /** - * Number of messages between resize operation. - * Use 1 to resize before each message. - */ + * Number of messages between resize operation. + * Use 1 to resize before each message. + */ messagesPerResize: Int = 10) extends Resizer { /** diff --git a/akka-actor/src/main/scala/akka/util/ListenerManagement.scala b/akka-actor/src/main/scala/akka/util/ListenerManagement.scala deleted file mode 100644 index 1d6df328d5..0000000000 --- a/akka-actor/src/main/scala/akka/util/ListenerManagement.scala +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright (C) 2009-2011 Typesafe Inc. - */ - -package akka.util - -import akka.actor.Actor - -import java.util.concurrent.ConcurrentSkipListSet -import akka.actor.{ ActorInitializationException, ActorRef } - -/** - * A manager for listener actors. Intended for mixin by observables. - */ -trait ListenerManagement { this: Actor ⇒ - - private val listeners = new ConcurrentSkipListSet[ActorRef] - - /** - * Specifies whether listeners should be started when added and stopped when removed or not - */ - protected def manageLifeCycleOfListeners: Boolean = true - - /** - * Adds the listener this this registry's listener list. 
- * The listener is started by this method if manageLifeCycleOfListeners yields true. - */ - def addListener(listener: ActorRef) { - listeners add listener - } - - /** - * Removes the listener this this registry's listener list. - * The listener is stopped by this method if manageLifeCycleOfListeners yields true. - */ - def removeListener(listener: ActorRef) { - listeners remove listener - if (manageLifeCycleOfListeners) context.stop(listener) - } - - /* - * Returns whether there are any listeners currently - */ - def hasListeners: Boolean = !listeners.isEmpty - - /** - * Checks if a specific listener is registered. Pruned eventually when isTerminated==true in notify. - */ - def hasListener(listener: ActorRef): Boolean = listeners.contains(listener) - - protected[akka] def notifyListeners(message: ⇒ Any) { - if (hasListeners) { - val msg = message - val iterator = listeners.iterator - while (iterator.hasNext) { - val listener = iterator.next - if (listener.isTerminated) iterator.remove() - else listener ! msg - } - } - } - - /** - * Execute f with each listener as argument. - */ - protected[akka] def foreachListener(f: (ActorRef) ⇒ Unit) { - val iterator = listeners.iterator - while (iterator.hasNext) { - val listener = iterator.next - if (listener.isTerminated) iterator.remove() - else f(listener) - } - } -} diff --git a/akka-docs/additional/benchmarks.rst b/akka-docs/additional/benchmarks.rst deleted file mode 100644 index 19b577274f..0000000000 --- a/akka-docs/additional/benchmarks.rst +++ /dev/null @@ -1,22 +0,0 @@ -Benchmarks -========== - -Scalability, Throughput and Latency benchmark ---------------------------------------------- - -Simple Trading system. - -- `Here is the result with some graphs `_ -- `Here is the code `_ - -Compares: - -- Scala library Actors - - - Fire-forget - - Request-reply - -- Akka - - Request-reply - - Fire-forget with default dispatcher - - Fire-forget with Hawt dispatcher diff --git a/akka-docs/additional/index.rst b/akka-docs/additional/index.rst index f108deef51..811d2bb230 100644 --- a/akka-docs/additional/index.rst +++ b/akka-docs/additional/index.rst @@ -4,7 +4,6 @@ Additional Information .. toctree:: :maxdepth: 2 - benchmarks recipes companies-using-akka third-party-integrations diff --git a/akka-docs/disabled/camel.rst b/akka-docs/disabled/camel.rst deleted file mode 100644 index fd9d6c1181..0000000000 --- a/akka-docs/disabled/camel.rst +++ /dev/null @@ -1,2901 +0,0 @@ - -.. _camel-module: - -####### - Camel -####### - -======= -.. _Appendix E - Akka and Camel: http://www.manning.com/ibsen/appEsample.pdf -.. _Camel in Action: http://www.manning.com/ibsen/ - -Contents: - -.. contents:: :local: - -Other, more advanced external articles are: - -* `Akka Consumer Actors: New Features and Best Practices `_ -* `Akka Producer Actors: New Features and Best Practices `_ - - -Introduction -============ - -The akka-camel module allows actors, untyped actors, and typed actors to receive -and send messages over a great variety of protocols and APIs. This section gives -a brief overview of the general ideas behind the akka-camel module, the -remaining sections go into the details. In addition to the native Scala and Java -actor API, actors can now exchange messages with other systems over large number -of protocols and APIs such as HTTP, SOAP, TCP, FTP, SMTP or JMS, to mention a -few. At the moment, approximately 80 protocols and APIs are supported. 
- -The akka-camel module is based on `Apache Camel`_, a powerful and leight-weight -integration framework for the JVM. For an introduction to Apache Camel you may -want to read this `Apache Camel article`_. Camel comes with a -large number of `components`_ that provide bindings to different protocols and -APIs. The `camel-extra`_ project provides further components. - -.. _Apache Camel: http://camel.apache.org/ -.. _Apache Camel article: http://architects.dzone.com/articles/apache-camel-integration -.. _components: http://camel.apache.org/components.html -.. _camel-extra: http://code.google.com/p/camel-extra/ - -Usage of Camel's integration components in Akka is essentially a -one-liner. Here's an example. - -.. code-block:: scala - - import akka.actor.Actor - import akka.actor.Actor._ - import akka.camel.{Message, Consumer} - - class MyActor extends Actor with Consumer { - def endpointUri = "mina:tcp://localhost:6200?textline=true" - - def receive = { - case msg: Message => { /* ... */} - case _ => { /* ... */} - } - } - - // start and expose actor via tcp - val myActor = actorOf(Props[MyActor]) - -The above example exposes an actor over a tcp endpoint on port 6200 via Apache -Camel's `Mina component`_. The actor implements the endpointUri method to define -an endpoint from which it can receive messages. After starting the actor, tcp -clients can immediately send messages to and receive responses from that -actor. If the message exchange should go over HTTP (via Camel's `Jetty -component`_), only the actor's endpointUri method must be changed. - -.. _Mina component: http://camel.apache.org/mina.html -.. _Jetty component: http://camel.apache.org/jetty.html - -.. code-block:: scala - - class MyActor extends Actor with Consumer { - def endpointUri = "jetty:http://localhost:8877/example" - - def receive = { - case msg: Message => { /* ... */} - case _ => { /* ... */} - } - } - -Actors can also trigger message exchanges with external systems i.e. produce to -Camel endpoints. - -.. code-block:: scala - - import akka.actor.Actor - import akka.camel.{Producer, Oneway} - - class MyActor extends Actor with Producer with Oneway { - def endpointUri = "jms:queue:example" - } - -In the above example, any message sent to this actor will be added (produced) to -the example JMS queue. Producer actors may choose from the same set of Camel -components as Consumer actors do. - -The number of Camel components is constantly increasing. The akka-camel module -can support these in a plug-and-play manner. Just add them to your application's -classpath, define a component-specific endpoint URI and use it to exchange -messages over the component-specific protocols or APIs. This is possible because -Camel components bind protocol-specific message formats to a Camel-specific -`normalized message format`__. The normalized message format hides -protocol-specific details from Akka and makes it therefore very easy to support -a large number of protocols through a uniform Camel component interface. The -akka-camel module further converts mutable Camel messages into `immutable -representations`__ which are used by Consumer and Producer actors for pattern -matching, transformation, serialization or storage, for example. 
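For illustration only (the ``OrderLogger`` name and the ``file:`` endpoint URI below are made up for this sketch), a consumer actor can pattern match directly on such an immutable ``Message`` and destructure it into body and headers:

.. code-block:: scala

    import akka.actor.Actor
    import akka.camel.{Message, Consumer}

    // Sketch: destructure the immutable Message into its body and headers
    class OrderLogger extends Actor with Consumer {
      def endpointUri = "file:data/input/orders"

      def receive = {
        case Message(body, headers) =>
          // body and headers are immutable snapshots of the underlying Camel message
          println("received body=%s headers=%s".format(body, headers))
      }
    }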
- -__ https://svn.apache.org/repos/asf/camel/trunk/camel-core/src/main/java/org/apache/camel/Message.java -__ http://github.com/jboner/akka/blob/v0.8/akka-camel/src/main/scala/akka/Message.scala#L17 - - -Dependencies -============ - -Akka's Camel Integration consists of two modules - -* akka-camel - this module depends on akka-actor and camel-core (+ transitive - dependencies) and implements the Camel integration for (untyped) actors - -* akka-camel-typed - this module depends on akka-typed-actor and akka-camel (+ - transitive dependencies) and implements the Camel integration for typed actors - -The akka-camel-typed module is optional. To have both untyped and typed actors -working with Camel, add the following dependencies to your SBT project -definition. - -.. code-block:: scala - - import sbt._ - - class Project(info: ProjectInfo) extends DefaultProject(info) with AkkaProject { - // ... - val akkaCamel = akkaModule("camel") - val akkaCamelTyped = akkaModule("camel-typed") // optional typed actor support - // ... - } - - -.. _camel-consume-messages: - -Consume messages -================ - -Actors (untyped) ----------------- - -For actors (Scala) to receive messages, they must mixin the `Consumer`_ -trait. For example, the following actor class (Consumer1) implements the -endpointUri method, which is declared in the Consumer trait, in order to receive -messages from the ``file:data/input/actor`` Camel endpoint. Untyped actors -(Java) need to extend the abstract UntypedConsumerActor class and implement the -getEndpointUri() and onReceive(Object) methods. - -.. _Consumer: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Consumer.scala - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor - import akka.camel.{Message, Consumer} - - class Consumer1 extends Actor with Consumer { - def endpointUri = "file:data/input/actor" - - def receive = { - case msg: Message => println("received %s" format msg.bodyAs[String]) - } - } - -**Java** - -.. code-block:: java - - import akka.camel.Message; - import akka.camel.UntypedConsumerActor; - - public class Consumer1 extends UntypedConsumerActor { - public String getEndpointUri() { - return "file:data/input/actor"; - } - - public void onReceive(Object message) { - Message msg = (Message)message; - String body = msg.getBodyAs(String.class); - System.out.println(String.format("received %s", body)) - } - } - -Whenever a file is put into the data/input/actor directory, its content is -picked up by the Camel `file component`_ and sent as message to the -actor. Messages consumed by actors from Camel endpoints are of type -`Message`_. These are immutable representations of Camel messages. - -.. _file component: http://camel.apache.org/file2.html -.. _Message: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Message.scala - -For Message usage examples refer to the unit tests: - -* Message unit tests - `Scala API `_ -* Message unit tests - `Java API `_ - -Here's another example that sets the endpointUri to -``jetty:http://localhost:8877/camel/default``. It causes Camel's `Jetty -component`_ to start an embedded `Jetty`_ server, accepting HTTP connections -from localhost on port 8877. - -.. _Jetty component: http://camel.apache.org/jetty.html -.. _Jetty: http://www.eclipse.org/jetty/ - -**Scala** - -.. 
code-block:: scala - - import akka.actor.Actor - import akka.camel.{Message, Consumer} - - class Consumer2 extends Actor with Consumer { - def endpointUri = "jetty:http://localhost:8877/camel/default" - - def receive = { - case msg: Message => self.reply("Hello %s" format msg.bodyAs[String]) - } - } - -**Java** - -.. code-block:: java - - import akka.camel.Message; - import akka.camel.UntypedConsumerActor; - - public class Consumer2 extends UntypedConsumerActor { - public String getEndpointUri() { - return "jetty:http://localhost:8877/camel/default"; - } - - public void onReceive(Object message) { - Message msg = (Message)message; - String body = msg.getBodyAs(String.class); - getContext().tryReply(String.format("Hello %s", body)); - } - } - -After starting the actor, clients can send messages to that actor by POSTing to -``http://localhost:8877/camel/default``. The actor sends a response by using the -self.reply method (Scala). For returning a message body and headers to the HTTP -client the response type should be `Message`_. For any other response type, a -new Message object is created by akka-camel with the actor response as message -body. - -.. _Message: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Message.scala - - -Typed actors ------------- - -Typed actors can also receive messages from Camel endpoints. In contrast to -(untyped) actors, which only implement a single receive or onReceive method, a -typed actor may define several (message processing) methods, each of which can -receive messages from a different Camel endpoint. For a typed actor method to be -exposed as Camel endpoint it must be annotated with the `@consume -annotation`_. For example, the following typed consumer actor defines two -methods, foo and bar. - -.. _@consume annotation: http://github.com/jboner/akka/blob/master/akka-camel/src/main/java/akka/camel/consume.java - -**Scala** - -.. code-block:: scala - - import org.apache.camel.{Body, Header} - import akka.actor.TypedActor - import akka.camel.consume - - trait TypedConsumer1 { - @consume("file:data/input/foo") - def foo(body: String): Unit - - @consume("jetty:http://localhost:8877/camel/bar") - def bar(@Body body: String, @Header("X-Whatever") header: String): String - } - - class TypedConsumer1Impl extends TypedActor with TypedConsumer1 { - def foo(body: String) = println("Received message: %s" format body) - def bar(body: String, header: String) = "body=%s header=%s" format (body, header) - } - -**Java** - -.. code-block:: java - - import org.apache.camel.Body; - import org.apache.camel.Header; - import akka.actor.TypedActor; - import akka.camel.consume; - - public interface TypedConsumer1 { - @consume("file:data/input/foo") - public void foo(String body); - - @consume("jetty:http://localhost:8877/camel/bar") - public String bar(@Body String body, @Header("X-Whatever") String header); - } - - public class TypedConsumer1Impl extends TypedActor implements TypedConsumer1 { - public void foo(String body) { - System.out.println(String.format("Received message: ", body)); - } - - public String bar(String body, String header) { - return String.format("body=%s header=%s", body, header); - } - } - -The foo method can be invoked by placing a file in the data/input/foo -directory. Camel picks up the file from this directory and akka-camel invokes -foo with the file content as argument (converted to a String). Camel -automatically tries to convert messages to appropriate types as defined by the -method parameter(s). 
The conversion rules are described in detail on the -following pages: - -* `Bean integration `_ -* `Bean binding `_ -* `Parameter binding `_ - -The bar method can be invoked by POSTing a message to -http://localhost:8877/camel/bar. Here, parameter binding annotations are used to -tell Camel how to extract data from the HTTP message. The @Body annotation binds -the HTTP request body to the first parameter, the @Header annotation binds the -X-Whatever header to the second parameter. The return value is sent as HTTP -response message body to the client. - -Parameter binding annotations must be placed on the interface, the @consume -annotation can also be placed on the methods in the implementation class. - - -.. _camel-publishing: - -Consumer publishing -------------------- - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -Publishing a consumer actor at its Camel endpoint occurs when the actor is -started. Publication is done asynchronously; setting up an endpoint (more -precisely, the route from that endpoint to the actor) may still be in progress -after the ActorRef method returned. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor._ - - val actor = actorOf(Props[Consumer1]) // create Consumer actor and activate endpoint in background - -**Java** - -.. code-block:: java - - import static akka.actor.Actors.*; - import akka.actor.ActorRef; - - ActorRef actor = actorOf(new Props(Consumer1.class)); // create Consumer actor and activate endpoint in background - - -Typed actors -^^^^^^^^^^^^ - -Publishing of typed actor methods is done when the typed actor is created with -one of the TypedActor.newInstance(..) methods. Publication is done in the -background here as well i.e. it may still be in progress when -TypedActor.newInstance(..) returns. - -**Scala** - -.. code-block:: scala - - import akka.actor.TypedActor - - // create TypedConsumer1 object and activate endpoint(s) in background - val consumer = TypedActor.newInstance(classOf[TypedConsumer1], classOf[TypedConumer1Impl]) - -**Java** - -.. code-block:: java - - import akka.actor.TypedActor; - - // create TypedConsumer1 object and activate endpoint(s) in background - TypedConsumer1 consumer = TypedActor.newInstance(TypedConsumer1.class, TypedConumer1Impl.class); - - -.. _camel-consumers-and-camel-service: - -Consumers and the CamelService ------------------------------- - -Publishing of consumer actors or typed actor methods requires a running -CamelService. The Akka :ref:`microkernel` can start a CamelService automatically -(see :ref:`camel-configuration`). When using Akka in other environments, a -CamelService must be started manually. Applications can do that by calling the -CamelServiceManager.startCamelService method. - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelServiceManager._ - - startCamelService - -**Java** - -.. code-block:: java - - import static akka.camel.CamelServiceManager.*; - - startCamelService(); - -If applications need to wait for a certain number of consumer actors or typed -actor methods to be published they can do so with the -``CamelServiceManager.mandatoryService.awaitEndpointActivation`` method, where -``CamelServiceManager.mandatoryService`` is the current CamelService instance -(or throws an IllegalStateException there's no current CamelService). - -**Scala** - -.. 
code-block:: scala - - import akka.camel.CamelServiceManager._ - - startCamelService - - // Wait for three conumer endpoints to be activated - mandatoryService.awaitEndpointActivation(3) { - // Start three consumer actors (for example) - // ... - } - - // Communicate with consumer actors via their activated endpoints - // ... - -**Java** - -.. code-block:: java - - import akka.japi.SideEffect; - import static akka.camel.CamelServiceManager.*; - - startCamelService(); - - // Wait for three conumer endpoints to be activated - getMandatoryService().awaitEndpointActivation(3, new SideEffect() { - public void apply() { - // Start three consumer actors (for example) - // ... - } - }); - - // Communicate with consumer actors via their activated endpoints - // ... - -Alternatively, one can also use ``Option[CamelService]`` returned by -``CamelServiceManager.service``. - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelServiceManager._ - - startCamelService - - for(s <- service) s.awaitEndpointActivation(3) { - // ... - } - -**Java** - -.. code-block:: java - - import java.util.concurrent.CountDownLatch; - - import akka.camel.CamelService; - import static akka.camel.CamelServiceManager.*; - - startCamelService(); - - for (CamelService s : getService()) s.awaitEndpointActivation(3, new SideEffect() { - public void apply() { - // ... - } - }); - -:ref:`camel-configuration` additionally describes how a CamelContext, that is -managed by a CamelService, can be cutomized before starting the service. When -the CamelService is no longer needed, it should be stopped. - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelServiceManager._ - - stopCamelService - -**Java** - -.. code-block:: java - - import static akka.camel.CamelServiceManager.*; - - stopCamelService(); - - -.. _camel-unpublishing: - -Consumer un-publishing ----------------------- - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -When an actor is stopped, the route from the endpoint to that actor is stopped -as well. For example, stopping an actor that has been previously published at -``http://localhost:8877/camel/test`` will cause a connection failure when trying -to access that endpoint. Stopping the route is done asynchronously; it may be -still in progress after the ``ActorRef.stop`` method returned. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor._ - - val actor = actorOf(Props[Consumer1]) // create Consumer actor - actor // activate endpoint in background - // ... - actor.stop // deactivate endpoint in background - -**Java** - -.. code-block:: java - - import static akka.actor.Actors.*; - import akka.actor.ActorRef; - - ActorRef actor = actorOf(new Props(Consumer1.class)); // create Consumer actor and activate endpoint in background - // ... - actor.stop(); // deactivate endpoint in background - - -Typed actors -^^^^^^^^^^^^ - -When a typed actor is stopped, routes to @consume annotated methods of this -typed actors are stopped as well. Stopping the routes is done asynchronously; it -may be still in progress after the TypedActor.stop method returned. - -**Scala** - -.. code-block:: scala - - import akka.actor.TypedActor - - // create TypedConsumer1 object and activate endpoint(s) in background - val consumer = TypedActor.newInstance(classOf[TypedConsumer1], classOf[TypedConumer1Impl]) - - // deactivate endpoints in background - TypedActor.stop(consumer) - -**Java** - -.. 
code-block:: java - - import akka.actor.TypedActor; - - // Create typed consumer actor and activate endpoints in background - TypedConsumer1 consumer = TypedActor.newInstance(TypedConsumer1.class, TypedConumer1Impl.class); - - // Deactivate endpoints in background - TypedActor.stop(consumer); - - -.. _camel-acknowledgements: - -Acknowledgements ----------------- - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -With in-out message exchanges, clients usually know that a message exchange is -done when they receive a reply from a consumer actor. The reply message can be a -Message (or any object which is then internally converted to a Message) on -success, and a Failure message on failure. - -With in-only message exchanges, by default, an exchange is done when a message -is added to the consumer actor's mailbox. Any failure or exception that occurs -during processing of that message by the consumer actor cannot be reported back -to the endpoint in this case. To allow consumer actors to positively or -negatively acknowledge the receipt of a message from an in-only message -exchange, they need to override the ``autoack`` (Scala) or ``isAutoack`` (Java) -method to return false. In this case, consumer actors must reply either with a -special Ack message (positive acknowledgement) or a Failure (negative -acknowledgement). - -**Scala** - -.. code-block:: scala - - import akka.camel.{Ack, Failure} - // ... other imports omitted - - class Consumer3 extends Actor with Consumer { - override def autoack = false - - def endpointUri = "jms:queue:test" - - def receive = { - // ... - self.reply(Ack) // on success - // ... - self.reply(Failure(...)) // on failure - } - } - -**Java** - -.. code-block:: java - - import akka.camel.Failure - import static akka.camel.Ack.ack; - // ... other imports omitted - - public class Consumer3 extends UntypedConsumerActor { - - public String getEndpointUri() { - return "jms:queue:test"; - } - - public boolean isAutoack() { - return false; - } - - public void onReceive(Object message) { - // ... - getContext().reply(ack()) // on success - // ... - val e: Exception = ... - getContext().reply(new Failure(e)) // on failure - } - } - - -.. _camel-blocking-exchanges: - -Blocking exchanges ------------------- - -By default, message exchanges between a Camel endpoint and a consumer actor are -non-blocking because, internally, the ! (bang) operator is used to commicate -with the actor. The route to the actor does not block waiting for a reply. The -reply is sent asynchronously (see also :ref:`camel-asynchronous-routing`). -Consumer actors however can be configured to make this interaction blocking. - -**Scala** - -.. code-block:: scala - - class ExampleConsumer extends Actor with Consumer { - override def blocking = true - - def endpointUri = ... - def receive = { - // ... - } - } - -**Java** - -.. code-block:: java - - public class ExampleConsumer extends UntypedConsumerActor { - - public boolean isBlocking() { - return true; - } - - public String getEndpointUri() { - // ... - } - - public void onReceive(Object message) { - // ... - } - } - -In this case, the ``!!`` (bangbang) operator is used internally to communicate -with the actor which blocks a thread until the consumer sends a response or -throws an exception within receive. Although it may decrease scalability, this -setting can simplify error handling (see `this article`_) or allows timeout -configurations on actor-level (see :ref:`camel-timeout`). - -.. 
_this article: http://krasserm.blogspot.com/2011/02/akka-consumer-actors-new-features-and.html - - -.. _camel-timeout: - -Consumer timeout ----------------- - -Endpoints that support two-way communications need to wait for a response from -an (untyped) actor or typed actor before returning it to the initiating client. -For some endpoint types, timeout values can be defined in an endpoint-specific -way which is described in the documentation of the individual `Camel -components`_. Another option is to configure timeouts on the level of consumer -actors and typed consumer actors. - -.. _Camel components: http://camel.apache.org/components.html - - -Typed actors -^^^^^^^^^^^^ - -For typed actors, timeout values for method calls that return a result can be -set when the typed actor is created. In the following example, the timeout is -set to 20 seconds (default is 5 seconds). - -**Scala** - -.. code-block:: scala - - import akka.actor.TypedActor - - val consumer = TypedActor.newInstance(classOf[TypedConsumer1], classOf[TypedConumer1Impl], 20000 /* 20 seconds */) - -**Java** - -.. code-block:: java - - import akka.actor.TypedActor; - - TypedConsumer1 consumer = TypedActor.newInstance(TypedConsumer1.class, TypedConumer1Impl.class, 20000 /* 20 seconds */); - - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -Two-way communications between a Camel endpoint and an (untyped) actor are -initiated by sending the request message to the actor with the ``!`` (bang) -operator and the actor replies to the endpoint when the response is ready. In -order to support timeouts on actor-level, endpoints need to send the request -message with the ``!!`` (bangbang) operator for which a timeout value is -applicable. This can be achieved by overriding the Consumer.blocking method to -return true. - -**Scala** - -.. code-block:: scala - - class Consumer2 extends Actor with Consumer { - self.timeout = 20000 // timeout set to 20 seconds - - override def blocking = true - - def endpointUri = "direct:example" - - def receive = { - // ... - } - } - -**Java** - -.. code-block:: java - - public class Consumer2 extends UntypedConsumerActor { - - public Consumer2() { - getContext().setTimeout(20000); // timeout set to 20 seconds - } - - public String getEndpointUri() { - return "direct:example"; - } - - public boolean isBlocking() { - return true; - } - - public void onReceive(Object message) { - // ... - } - } - -This is a valid approach for all endpoint types that do not "natively" support -asynchronous two-way message exchanges. For all other endpoint types (like -`Jetty`_ endpoints) is it not recommended to switch to blocking mode but rather -to configure timeouts in an endpoint-specific way (see -also :ref:`camel-asynchronous-routing`). - - -Remote consumers ----------------- - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -Publishing of remote consumer actors is always done on the server side, local -proxies are never published. Hence the CamelService must be started on the -remote node. For example, to publish an (untyped) actor on a remote node at -endpoint URI ``jetty:http://localhost:6644/remote-actor-1``, define the -following consumer actor class. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor - import akka.annotation.consume - import akka.camel.Consumer - - class RemoteActor1 extends Actor with Consumer { - def endpointUri = "jetty:http://localhost:6644/remote-actor-1" - - protected def receive = { - case msg => self.reply("response from remote actor 1") - } - } - -**Java** - -.. 
code-block:: java - - import akka.camel.UntypedConsumerActor; - - public class RemoteActor1 extends UntypedConsumerActor { - public String getEndpointUri() { - return "jetty:http://localhost:6644/remote-actor-1"; - } - - public void onReceive(Object message) { - getContext().tryReply("response from remote actor 1"); - } - } - -On the remote node, start a `CamelService`_, start a remote server, create the -actor and register it at the remote server. - -.. _CamelService: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/CamelService.scala - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelServiceManager._ - import akka.actor.Actor._ - import akka.actor.ActorRef - - // ... - startCamelService - - val consumer = val consumer = actorOf(Props[RemoteActor1]) - - remote.start("localhost", 7777) - remote.register(consumer) // register and start remote consumer - // ... - -**Java** - -.. code-block:: java - - import akka.camel.CamelServiceManager; - import static akka.actor.Actors.*; - - // ... - CamelServiceManager.startCamelService(); - - ActorRef actor = actorOf(new Props(RemoteActor1.class)); - - remote().start("localhost", 7777); - remote().register(actor); // register and start remote consumer - // ... - -Explicitly starting a CamelService can be omitted when Akka is running in Kernel -mode, for example (see also :ref:`camel-configuration`). - - -Typed actors -^^^^^^^^^^^^ - -Remote typed consumer actors can be registered with one of the -``registerTyped*`` methods on the remote server. The following example registers -the actor with the custom id "123". - -**Scala** - -.. code-block:: scala - - import akka.actor.TypedActor - - // ... - val obj = TypedActor.newRemoteInstance( - classOf[SampleRemoteTypedConsumer], - classOf[SampleRemoteTypedConsumerImpl]) - - remote.registerTypedActor("123", obj) - // ... - -**Java** - -.. code-block:: java - - import akka.actor.TypedActor; - - SampleRemoteTypedConsumer obj = (SampleRemoteTypedConsumer)TypedActor.newInstance( - SampleRemoteTypedConsumer.class, - SampleRemoteTypedConsumerImpl.class); - - remote.registerTypedActor("123", obj) - // ... - - -Produce messages -================ - -A minimum pre-requisite for producing messages to Camel endpoints with producer -actors (see below) is an initialized and started CamelContextManager. - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelContextManager - - CamelContextManager.init // optionally takes a CamelContext as argument - CamelContextManager.start // starts the managed CamelContext - -**Java** - -.. code-block:: java - - import akka.camel.CamelContextManager; - - CamelContextManager.init(); // optionally takes a CamelContext as argument - CamelContextManager; // starts the managed CamelContext - -For using producer actors, application may also start a CamelService. This will -not only setup a CamelContextManager behind the scenes but also register -listeners at the actor registry (needed to publish consumer actors). If your -application uses producer actors only and you don't want to have the (very -small) overhead generated by the registry listeners then setting up a -CamelContextManager without starting CamelService is recommended. Otherwise, -just start a CamelService as described for consumer -actors: :ref:`camel-consumers-and-camel-service`. 
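As a minimal sketch of this producer-only setup (it assumes a producer actor such as the ``Producer1`` class shown in the next section), initializing and starting the managed CamelContext is all that is needed before creating producer actors:

.. code-block:: scala

    import akka.actor.Actor._
    import akka.actor.Props
    import akka.camel.CamelContextManager

    // producer-only setup: no CamelService is started here
    CamelContextManager.init  // optionally takes a CamelContext as argument
    CamelContextManager.start // starts the managed CamelContext

    // any message sent to the producer actor is produced to its Camel endpoint
    val producer = actorOf(Props[Producer1])
    producer ! "akka rocks"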
- - -Producer trait --------------- - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -For sending messages to Camel endpoints, actors - -* written in Scala need to mixin the `Producer`_ trait and implement the - endpointUri method. - -* written in Java need to extend the abstract UntypedProducerActor class and - implement the getEndpointUri() method. By extending the UntypedProducerActor - class, untyped actors (Java) inherit the behaviour of the Producer trait. - -.. _Producer: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Producer.scala - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor - import akka.camel.Producer - - class Producer1 extends Actor with Producer { - def endpointUri = "http://localhost:8080/news" - } - -**Java** - -.. code-block:: java - - import akka.camel.UntypedProducerActor; - - public class Producer1 extends UntypedProducerActor { - public String getEndpointUri() { - return "http://localhost:8080/news"; - } - } - -Producer1 inherits a default implementation of the receive method from the -Producer trait. To customize a producer actor's default behavior it is -recommended to override the Producer.receiveBeforeProduce and -Producer.receiveAfterProduce methods. This is explained later in more detail. -Actors should not override the default Producer.receive method. - -Any message sent to a Producer actor (or UntypedProducerActor) will be sent to -the associated Camel endpoint, in the above example to -``http://localhost:8080/news``. Response messages (if supported by the -configured endpoint) will, by default, be returned to the original sender. The -following example uses the ``?`` operator (Scala) to send a message to a -Producer actor and waits for a response. In Java, the sendRequestReply method is -used. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor._ - import akka.actor.ActorRef - - val producer = actorOf(Props[Producer1]) - val response = (producer ? "akka rocks").get - val body = response.bodyAs[String] - -**Java** - -.. code-block:: java - - import akka.actor.ActorRef; - import static akka.actor.Actors.*; - import akka.camel.Message; - - ActorRef producer = actorOf(new Props(Producer1.class)); - Message response = (Message)producer.sendRequestReply("akka rocks"); - String body = response.getBodyAs(String.class) - -If the message is sent using the ! operator (or the tell method in Java) -then the response message is sent back asynchronously to the original sender. In -the following example, a Sender actor sends a message (a String) to a producer -actor using the ! operator and asynchronously receives a response (of type -Message). - -**Scala** - -.. code-block:: scala - - import akka.actor.{Actor, ActorRef} - import akka.camel.Message - - class Sender(producer: ActorRef) extends Actor { - def receive = { - case request: String => producer ! request - case response: Message => { - /* process response ... */ - } - // ... - } - } - -**Java** - -.. code-block:: java - - // TODO - - -.. _camel-custom-processing: - -Custom Processing -^^^^^^^^^^^^^^^^^ - -Instead of replying to the initial sender, producer actors can implement custom -reponse processing by overriding the receiveAfterProduce method (Scala) or -onReceiveAfterProduce method (Java). In the following example, the reponse -message is forwarded to a target actor instead of being replied to the original -sender. - -**Scala** - -.. 
code-block:: scala - - import akka.actor.{Actor, ActorRef} - import akka.camel.Producer - - class Producer1(target: ActorRef) extends Actor with Producer { - def endpointUri = "http://localhost:8080/news" - - override protected def receiveAfterProduce = { - // do not reply but forward result to target - case msg => target forward msg - } - } - -**Java** - -.. code-block:: java - - import akka.actor.ActorRef; - import akka.camel.UntypedProducerActor; - - public class Producer1 extends UntypedProducerActor { - private ActorRef target; - - public Producer1(ActorRef target) { - this.target = target; - } - - public String getEndpointUri() { - return "http://localhost:8080/news"; - } - - @Override - public void onReceiveAfterProduce(Object message) { - target.forward((Message)message, getContext()); - } - } - -To create an untyped actor instance with a constructor argument, a factory is -needed (this should be doable without a factory in upcoming Akka versions). - -.. code-block:: java - - import akka.actor.ActorRef; - import akka.actor.UntypedActorFactory; - import akka.actor.UntypedActor; - - public class Producer1Factory implements UntypedActorFactory { - - private ActorRef target; - - public Producer1Factory(ActorRef target) { - this.target = target; - } - - public UntypedActor create() { - return new Producer1(target); - } - } - -The instanitation is done with the Actors.actorOf method and the factory as -argument. - -.. code-block:: java - - import static akka.actor.Actors.*; - import akka.actor.ActorRef; - - ActorRef target = ... - ActorRef producer = actorOf(Props(new Producer1Factory(target))); - producer; - -Before producing messages to endpoints, producer actors can pre-process them by -overriding the receiveBeforeProduce method (Scala) or onReceiveBeforeProduce -method (Java). - -**Scala** - -.. code-block:: scala - - import akka.actor.{Actor, ActorRef} - import akka.camel.{Message, Producer} - - class Producer1(target: ActorRef) extends Actor with Producer { - def endpointUri = "http://localhost:8080/news" - - override protected def receiveBeforeProduce = { - case msg: Message => { - // do some pre-processing (e.g. add endpoint-specific message headers) - // ... - - // and return the modified message - msg - } - } - } - -**Java** - -.. code-block:: java - - import akka.actor.ActorRef; - import akka.camel.Message - import akka.camel.UntypedProducerActor; - - public class Producer1 extends UntypedProducerActor { - private ActorRef target; - - public Producer1(ActorRef target) { - this.target = target; - } - - public String getEndpointUri() { - return "http://localhost:8080/news"; - } - - @Override - public Object onReceiveBeforeProduce(Object message) { - Message msg = (Message)message; - // do some pre-processing (e.g. add endpoint-specific message headers) - // ... - - // and return the modified message - return msg - } - } - - -Producer configuration options -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The interaction of producer actors with Camel endpoints can be configured to be -one-way or two-way (by initiating in-only or in-out message exchanges, -respectively). By default, the producer initiates an in-out message exchange -with the endpoint. For initiating an in-only exchange, producer actors - -* written in Scala either have to override the oneway method to return true -* written in Java have to override the isOneway method to return true. - -**Scala** - -.. 
code-block:: scala - - import akka.camel.Producer - - class Producer2 extends Actor with Producer { - def endpointUri = "jms:queue:test" - override def oneway = true - } - -**Java** - -.. code-block:: java - - import akka.camel.UntypedProducerActor; - - public class SampleUntypedReplyingProducer extends UntypedProducerActor { - public String getEndpointUri() { - return "jms:queue:test"; - } - - @Override - public boolean isOneway() { - return true; - } - } - -Message correlation -^^^^^^^^^^^^^^^^^^^ - -To correlate request with response messages, applications can set the -Message.MessageExchangeId message header. - -**Scala** - -.. code-block:: scala - - import akka.camel.Message - - producer ! Message("bar", Map(Message.MessageExchangeId -> "123")) - -**Java** - -.. code-block:: java - - // TODO - -Responses of type Message or Failure will contain that header as well. When -receiving messages from Camel endpoints this message header is already set (see -:ref:`camel-consume-messages`). - - -Matching responses -^^^^^^^^^^^^^^^^^^ - -The following code snippet shows how to best match responses when sending -messages with the ``?`` operator (Scala) or with the ``ask`` method -(Java). - -**Scala** - -.. code-block:: scala - - val response = (producer ? message).get - - response match { - case Some(Message(body, headers)) => ... - case Some(Failure(exception, headers)) => ... - case _ => ... - } - -**Java** - -.. code-block:: java - - // TODO - - -ProducerTemplate ----------------- - -The `Producer`_ trait (and the abstract UntypedProducerActor class) is a very -convenient way for actors to produce messages to Camel endpoints. (Untyped) -actors and typed actors may also use a Camel `ProducerTemplate`_ for producing -messages to endpoints. For typed actors it's the only way to produce messages to -Camel endpoints. - -At the moment, only the Producer trait fully supports asynchronous in-out -message exchanges with Camel endpoints without allocating a thread for the full -duration of the exchange. For example, when using endpoints that support -asynchronous message exchanges (such as Jetty endpoints that internally use -`Jetty's asynchronous HTTP client`_) then usage of the Producer trait is highly -recommended (see also :ref:`camel-asynchronous-routing`). - -.. _Producer: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Producer.scala -.. _ProducerTemplate: http://camel.apache.org/maven/camel-2.2.0/camel-core/apidocs/index.html -.. _Jetty's asynchronous HTTP client: http://wiki.eclipse.org/Jetty/Tutorial/HttpClient - - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -A managed ProducerTemplate instance can be obtained via -CamelContextManager.mandatoryTemplate. In the following example, an actor uses a -ProducerTemplate to send a one-way message to a ``direct:news`` endpoint. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor - import akka.camel.CamelContextManager - - class ProducerActor extends Actor { - protected def receive = { - // one-way message exchange with direct:news endpoint - case msg => CamelContextManager.mandatoryTemplate.sendBody("direct:news", msg) - } - } - -**Java** - -.. 
code-block:: java - - import akka.actor.UntypedActor; - import akka.camel.CamelContextManager; - - public class SampleUntypedActor extends UntypedActor { - public void onReceive(Object msg) { - CamelContextManager.getMandatoryTemplate().sendBody("direct:news", msg); - } - } - -Alternatively, one can also use ``Option[ProducerTemplate]`` returned by -``CamelContextManager.template``. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor - import akka.camel.CamelContextManager - - class ProducerActor extends Actor { - protected def receive = { - // one-way message exchange with direct:news endpoint - case msg => for(t <- CamelContextManager.template) t.sendBody("direct:news", msg) - } - } - -**Java** - -.. code-block:: java - - import org.apache.camel.ProducerTemplate - - import akka.actor.UntypedActor; - import akka.camel.CamelContextManager; - - public class SampleUntypedActor extends UntypedActor { - public void onReceive(Object msg) { - for (ProducerTemplate t : CamelContextManager.getTemplate()) { - t.sendBody("direct:news", msg); - } - } - } - -For initiating a a two-way message exchange, one of the -``ProducerTemplate.request*`` methods must be used. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor - import akka.camel.CamelContextManager - - class ProducerActor extends Actor { - protected def receive = { - // two-way message exchange with direct:news endpoint - case msg => self.reply(CamelContextManager.mandatoryTemplate.requestBody("direct:news", msg)) - } - } - -**Java** - -.. code-block:: java - - import akka.actor.UntypedActor; - import akka.camel.CamelContextManager; - - public class SampleUntypedActor extends UntypedActor { - public void onReceive(Object msg) { - getContext().tryReply(CamelContextManager.getMandatoryTemplate().requestBody("direct:news", msg)); - } - } - - -Typed actors -^^^^^^^^^^^^ - -Typed Actors get access to a managed ProducerTemplate in the same way, as shown -in the next example. - -**Scala** - -.. code-block:: scala - - // TODO - -**Java** - -.. code-block:: java - - import akka.actor.TypedActor; - import akka.camel.CamelContextManager; - - public class SampleProducerImpl extends TypedActor implements SampleProducer { - public void foo(String msg) { - ProducerTemplate template = CamelContextManager.getMandatoryTemplate(); - template.sendBody("direct:news", msg); - } - } - - -.. _camel-asynchronous-routing: - -Asynchronous routing -==================== - -Since Akka 0.10, in-out message exchanges between endpoints and actors are -designed to be asynchronous. This is the case for both, consumer and producer -actors. - -* A consumer endpoint sends request messages to its consumer actor using the ``!`` - (bang) operator and the actor returns responses with self.reply once they are - ready. The sender reference used for reply is an adapter to Camel's asynchronous - routing engine that implements the ActorRef trait. - -* A producer actor sends request messages to its endpoint using Camel's - asynchronous routing engine. Asynchronous responses are wrapped and added to the - producer actor's mailbox for later processing. By default, response messages are - returned to the initial sender but this can be overridden by Producer - implementations (see also description of the ``receiveAfterProcessing`` method - in :ref:`camel-custom-processing`). - -However, asynchronous two-way message exchanges, without allocating a thread for -the full duration of exchange, cannot be generically supported by Camel's -asynchronous routing engine alone. 
This must be supported by the individual -`Camel components`_ (from which endpoints are created) as well. They must be -able to suspend any work started for request processing (thereby freeing threads -to do other work) and resume processing when the response is ready. This is -currently the case for a `subset of components`_ such as the `Jetty component`_. -All other Camel components can still be used, of course, but they will cause -allocation of a thread for the duration of an in-out message exchange. There's -also a :ref:`camel-async-example` that implements both, an asynchronous -consumer and an asynchronous producer, with the jetty component. - -.. _Camel components: http://camel.apache.org/components.html -.. _subset of components: http://camel.apache.org/asynchronous-routing-engine.html -.. _Jetty component: http://camel.apache.org/jetty.html - - -Fault tolerance -=============== - -Consumer actors and typed actors can be also managed by supervisors. If a -consumer is configured to be restarted upon failure the associated Camel -endpoint is not restarted. It's behaviour during restart is as follows. - -* A one-way (in-only) message exchange will be queued by the consumer and - processed once restart completes. - -* A two-way (in-out) message exchange will wait and either succeed after restart - completes or time-out when the restart duration exceeds - the :ref:`camel-timeout`. - -If a consumer is configured to be shut down upon failure, the associated -endpoint is shut down as well. For details refer to :ref:`camel-unpublishing`. - -For examples, tips and trick how to implement fault-tolerant consumer and -producer actors, take a look at these two articles. - -* `Akka Consumer Actors: New Features and Best Practices `_ -* `Akka Producer Actors: New Features and Best Practices `_ - - -.. _camel-configuration: - -CamelService configuration -========================== - -For publishing consumer actors and typed actor methods -(:ref:`camel-publishing`), applications must start a CamelService. When starting -Akka in :ref:`microkernel` mode then a CamelService can be started automatically -when camel is added to the enabled-modules list in :ref:`configuration`, for example: - -.. code-block:: none - - akka { - ... - enabled-modules = ["camel"] # Options: ["remote", "camel", "http"] - ... - } - -Applications that do not use the Akka Kernel, such as standalone applications -for example, need to start a CamelService manually, as explained in the -following subsections.When starting a CamelService manually, settings in -:ref:`configuration` are ignored. - - -Standalone applications ------------------------ - -Standalone application should create and start a CamelService in the following way. - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelServiceManager._ - - startCamelService - -**Java** - -.. code-block:: java - - import static akka.camel.CamelServiceManager.*; - - startCamelService(); - -Internally, a CamelService uses the CamelContextManager singleton to manage a -CamelContext. A CamelContext manages the routes from endpoints to consumer -actors and typed actors. These routes are added and removed at runtime (when -(untyped) consumer actors and typed consumer actors are started and stopped). -Applications may additionally want to add their own custom routes or modify the -CamelContext in some other way. This can be done by initializing the -CamelContextManager manually and making modifications to CamelContext **before** -the CamelService is started. - -**Scala** - -.. 
code-block:: scala - - import org.apache.camel.builder.RouteBuilder - - import akka.camel.CamelContextManager - import akka.camel.CamelServiceManager._ - - CamelContextManager.init - - // add a custom route to the managed CamelContext - CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder) - - startCamelService - - // an application-specific route builder - class CustomRouteBuilder extends RouteBuilder { - def configure { - // ... - } - } - -**Java** - -.. code-block:: java - - import org.apache.camel.builder.RouteBuilder; - - import akka.camel.CamelContextManager; - import static akka.camel.CamelServiceManager.*; - - CamelContextManager.init(); - - // add a custom route to the managed CamelContext - CamelContextManager.getMandatoryContext().addRoutes(new CustomRouteBuilder()); - - startCamelService(); - - // an application-specific route builder - private static class CustomRouteBuilder extends RouteBuilder { - public void configure() { - // ... - } - } - - -Applications may even provide their own CamelContext instance as argument to the -init method call as shown in the following snippet. Here, a DefaultCamelContext -is created using a Spring application context as `registry`_. - -.. _registry: http://camel.apache.org/registry.html - - -**Scala** - -.. code-block:: scala - - import org.apache.camel.impl.DefaultCamelContext - import org.apache.camel.spring.spi.ApplicationContextRegistry - import org.springframework.context.support.ClassPathXmlApplicationContext - - import akka.camel.CamelContextManager - import akka.camel.CamelServiceManager._ - - // create a custom Camel registry backed up by a Spring application context - val context = new ClassPathXmlApplicationContext("/context.xml") - val registry = new ApplicationContextRegistry(context) - - // initialize CamelContextManager with a DefaultCamelContext using the custom registry - CamelContextManager.init(new DefaultCamelContext(registry)) - - // ... - - startCamelService - -**Java** - -.. code-block:: java - - import org.apache.camel.impl.DefaultCamelContext - import org.apache.camel.spi.Registry; - import org.apache.camel.spring.spi.ApplicationContextRegistry; - - import org.springframework.context.ApplicationContext; - import org.springframework.context.support.ClassPathXmlApplicationContext; - - import akka.camel.CamelContextManager; - import static akka.camel.CamelServiceManager.*; - - // create a custom Camel registry backed up by a Spring application context - ApplicationContext context = new ClassPathXmlApplicationContext("/context.xml"); - Registry registry = new ApplicationContextRegistry(context); - - // initialize CamelContextManager with a DefaultCamelContext using the custom registry - CamelContextManager.init(new DefaultCamelContext(registry)); - - // ... - - startCamelService(); - - -.. _camel-spring-applications: - -Standalone Spring applications ------------------------------- - -A better approach to configure a Spring application context as registry for the -CamelContext is to use `Camel's Spring support`_. Furthermore, -the :ref:`spring-module` module additionally supports a element -for creating and starting a CamelService. An optional reference to a custom -CamelContext can be defined for as well. Here's an example. - -.. _Camel's Spring support: http://camel.apache.org/spring.html - -.. code-block:: xml - - - - - - - - - - - - - - - - - -Creating a CamelContext this way automatically adds the defining Spring -application context as registry to that CamelContext. 
The CamelService is -started when the application context is started and stopped when the application -context is closed. A simple usage example is shown in the following snippet. - -**Scala** - -.. code-block:: scala - - import org.springframework.context.support.ClassPathXmlApplicationContext - import akka.camel.CamelContextManager - - // Create and start application context (start CamelService) - val appctx = new ClassPathXmlApplicationContext("/context.xml") - - // Access to CamelContext (SpringCamelContext) - val ctx = CamelContextManager.mandatoryContext - // Access to ProducerTemplate of that CamelContext - val tpl = CamelContextManager.mandatoryTemplate - - // use ctx and tpl ... - - // Close application context (stop CamelService) - appctx.close - -**Java** - -.. code-block:: java - - // TODO - - -If the CamelService doesn't reference a custom CamelContext then a -DefaultCamelContext is created (and accessible via the CamelContextManager). - -.. code-block:: xml - - - - - - - - - -Kernel mode ------------ - -For classes that are loaded by the Kernel or the Initializer, starting the -CamelService can be omitted, as discussed in the previous section. Since these -classes are loaded and instantiated before the CamelService is started (by -Akka), applications can make modifications to a CamelContext here as well (and -even provide their own CamelContext). Assuming there's a boot class -sample.camel.Boot configured in :ref:`configuration`. - -.. code-block:: none - - akka { - ... - boot = ["sample.camel.Boot"] - ... - } - -Modifications to the CamelContext can be done like in the following snippet. - -**Scala** - -.. code-block:: scala - - package sample.camel - - import org.apache.camel.builder.RouteBuilder - - import akka.camel.CamelContextManager - - class Boot { - CamelContextManager.init - - // Customize CamelContext with application-specific routes - CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder) - - // No need to start CamelService here. It will be started - // when this classes has been loaded and instantiated. - } - - class CustomRouteBuilder extends RouteBuilder { - def configure { - // ... - } - } - -**Java** - -.. code-block:: java - - // TODO - - -Custom Camel routes -=================== - -In all the examples so far, routes to consumer actors have been automatically -constructed by akka-camel, when the actor was started. Although the default -route construction templates, used by akka-camel internally, are sufficient for -most use cases, some applications may require more specialized routes to actors. -The akka-camel module provides two mechanisms for customizing routes to actors, -which will be explained in this section. These are - -* Usage of :ref:`camel-components` to access (untyped) actor and actors. - Any Camel route can use these components to access Akka actors. - -* :ref:`camel-intercepting-route-construction` to (untyped) actor and actors. - Default routes to consumer actors are extended using predefined extension - points. - - -.. _camel-components: - -Akka Camel components ---------------------- - -Akka actors can be access from Camel routes using the `actor`_ and -`typed-actor`_ Camel components, respectively. These components can be used to -access any Akka actor (not only consumer actors) from Camel routes, as described -in the following sections. - -.. _actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/ActorComponent.scala -.. 
_typed-actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/TypedActorComponent.scala - - -Access to actors ----------------- - -To access (untyped) actors from custom Camel routes, the `actor`_ Camel -component should be used. It fully supports Camel's `asynchronous routing -engine`_. - -.. _actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/ActorComponent.scala -.. _asynchronous routing engine: http://camel.apache.org/asynchronous-routing-engine.html - -This component accepts the following enpoint URI formats: - -* ``actor:[?]`` -* ``actor:id:[][?]`` -* ``actor:uuid:[][?]`` - -where ```` and ```` refer to ``actorRef.id`` and the -String-representation of ``actorRef.uuid``, respectively. The ```` are -name-value pairs separated by ``&`` (i.e. ``name1=value1&name2=value2&...``). - - -URI options -^^^^^^^^^^^ - -The following URI options are supported: - -+----------+---------+---------+-------------------------------------------+ -| Name | Type | Default | Description | -+==========+=========+=========+===========================================+ -| blocking | Boolean | false | If set to true, in-out message exchanges | -| | | | with the target actor will be made with | -| | | | the ``!!`` operator, otherwise with the | -| | | | ``!`` operator. | -| | | | | -| | | | See also :ref:`camel-timeout`. | -+----------+---------+---------+-------------------------------------------+ -| autoack | Boolean | true | If set to true, in-only message exchanges | -| | | | are auto-acknowledged when the message is | -| | | | added to the actor's mailbox. If set to | -| | | | false, actors must acknowledge the | -| | | | receipt of the message. | -| | | | | -| | | | See also :ref:`camel-acknowledgements`. | -+----------+---------+---------+-------------------------------------------+ - -Here's an actor endpoint URI example containing an actor uuid:: - - actor:uuid:12345678?blocking=true - -In actor endpoint URIs that contain id: or uuid:, an actor identifier (id or -uuid) is optional. In this case, the in-message of an exchange produced to an -actor endpoint must contain a message header with name CamelActorIdentifier -(which is defined by the ActorComponent.ActorIdentifier field) and a value that -is the target actor's identifier. On the other hand, if the URI contains an -actor identifier, it can be seen as a default actor identifier that can be -overridden by messages containing a CamelActorIdentifier header. - - -Message headers -^^^^^^^^^^^^^^^ - -+----------------------+--------+-------------------------------------------+ -| Name | Type | Description | -+======================+========+===========================================+ -| CamelActorIdentifier | String | Contains the identifier (id or uuid) of | -| | | the actor to route the message to. The | -| | | identifier is interpreted as actor id if | -| | | the URI contains id:, the identifier is | -| | | interpreted as uuid id the URI contains | -| | | uuid:. A uuid value may also be of type | -| | | Uuid (not only String). The header name | -| | | is defined by the | -| | | ActorComponent.ActorIdentifier field. | -+----------------------+--------+-------------------------------------------+ - -Here's another actor endpoint URI example that doesn't define an actor uuid. 
In -this case the target actor uuid must be defined by the CamelActorIdentifier -message header:: - - actor:uuid: - -In the following example, a custom route to an actor is created, using the -actor's uuid (i.e. actorRef.uuid). The route starts from a `Jetty`_ endpoint and -ends at the target actor. - - -**Scala** - -.. code-block:: scala - - import org.apache.camel.builder.RouteBuilder - - import akka.actor._ - import akka.actor.Actor - import akka.actor.Actor._ - import akka.camel.{Message, CamelContextManager, CamelServiceManager} - - object CustomRouteExample extends Application { - val target = actorOf(Props[CustomRouteTarget]) - - CamelServiceManager.startCamelService - CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder(target.uuid)) - } - - class CustomRouteTarget extends Actor { - def receive = { - case msg: Message => self.reply("Hello %s" format msg.bodyAs[String]) - } - } - - class CustomRouteBuilder(uuid: Uuid) extends RouteBuilder { - def configure { - val actorUri = "actor:uuid:%s" format uuid - from("jetty:http://localhost:8877/camel/custom").to(actorUri) - } - } - - -**Java** - -.. code-block:: java - - import com.eaio.uuid.UUID; - - import org.apache.camel.builder.RouteBuilder; - import static akka.actor.Actors.*; - import akka.actor.ActorRef; - import akka.actor.UntypedActor; - import akka.camel.CamelServiceManager; - import akka.camel.CamelContextManager; - import akka.camel.Message; - - public class CustomRouteExample { - public static void main(String... args) throws Exception { - ActorRef target = actorOf(new Props(CustomRouteTarget.class)); - CamelServiceManager.startCamelService(); - CamelContextManager.getMandatoryContext().addRoutes(new CustomRouteBuilder(target.getUuid())); - } - } - - public class CustomRouteTarget extends UntypedActor { - public void onReceive(Object message) { - Message msg = (Message) message; - String body = msg.getBodyAs(String.class); - getContext().tryReply(String.format("Hello %s", body)); - } - } - - public class CustomRouteBuilder extends RouteBuilder { - private UUID uuid; - - public CustomRouteBuilder(UUID uuid) { - this.uuid = uuid; - } - - public void configure() { - String actorUri = String.format("actor:uuid:%s", uuid); - from("jetty:http://localhost:8877/camel/custom").to(actorUri); - } - } - -When the example is started, messages POSTed to -``http://localhost:8877/camel/custom`` are routed to the target actor. - - -Access to typed actors ----------------------- - -To access typed actor methods from custom Camel routes, the `typed-actor`_ Camel -component should be used. It is a specialization of the Camel `bean`_ component. -Applications should use the interface (endpoint URI syntax and options) as -described in the bean component documentation but with the typed-actor schema. -Typed Actors must be added to a `Camel registry`_ for being accessible by the -typed-actor component. - -.. _typed-actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/TypedActorComponent.scala -.. _bean: http://camel.apache.org/bean.html -.. _Camel registry: http://camel.apache.org/registry.html - - -.. _camel-typed-actors-using-spring: - -Using Spring -^^^^^^^^^^^^ - -The following example shows how to access typed actors in a Spring application -context. For adding typed actors to the application context and for starting -:ref:`camel-spring-applications` the :ref:`spring-module` module is used in the -following example. 
It offers custom XML elements for defining typed actor factory beans and for creating and starting a -CamelService. - -.. code-block:: xml - - - - - - - - - - - - - - - - - -SampleTypedActor is the typed actor interface and SampleTypedActorImpl is the -typed actor implementation class. - -**Scala** - -.. code-block:: scala - - package sample - - import akka.actor.TypedActor - - trait SampleTypedActor { - def foo(s: String): String - } - - class SampleTypedActorImpl extends TypedActor with SampleTypedActor { - def foo(s: String) = "hello %s" format s - } - -**Java** - -.. code-block:: java - - package sample; - - import akka.actor.TypedActor; - - public interface SampleTypedActor { - public String foo(String s); - } - - public class SampleTypedActorImpl extends TypedActor implements SampleTypedActor { - - public String foo(String s) { - return "hello " + s; - } - } - -The SampleRouteBuilder defines a custom route from the direct:test endpoint to -the sample typed actor using a typed-actor endpoint URI. - -**Scala** - -.. code-block:: scala - - package sample - - import org.apache.camel.builder.RouteBuilder - - class SampleRouteBuilder extends RouteBuilder { - def configure = { - // route to typed actor - from("direct:test").to("typed-actor:sample?method=foo") - } - } - -**Java** - -.. code-block:: java - - package sample; - - import org.apache.camel.builder.RouteBuilder; - - public class SampleRouteBuilder extends RouteBuilder { - public void configure() { - // route to typed actor - from("direct:test").to("typed-actor:sample?method=foo"); - } - } - -The typed-actor endpoint URI syntax is:: - - typed-actor:<bean-id>?method=<method-name> - -where ``<bean-id>`` is the id of the bean in the Spring application context and -``<method-name>`` is the name of the typed actor method to invoke. - -Usage of the custom route for sending a message to the typed actor is shown in -the following snippet. - -**Scala** - -.. code-block:: scala - - package sample - - import org.springframework.context.support.ClassPathXmlApplicationContext - import akka.camel.CamelContextManager - - // load Spring application context (starts CamelService) - val appctx = new ClassPathXmlApplicationContext("/context-standalone.xml") - - // access 'sample' typed actor via custom route - assert("hello akka" == CamelContextManager.mandatoryTemplate.requestBody("direct:test", "akka")) - - // close Spring application context (stops CamelService) - appctx.close - -**Java** - -.. code-block:: java - - package sample; - - import org.springframework.context.support.ClassPathXmlApplicationContext; - import akka.camel.CamelContextManager; - - // load Spring application context - ClassPathXmlApplicationContext appctx = new ClassPathXmlApplicationContext("/context-standalone.xml"); - - // access 'externally' registered typed actors with typed-actor component - assert("hello akka".equals(CamelContextManager.getMandatoryTemplate().requestBody("direct:test", "akka"))); - - // close Spring application context (stops CamelService) - appctx.close(); - -The application uses a Camel `producer template`_ to access the typed actor via -the ``direct:test`` endpoint. - -.. _producer template: http://camel.apache.org/producertemplate.html - - -Without Spring ^^^^^^^^^^^^^^ - -Usage of :ref:`spring-module` for adding typed actors to the Camel registry and -starting a CamelService is optional. Setting up a Spring-less application for -accessing typed actors is shown in the next example. - -**Scala** - -.. 
code-block:: scala - - package sample - - import org.apache.camel.impl.{DefaultCamelContext, SimpleRegistry} - import akka.actor.TypedActor - import akka.camel.CamelContextManager - import akka.camel.CamelServiceManager._ - - // register typed actor - val registry = new SimpleRegistry - registry.put("sample", TypedActor.newInstance(classOf[SampleTypedActor], classOf[SampleTypedActorImpl])) - - // customize CamelContext - CamelContextManager.init(new DefaultCamelContext(registry)) - CamelContextManager.mandatoryContext.addRoutes(new SampleRouteBuilder) - - startCamelService - - // access 'sample' typed actor via custom route - assert("hello akka" == CamelContextManager.mandatoryTemplate.requestBody("direct:test", "akka")) - - stopCamelService - -**Java** - -.. code-block:: java - - package sample; - - // register typed actor - SimpleRegistry registry = new SimpleRegistry(); - registry.put("sample", TypedActor.newInstance(SampleTypedActor.class, SampleTypedActorImpl.class)); - - // customize CamelContext - CamelContextManager.init(new DefaultCamelContext(registry)); - CamelContextManager.getMandatoryContext().addRoutes(new SampleRouteBuilder()); - - startCamelService(); - - // access 'sample' typed actor via custom route - assert("hello akka" == CamelContextManager.getMandatoryTemplate().requestBody("direct:test", "akka")); - - stopCamelService(); - -Here, `SimpleRegistry`_, a java.util.Map based registry, is used to register -typed actors. The CamelService is started and stopped programmatically. - -.. _SimpleRegistry: https://svn.apache.org/repos/asf/camel/trunk/camel-core/src/main/java/org/apache/camel/impl/SimpleRegistry.java - - -.. _camel-intercepting-route-construction: - -Intercepting route construction -------------------------------- - -The previous section, :ref:`camel-components`, explained how to setup a route to -an (untyped) actor or typed actor manually. It was the application's -responsibility to define the route and add it to the current CamelContext. This -section explains a more conventient way to define custom routes: akka-camel is -still setting up the routes to consumer actors (and adds these routes to the -current CamelContext) but applications can define extensions to these routes. -Extensions can be defined with Camel's `Java DSL`_ or `Scala DSL`_. For example, -an extension could be a custom error handler that redelivers messages from an -endpoint to an actor's bounded mailbox when the mailbox was full. - -.. _Java DSL: http://camel.apache.org/dsl.html -.. _Scala DSL: http://camel.apache.org/scala-dsl.html - -The following examples demonstrate how to extend a route to a consumer actor for -handling exceptions thrown by that actor. To simplify the example, we configure -:ref:`camel-blocking-exchanges` which reports any exception, that is thrown by -receive, directly back to the Camel route. One could also report exceptions -asynchronously using a Failure reply (see also `this article`__) but we'll do it -differently here. - -__ http://krasserm.blogspot.com/2011/02/akka-consumer-actors-new-features-and.html - - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -**Scala** - -.. 
code-block:: scala - - import akka.actor.Actor - import akka.camel.Consumer - - import org.apache.camel.builder.Builder - import org.apache.camel.model.RouteDefinition - - class ErrorHandlingConsumer extends Actor with Consumer { - def endpointUri = "direct:error-handler-test" - - // Needed to propagate exception back to caller - override def blocking = true - - onRouteDefinition {rd: RouteDefinition => - // Catch any exception and handle it by returning the exception message as response - rd.onException(classOf[Exception]).handled(true).transform(Builder.exceptionMessage).end - } - - protected def receive = { - case msg: Message => throw new Exception("error: %s" format msg.body) - } - } - -**Java** - -.. code-block:: java - - import akka.camel.UntypedConsumerActor; - - import org.apache.camel.builder.Builder; - import org.apache.camel.model.ProcessorDefinition; - import org.apache.camel.model.RouteDefinition; - - public class SampleErrorHandlingConsumer extends UntypedConsumerActor { - - public String getEndpointUri() { - return "direct:error-handler-test"; - } - - // Needed to propagate exception back to caller - public boolean isBlocking() { - return true; - } - - public void preStart() { - onRouteDefinition(new RouteDefinitionHandler() { - public ProcessorDefinition onRouteDefinition(RouteDefinition rd) { - // Catch any exception and handle it by returning the exception message as response - return rd.onException(Exception.class).handled(true).transform(Builder.exceptionMessage()).end(); - } - }); - } - - public void onReceive(Object message) throws Exception { - Message msg = (Message)message; - String body = msg.getBodyAs(String.class); - throw new Exception(String.format("error: %s", body)); - } - - } - - - -For (untyped) actors, consumer route extensions are defined by calling the -onRouteDefinition method with a route definition handler. In Scala, this is a -function of type ``RouteDefinition => ProcessorDefinition[_]``, in Java it is an -instance of ``RouteDefinitionHandler`` which is defined as follows. - -.. code-block:: scala - - package akka.camel - - import org.apache.camel.model.RouteDefinition - import org.apache.camel.model.ProcessorDefinition - - trait RouteDefinitionHandler { - def onRouteDefinition(rd: RouteDefinition): ProcessorDefinition[_] - } - -The akka-camel module creates a RouteDefinition instance by calling -from(endpointUri) on a Camel RouteBuilder (where endpointUri is the endpoint URI -of the consumer actor) and passes that instance as argument to the route -definition handler \*). The route definition handler then extends the route and -returns a ProcessorDefinition (in the above example, the ProcessorDefinition -returned by the end method. See the `org.apache.camel.model`__ package for -details). After executing the route definition handler, akka-camel finally calls -a to(actor:uuid:actorUuid) on the returned ProcessorDefinition to complete the -route to the comsumer actor (where actorUuid is the uuid of the consumer actor). - -\*) Before passing the RouteDefinition instance to the route definition handler, -akka-camel may make some further modifications to it. - -__ https://svn.apache.org/repos/asf/camel/trunk/camel-core/src/main/java/org/apache/camel/model/ - - -Typed actors -^^^^^^^^^^^^ - -For typed consumer actors to define a route definition handler, they must -provide a RouteDefinitionHandler implementation class with the @consume -annotation. The implementation class must have a no-arg constructor. Here's an -example (in Java). - -.. 
code-block:: java - - import org.apache.camel.builder.Builder; - import org.apache.camel.model.ProcessorDefinition; - import org.apache.camel.model.RouteDefinition; - - public class SampleRouteDefinitionHandler implements RouteDefinitionHandler { - public ProcessorDefinition onRouteDefinition(RouteDefinition rd) { - return rd.onException(Exception.class).handled(true).transform(Builder.exceptionMessage()).end(); - } - } - -It can be used as follows. - -**Scala** - -.. code-block:: scala - - trait TestTypedConsumer { - @consume(value="direct:error-handler-test", routeDefinitionHandler=classOf[SampleRouteDefinitionHandler]) - def foo(s: String): String - } - - // implementation class omitted - -**Java** - -.. code-block:: java - - public interface SampleErrorHandlingTypedConsumer { - - @consume(value="direct:error-handler-test", routeDefinitionHandler=SampleRouteDefinitionHandler.class) - String foo(String s); - - } - - // implementation class omitted - - -.. _camel-examples: - -Examples -======== - -For all features described so far, there's running sample code in -`akka-sample-camel`_. The examples in `sample.camel.Boot`_ are started during -Kernel startup because this class has been added to the boot :ref:`configuration`. - -.. _akka-sample-camel: http://github.com/jboner/akka/tree/master/akka-samples/akka-sample-camel/ -.. _sample.camel.Boot: http://github.com/jboner/akka/blob/master/akka-samples/akka-sample-camel/src/main/scala/sample/camel/Boot.scala - -.. code-block:: none - - akka { - ... - boot = ["sample.camel.Boot", ...] - ... - } - -If you don't want to have these examples started during Kernel startup, delete -it from the :ref:`configuration`. Other examples are standalone applications (i.e. classes with a -main method) that can be started from `sbt`_. - -.. _sbt: http://code.google.com/p/simple-build-tool/ - -.. code-block:: none - - $ sbt - [info] Building project akka 2.0-SNAPSHOT against Scala 2.9.0 - [info] using AkkaModulesParentProject with sbt 0.7.7 and Scala 2.7.7 - > project akka-sample-camel - Set current project to akka-sample-camel 2.0-SNAPSHOT - > run - ... - Multiple main classes detected, select one to run: - - [1] sample.camel.ClientApplication - [2] sample.camel.ServerApplication - [3] sample.camel.StandaloneSpringApplication - [4] sample.camel.StandaloneApplication - [5] sample.camel.StandaloneFileApplication - [6] sample.camel.StandaloneJmsApplication - - -Some of the examples in `akka-sample-camel`_ are described in more detail in the -following subsections. - - -.. _camel-async-example: - -Asynchronous routing and transformation example ------------------------------------------------ - -This example demonstrates how to implement consumer and producer actors that -support :ref:`camel-asynchronous-routing` with their Camel endpoints. The sample -application transforms the content of the Akka homepage, http://akka.io, by -replacing every occurrence of *Akka* with *AKKA*. After starting -the :ref:`microkernel`, direct the browser to http://localhost:8875 and the -transformed Akka homepage should be displayed. Please note that this example -will probably not work if you're behind an HTTP proxy. - -The following figure gives an overview how the example actors interact with -external systems and with each other. A browser sends a GET request to -http://localhost:8875 which is the published endpoint of the ``HttpConsumer`` -actor. The ``HttpConsumer`` actor forwards the requests to the ``HttpProducer`` -actor which retrieves the Akka homepage from http://akka.io. 
The retrieved HTML -is then forwarded to the ``HttpTransformer`` actor which replaces all occurences -of *Akka* with *AKKA*. The transformation result is sent back the HttpConsumer -which finally returns it to the browser. - -.. image:: camel-async-interact.png - -Implementing the example actor classes and wiring them together is rather easy -as shown in the following snippet (see also `sample.camel.Boot`_). - -.. code-block:: scala - - import org.apache.camel.Exchange - import akka.actor.Actor._ - import akka.actor.{Actor, ActorRef} - import akka.camel.{Producer, Message, Consumer} - - class HttpConsumer(producer: ActorRef) extends Actor with Consumer { - def endpointUri = "jetty:http://0.0.0.0:8875/" - - protected def receive = { - case msg => producer forward msg - } - } - - class HttpProducer(transformer: ActorRef) extends Actor with Producer { - def endpointUri = "jetty://http://akka.io/?bridgeEndpoint=true" - - override protected def receiveBeforeProduce = { - // only keep Exchange.HTTP_PATH message header (which needed by bridge endpoint) - case msg: Message => msg.setHeaders(msg.headers(Set(Exchange.HTTP_PATH))) - } - - override protected def receiveAfterProduce = { - // do not reply but forward result to transformer - case msg => transformer forward msg - } - } - - class HttpTransformer extends Actor { - protected def receive = { - case msg: Message => self.reply(msg.transformBody {body: String => body replaceAll ("Akka ", "AKKA ")}) - case msg: Failure => self.reply(msg) - } - } - - // Wire and start the example actors - val httpTransformer = actorOf(Props(new HttpTransformer)) - val httpProducer = actorOf(Props(new HttpProducer(httpTransformer))) - val httpConsumer = actorOf(Props(new HttpConsumer(httpProducer))) - -The `jetty endpoints`_ of HttpConsumer and HttpProducer support asynchronous -in-out message exchanges and do not allocate threads for the full duration of -the exchange. This is achieved by using `Jetty continuations`_ on the -consumer-side and by using `Jetty's asynchronous HTTP client`_ on the producer -side. The following high-level sequence diagram illustrates that. - -.. _jetty endpoints: http://camel.apache.org/jetty.html -.. _Jetty continuations: http://wiki.eclipse.org/Jetty/Feature/Continuations -.. _Jetty's asynchronous HTTP client: http://wiki.eclipse.org/Jetty/Tutorial/HttpClient - -.. image:: camel-async-sequence.png - - -Custom Camel route example --------------------------- - -This section also demonstrates the combined usage of a ``Producer`` and a -``Consumer`` actor as well as the inclusion of a custom Camel route. The -following figure gives an overview. - -.. image:: camel-custom-route.png - -* A consumer actor receives a message from an HTTP client - -* It forwards the message to another actor that transforms the message (encloses - the original message into hyphens) - -* The transformer actor forwards the transformed message to a producer actor - -* The producer actor sends the message to a custom Camel route beginning at the - ``direct:welcome`` endpoint - -* A processor (transformer) in the custom Camel route prepends "Welcome" to the - original message and creates a result message - -* The producer actor sends the result back to the consumer actor which returns - it to the HTTP client - - -The example is part of `sample.camel.Boot`_. The consumer, transformer and -producer actor implementations are as follows. - -.. 
code-block:: scala - - package sample.camel - - import akka.actor.{Actor, ActorRef} - import akka.camel.{Message, Consumer} - - class Consumer3(transformer: ActorRef) extends Actor with Consumer { - def endpointUri = "jetty:http://0.0.0.0:8877/camel/welcome" - - def receive = { - // Forward a string representation of the message body to transformer - case msg: Message => transformer.forward(msg.setBodyAs[String]) - } - } - - class Transformer(producer: ActorRef) extends Actor { - protected def receive = { - // example: transform message body "foo" to "- foo -" and forward result to producer - case msg: Message => producer.forward(msg.transformBody((body: String) => "- %s -" format body)) - } - } - - class Producer1 extends Actor with Producer { - def endpointUri = "direct:welcome" - } - -The producer actor knows where to reply the message to because the consumer and -transformer actors have forwarded the original sender reference as well. The -application configuration and the route starting from direct:welcome are as -follows. - -.. code-block:: scala - - package sample.camel - - import org.apache.camel.builder.RouteBuilder - import org.apache.camel.{Exchange, Processor} - - import akka.actor.Actor._ - import akka.camel.CamelContextManager - - class Boot { - CamelContextManager.init() - CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder) - - val producer = actorOf(Props[Producer1]) - val mediator = actorOf(Props(new Transformer(producer))) - val consumer = actorOf(Props(new Consumer3(mediator))) - } - - class CustomRouteBuilder extends RouteBuilder { - def configure { - from("direct:welcome").process(new Processor() { - def process(exchange: Exchange) { - // Create a 'welcome' message from the input message - exchange.getOut.setBody("Welcome %s" format exchange.getIn.getBody) - } - }) - } - } - -To run the example, start the :ref:`microkernel` and POST a message to -``http://localhost:8877/camel/welcome``. - -.. code-block:: none - - curl -H "Content-Type: text/plain" -d "Anke" http://localhost:8877/camel/welcome - -The response should be: - -.. code-block:: none - - Welcome - Anke - - - -Publish-subcribe example ------------------------- - -JMS -^^^ - -This section demonstrates how akka-camel can be used to implement -publish/subscribe for actors. The following figure sketches an example for -JMS-based publish/subscribe. - -.. image:: camel-pubsub.png - -A consumer actor receives a message from an HTTP client. It sends the message to -a JMS producer actor (publisher). The JMS producer actor publishes the message -to a JMS topic. Two other actors that subscribed to that topic both receive the -message. The actor classes used in this example are shown in the following -snippet. - -.. code-block:: scala - - package sample.camel - - import akka.actor.{Actor, ActorRef} - import akka.camel.{Producer, Message, Consumer} - - class Subscriber(name:String, uri: String) extends Actor with Consumer { - def endpointUri = uri - - protected def receive = { - case msg: Message => println("%s received: %s" format (name, msg.body)) - } - } - - class Publisher(name: String, uri: String) extends Actor with Producer { - self.id = name - - def endpointUri = uri - - // one-way communication with JMS - override def oneway = true - } - - class PublisherBridge(uri: String, publisher: ActorRef) extends Actor with Consumer { - def endpointUri = uri - - protected def receive = { - case msg: Message => { - publisher ! 
msg.bodyAs[String] - self.reply("message published") - } - } - } - -Wiring these actors to implement the above example is as simple as - -.. code-block:: scala - - package sample.camel - - import org.apache.camel.impl.DefaultCamelContext - import org.apache.camel.spring.spi.ApplicationContextRegistry - import org.springframework.context.support.ClassPathXmlApplicationContext - - import akka.actor.Actor._ - import akka.camel.CamelContextManager - - class Boot { - // Create CamelContext with Spring-based registry and custom route builder - val context = new ClassPathXmlApplicationContext("/context-jms.xml", getClass) - val registry = new ApplicationContextRegistry(context) - CamelContextManager.init(new DefaultCamelContext(registry)) - - // Setup publish/subscribe example - val jmsUri = "jms:topic:test" - val jmsSubscriber1 = actorOf(Props(new Subscriber("jms-subscriber-1", jmsUri))) - val jmsSubscriber2 = actorOf(Props(new Subscriber("jms-subscriber-2", jmsUri))) - val jmsPublisher = actorOf(Props(new Publisher("jms-publisher", jmsUri))) - - val jmsPublisherBridge = actorOf(Props(new PublisherBridge("jetty:http://0.0.0.0:8877/camel/pub/jms", jmsPublisher))) - } - -To publish messages to subscribers one could of course also use the JMS API -directly; there's no need to do that over a JMS producer actor as in this -example. For the example to work, Camel's `jms`_ component needs to be -configured with a JMS connection factory which is done in a Spring application -context XML file (context-jms.xml). - -.. _jms: http://camel.apache.org/jms.html - -.. code-block:: xml - - - - - - - - - - - - - - - - - - - - - - - - - -To run the example, start the :ref:`microkernel` and POST a -message to ``http://localhost:8877/camel/pub/jms``. - -.. code-block:: none - - curl -H "Content-Type: text/plain" -d "Happy hAkking" http://localhost:8877/camel/pub/jms - -The HTTP response body should be - -.. code-block:: none - - message published - -On the console, where you started the Akka Kernel, you should see something like - -.. code-block:: none - - ... - INF [20100622-11:49:57.688] camel: jms-subscriber-2 received: Happy hAkking - INF [20100622-11:49:57.688] camel: jms-subscriber-1 received: Happy hAkking - - -Cometd -^^^^^^ - -Publish/subscribe with `CometD`_ is equally easy using `Camel's cometd -component`_. - -.. _CometD: http://cometd.org/ -.. _Camel's cometd component: http://camel.apache.org/cometd.html - -.. image:: camel-pubsub2.png - -All actor classes from the JMS example can re-used, only the endpoint URIs need -to be changed. - -.. code-block:: scala - - package sample.camel - - import org.apache.camel.impl.DefaultCamelContext - import org.apache.camel.spring.spi.ApplicationContextRegistry - import org.springframework.context.support.ClassPathXmlApplicationContext - - import akka.actor.Actor._ - import akka.camel.CamelContextManager - - class Boot { - // ... - - // Setup publish/subscribe example - val cometdUri = "cometd://localhost:8111/test/abc?resourceBase=target" - val cometdSubscriber = actorOf(Props(new Subscriber("cometd-subscriber", cometdUri))) - val cometdPublisher = actorOf(Props(new Publisher("cometd-publisher", cometdUri))) - - val cometdPublisherBridge = actorOf(Props(new PublisherBridge("jetty:http://0.0.0.0:8877/camel/pub/cometd", cometdPublisher))) - } - - -Quartz Scheduler Example ------------------------- - -Here is an example showing how simple is to implement a cron-style scheduler by -using the Camel Quartz component in Akka. 
- -The following example creates a "timer" actor which fires a message every 2 -seconds: - -.. code-block:: scala - - package com.dimingo.akka - - import akka.actor.Actor - import akka.actor.Actor.actorOf - - import akka.camel.{Consumer, Message} - import akka.camel.CamelServiceManager._ - - class MyQuartzActor extends Actor with Consumer { - - def endpointUri = "quartz://example?cron=0/2+*+*+*+*+?" - - def receive = { - - case msg => println("==============> received %s " format msg) - - } // end receive - - } // end MyQuartzActor - - object MyQuartzActor { - - def main(str: Array[String]) { - - // start the Camel service - startCamelService - - // create and start a quartz actor - val myActor = actorOf(Props[MyQuartzActor]) - - } // end main - - } // end MyQuartzActor - -The full working example is available for download here: -http://www.dimingo.com/akka/examples/example-akka-quartz.tar.gz - -You can launch it using the maven command: - -.. code-block:: none - - $ mvn scala:run -DmainClass=com.dimingo.akka.MyQuartzActor - -For more information about the Camel Quartz component, see here: -http://camel.apache.org/quartz.html diff --git a/akka-docs/disabled/http.rst b/akka-docs/disabled/http.rst deleted file mode 100644 index 0d9423a676..0000000000 --- a/akka-docs/disabled/http.rst +++ /dev/null @@ -1,105 +0,0 @@ -.. _http-module: - -HTTP -==== - -.. sidebar:: Contents - - .. contents:: :local: - -When deploying in a servlet container: --------------------------------------------- - -If you deploy Akka in a JEE container, don't forget to create an Akka initialization and cleanup hook: - -.. code-block:: scala - - package com.my //<--- your own package - import akka.util.AkkaLoader - import akka.cluster.BootableRemoteActorService - import akka.actor.BootableActorLoaderService - import javax.servlet.{ServletContextListener, ServletContextEvent} - - /** - * This class can be added to web.xml mappings as a listener to start and postStop Akka. - * - * ... - * - * com.my.Initializer - * - * ... - * - */ - class Initializer extends ServletContextListener { - lazy val loader = new AkkaLoader - def contextDestroyed(e: ServletContextEvent): Unit = loader.shutdown - def contextInitialized(e: ServletContextEvent): Unit = - loader.boot(true, new BootableActorLoaderService with BootableRemoteActorService) //<--- Important - // loader.boot(true, new BootableActorLoaderService {}) // If you don't need akka-remote - } - -For Java users, it's currently only possible to use BootableActorLoaderService, but you'll need to use: akka.actor.DefaultBootableActorLoaderService - - -Then you just declare it in your web.xml: - -.. code-block:: xml - - - ... - - your.package.Initializer - - ... - - -Adapting your own Akka Initializer for the Servlet Container ------------------------------------------------------------- - -If you want to use akka-camel or any other modules that have their own "Bootable"'s you'll need to write your own Initializer, which is _ultra_ simple, see below for an example on how to include Akka-camel. - -.. code-block:: scala - - package com.my //<--- your own package - import akka.cluster.BootableRemoteActorService - import akka.actor.BootableActorLoaderService - import akka.camel.CamelService - import javax.servlet.{ServletContextListener, ServletContextEvent} - - /** - * This class can be added to web.xml mappings as a listener to start and postStop Akka. - * - * ... - * - * com.my.Initializer - * - * ... 
- * - */ - class Initializer extends ServletContextListener { - lazy val loader = new AkkaLoader - def contextDestroyed(e: ServletContextEvent): Unit = loader.shutdown - def contextInitialized(e: ServletContextEvent): Unit = - loader.boot(true, new BootableActorLoaderService with BootableRemoteActorService with CamelService) //<--- Important - } - -Using Akka with the Pinky REST/MVC framework --------------------------------------------- - -Pinky has a slick Akka integration. Read more `here `_ - -jetty-run in SBT ----------------- - -If you want to use jetty-run in SBT you need to exclude the version of Jetty that is bundled in akka-http: - -.. code-block:: scala - - override def ivyXML = - - - - - - - diff --git a/akka-docs/disabled/microkernel.rst b/akka-docs/disabled/microkernel.rst deleted file mode 100644 index cbf9ba96ba..0000000000 --- a/akka-docs/disabled/microkernel.rst +++ /dev/null @@ -1,40 +0,0 @@ - -.. _microkernel: - -############# - Microkernel -############# - - -Run the microkernel -=================== - -To start the kernel use the scripts in the ``bin`` directory. - -All services are configured in the :ref:`configuration` file in the ``config`` directory. -Services you want to be started up automatically should be listed in the list of ``boot`` classes in -the :ref:`configuration`. - -Put your application in the ``deploy`` directory. - - -Akka Home ---------- - -Note that the microkernel needs to know where the Akka home is (the base -directory of the microkernel). The above scripts do this for you. Otherwise, you -can set Akka home by: - -* Specifying the ``AKKA_HOME`` environment variable - -* Specifying the ``-Dakka.home`` java option - - -.. _hello-microkernel: - -Hello Microkernel -================= - -There is a very simple Akka Mist sample project included in the microkernel -``deploy`` directory. Start the microkernel with the start script and then go to -http://localhost:9998 to say Hello to the microkernel. From 4dd5e9612bcffdbbea9b23ccf604262385f06c35 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 3 Jan 2012 15:45:12 +0100 Subject: [PATCH 38/84] Migration kit and start of migration documentation. See #1406. 
* Documentation of migration kit * Documentation of some of the changes * akka-actor-migration module containing GlobalActorSystem, OldXxx classes and some implicit conversions * Tried migration of WebWords sample * Tried migration of akka-samples/async-workers * Tried migration of akka-samples-trading --- .../scala/akka/actor/GlobalActorSystem.scala | 64 ++++ .../src/main/scala/akka/actor/OldActor.scala | 171 +++++++++ .../main/scala/akka/actor/OldScheduler.scala | 75 ++++ .../main/scala/akka/config/OldConfig.scala | 162 ++++++++ .../main/scala/akka/dispatch/OldFuture.scala | 65 ++++ .../scala/akka/event/OldEventHandler.scala | 81 ++++ .../main/scala/akka/migration/package.scala | 34 ++ .../src/main/scala/akka/routing/Routing.scala | 80 ++-- akka-docs/general/jmm.rst | 2 + .../project/migration-guide-1.3.x-2.0.x.rst | 350 ++++++++++++++++++ .../code/akka/docs/actor/ActorDocSpec.scala | 3 + project/AkkaBuild.scala | 9 +- 12 files changed, 1055 insertions(+), 41 deletions(-) create mode 100644 akka-actor-migration/src/main/scala/akka/actor/GlobalActorSystem.scala create mode 100644 akka-actor-migration/src/main/scala/akka/actor/OldActor.scala create mode 100644 akka-actor-migration/src/main/scala/akka/actor/OldScheduler.scala create mode 100644 akka-actor-migration/src/main/scala/akka/config/OldConfig.scala create mode 100644 akka-actor-migration/src/main/scala/akka/dispatch/OldFuture.scala create mode 100644 akka-actor-migration/src/main/scala/akka/event/OldEventHandler.scala create mode 100644 akka-actor-migration/src/main/scala/akka/migration/package.scala diff --git a/akka-actor-migration/src/main/scala/akka/actor/GlobalActorSystem.scala b/akka-actor-migration/src/main/scala/akka/actor/GlobalActorSystem.scala new file mode 100644 index 0000000000..e0ba90b1f9 --- /dev/null +++ b/akka-actor-migration/src/main/scala/akka/actor/GlobalActorSystem.scala @@ -0,0 +1,64 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.actor + +import java.io.File + +import com.typesafe.config.Config +import com.typesafe.config.ConfigFactory +import com.typesafe.config.ConfigParseOptions +import com.typesafe.config.ConfigResolveOptions + +@deprecated("use ActorSystem instead", "2.0") +object GlobalActorSystem extends ActorSystemImpl("GlobalSystem", OldConfigurationLoader.defaultConfig) { + start() +} + +/** + * Loads configuration (akka.conf) from same location as Akka 1.x + */ +@deprecated("use default config location or write your own configuration loader", "2.0") +object OldConfigurationLoader { + + val defaultConfig: Config = { + val cfg = fromProperties orElse fromClasspath orElse fromHome getOrElse emptyConfig + val config = cfg.withFallback(ConfigFactory.defaultReference) + config.checkValid(ConfigFactory.defaultReference, "akka") + config + } + + // file extensions (.conf, .json, .properties), are handled by parseFileAnySyntax + val defaultLocation: String = (systemMode orElse envMode).map("akka." 
+ _).getOrElse("akka") + + private def envMode = System.getenv("AKKA_MODE") match { + case null | "" ⇒ None + case value ⇒ Some(value) + } + + private def systemMode = System.getProperty("akka.mode") match { + case null | "" ⇒ None + case value ⇒ Some(value) + } + + private def configParseOptions = ConfigParseOptions.defaults.setAllowMissing(false) + + private def fromProperties = try { + val property = Option(System.getProperty("akka.config")) + property.map(p ⇒ + ConfigFactory.systemProperties.withFallback( + ConfigFactory.parseFileAnySyntax(new File(p), configParseOptions))) + } catch { case _ ⇒ None } + + private def fromClasspath = try { + Option(ConfigFactory.systemProperties.withFallback( + ConfigFactory.parseResourcesAnySyntax(ActorSystem.getClass, "/" + defaultLocation, configParseOptions))) + } catch { case _ ⇒ None } + + private def fromHome = try { + Option(ConfigFactory.systemProperties.withFallback( + ConfigFactory.parseFileAnySyntax(new File(ActorSystem.GlobalHome.get + "/config/" + defaultLocation), configParseOptions))) + } catch { case _ ⇒ None } + + private def emptyConfig = ConfigFactory.systemProperties +} \ No newline at end of file diff --git a/akka-actor-migration/src/main/scala/akka/actor/OldActor.scala b/akka-actor-migration/src/main/scala/akka/actor/OldActor.scala new file mode 100644 index 0000000000..0a9238209e --- /dev/null +++ b/akka-actor-migration/src/main/scala/akka/actor/OldActor.scala @@ -0,0 +1,171 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.actor + +import akka.japi.Creator +import akka.util.Timeout +import akka.dispatch.Future +import akka.dispatch.OldFuture +import akka.util.Duration +import java.util.concurrent.TimeUnit +import java.net.InetSocketAddress + +/** + * Migration replacement for `object akka.actor.Actor`. + */ +@deprecated("use ActorRefFactory (ActorSystem or ActorContext) to create actors", "2.0") +object OldActor { + + /** + * Creates an ActorRef out of the Actor with type T. + * It will be automatically started, i.e. remove old call to `start()`. + * + */ + @deprecated("use ActorRefFactory (ActorSystem or ActorContext) to create actors", "2.0") + def actorOf[T <: Actor: Manifest]: ActorRef = actorOf(manifest[T].erasure.asInstanceOf[Class[_ <: Actor]]) + + /** + * Creates an ActorRef out of the Actor of the specified Class. + * It will be automatically started, i.e. remove old call to `start()`. + */ + @deprecated("use ActorRefFactory (ActorSystem or ActorContext) to create actors", "2.0") + def actorOf(clazz: Class[_ <: Actor]): ActorRef = GlobalActorSystem.actorOf(Props(clazz)) + + /** + * Creates an ActorRef out of the Actor. Allows you to pass in a factory function + * that creates the Actor. Please note that this function can be invoked multiple + * times if for example the Actor is supervised and needs to be restarted. + * + * It will be automatically started, i.e. remove old call to `start()`. + */ + @deprecated("use ActorRefFactory (ActorSystem or ActorContext) to create actors", "2.0") + def actorOf(factory: ⇒ Actor): ActorRef = GlobalActorSystem.actorOf(Props(factory)) + + /** + * Creates an ActorRef out of the Actor. Allows you to pass in a factory (Creator) + * that creates the Actor. Please note that this function can be invoked multiple + * times if for example the Actor is supervised and needs to be restarted. + *

    + * JAVA API + */ + @deprecated("use ActorRefFactory (ActorSystem or ActorContext) to create actors", "2.0") + def actorOf(creator: Creator[Actor]): ActorRef = GlobalActorSystem.actorOf(Props(creator)) + + @deprecated("OldActor.remote should not be used", "2.0") + lazy val remote: OldRemoteSupport = new OldRemoteSupport + +} + +@deprecated("use Actor", "2.0") +abstract class OldActor extends Actor { + + implicit def askTimeout: Timeout = akka.migration.askTimeout + + implicit def future2OldFuture[T](future: Future[T]): OldFuture[T] = akka.migration.future2OldFuture(future) + + implicit def actorRef2OldActorRef(actorRef: ActorRef) = new OldActorRef(actorRef) + + @deprecated("Use context.become instead", "2.0") + def become(behavior: Receive, discardOld: Boolean = true) = context.become(behavior, discardOld) + + @deprecated("Use context.unbecome instead", "2.0") + def unbecome() = context.unbecome() + + class OldActorRef(actorRef: ActorRef) { + @deprecated("Actors are automatically started when creatd, i.e. remove old call to start()", "2.0") + def start(): ActorRef = actorRef + + @deprecated("Stop with ActorSystem or ActorContext instead", "2.0") + def exit() = stop() + + @deprecated("Stop with ActorSystem or ActorContext instead", "2.0") + def stop(): Unit = context.stop(actorRef) + + @deprecated("Use context.getReceiveTimeout instead", "2.0") + def getReceiveTimeout(): Option[Long] = context.receiveTimeout.map(_.toMillis) + + @deprecated("Use context.setReceiveTimeout instead", "2.0") + def setReceiveTimeout(timeout: Long) = context.setReceiveTimeout(Duration(timeout, TimeUnit.MILLISECONDS)) + + @deprecated("Use context.getReceiveTimeout instead", "2.0") + def receiveTimeout: Option[Long] = getReceiveTimeout() + + @deprecated("Use context.setReceiveTimeout instead", "2.0") + def receiveTimeout_=(timeout: Option[Long]) = setReceiveTimeout(timeout.getOrElse(0L)) + + @deprecated("Use self.isTerminated instead", "2.0") + def isShutdown: Boolean = self.isTerminated + + @deprecated("Use sender instead", "2.0") + def channel() = context.sender + + @deprecated("Use sender instead", "2.0") + def sender() = Some(context.sender) + + @deprecated("Use sender ! instead", "2.0") + def reply(message: Any) = context.sender.!(message, context.self) + + @deprecated("Use sender ! instead", "2.0") + def tryReply(message: Any): Boolean = { + reply(message) + true + } + + @deprecated("Use sender ! instead", "2.0") + def tryTell(message: Any)(implicit sender: ActorRef = context.self): Boolean = { + actorRef.!(message)(sender) + true + } + + @deprecated("Use sender ! 
akka.actor.Status.Failure(e) instead", "2.0") + def sendException(ex: Throwable): Boolean = { + context.sender.!(akka.actor.Status.Failure(ex), context.self) + true + } + } +} + +class OldRemoteSupport { + + @deprecated("remote.start is not needed", "2.0") + def start() {} + + @deprecated("remote.start is not needed, use configuration to specify RemoteActorRefProvider, host and port", "2.0") + def start(host: String, port: Int) {} + + @deprecated("remote.start is not needed, use configuration to specify RemoteActorRefProvider, host and port", "2.0") + def start(host: String, port: Int, loader: ClassLoader) {} + + @deprecated("remote.shutdown is not needed", "2.0") + def shutdown() {} + + @deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0") + def actorFor(classNameOrServiceId: String, hostname: String, port: Int): ActorRef = + GlobalActorSystem.actorFor("akka://%s@%s:%s/user/%s".format(GlobalActorSystem.name, hostname, port, classNameOrServiceId)) + + @deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0") + def actorFor(classNameOrServiceId: String, hostname: String, port: Int, loader: ClassLoader): ActorRef = + actorFor(classNameOrServiceId, hostname, port) + + @deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0") + def actorFor(serviceId: String, className: String, hostname: String, port: Int): ActorRef = + actorFor(serviceId, hostname, port) + + @deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0") + def actorFor(serviceId: String, className: String, hostname: String, port: Int, loader: ClassLoader): ActorRef = + actorFor(serviceId, hostname, port) + + @deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0") + def actorFor(classNameOrServiceId: String, timeout: Long, hostname: String, port: Int): ActorRef = + actorFor(classNameOrServiceId, hostname, port) + + @deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0") + def actorFor(classNameOrServiceId: String, timeout: Long, hostname: String, port: Int, loader: ClassLoader): ActorRef = + actorFor(classNameOrServiceId, hostname, port) + + @deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0") + def actorFor(serviceId: String, className: String, timeout: Long, hostname: String, port: Int): ActorRef = + actorFor(serviceId, hostname, port) + +} \ No newline at end of file diff --git a/akka-actor-migration/src/main/scala/akka/actor/OldScheduler.scala b/akka-actor-migration/src/main/scala/akka/actor/OldScheduler.scala new file mode 100644 index 0000000000..7b487bf5db --- /dev/null +++ b/akka-actor-migration/src/main/scala/akka/actor/OldScheduler.scala @@ -0,0 +1,75 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.actor + +import java.util.concurrent.TimeUnit +import akka.util.Duration + +/** + * Migration replacement for `object akka.actor.Scheduler`. 
+ */ +@deprecated("use ActorSystem.scheduler instead", "2.0") +object OldScheduler { + + /** + * Schedules to send the specified message to the receiver after initialDelay and then repeated after delay + */ + @deprecated("use ActorSystem.scheduler instead", "2.0") + def schedule(receiver: ActorRef, message: Any, initialDelay: Long, delay: Long, timeUnit: TimeUnit): Cancellable = + GlobalActorSystem.scheduler.schedule( + Duration(initialDelay, timeUnit), + Duration(delay, timeUnit), + receiver, + message) + + /** + * Schedules to run specified function to the receiver after initialDelay and then repeated after delay + */ + @deprecated("use ActorSystem.scheduler instead", "2.0") + def schedule(f: () ⇒ Unit, initialDelay: Long, delay: Long, timeUnit: TimeUnit): Cancellable = + GlobalActorSystem.scheduler.schedule( + Duration(initialDelay, timeUnit), + Duration(delay, timeUnit), + new Runnable { def run = f() }) + + /** + * Schedules to run specified runnable to the receiver after initialDelay and then repeated after delay. + */ + @deprecated("use ActorSystem.scheduler instead", "2.0") + def schedule(runnable: Runnable, initialDelay: Long, delay: Long, timeUnit: TimeUnit): Cancellable = + GlobalActorSystem.scheduler.schedule( + Duration(initialDelay, timeUnit), + Duration(delay, timeUnit), + runnable) + + /** + * Schedules to send the specified message to the receiver after delay + */ + @deprecated("use ActorSystem.scheduler instead", "2.0") + def scheduleOnce(receiver: ActorRef, message: Any, delay: Long, timeUnit: TimeUnit): Cancellable = + GlobalActorSystem.scheduler.scheduleOnce( + Duration(delay, timeUnit), + receiver, + message) + + /** + * Schedules a function to be run after delay. + */ + @deprecated("use ActorSystem.scheduler instead", "2.0") + def scheduleOnce(f: () ⇒ Unit, delay: Long, timeUnit: TimeUnit): Cancellable = + GlobalActorSystem.scheduler.scheduleOnce( + Duration(delay, timeUnit), + new Runnable { def run = f() }) + + /** + * Schedules a runnable to be run after delay, + */ + @deprecated("use ActorSystem.scheduler instead", "2.0") + def scheduleOnce(runnable: Runnable, delay: Long, timeUnit: TimeUnit): Cancellable = + GlobalActorSystem.scheduler.scheduleOnce( + Duration(delay, timeUnit), + runnable) + +} + diff --git a/akka-actor-migration/src/main/scala/akka/config/OldConfig.scala b/akka-actor-migration/src/main/scala/akka/config/OldConfig.scala new file mode 100644 index 0000000000..69d7e15d41 --- /dev/null +++ b/akka-actor-migration/src/main/scala/akka/config/OldConfig.scala @@ -0,0 +1,162 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.config +import akka.actor.GlobalActorSystem +import com.typesafe.config.Config + +/** + * Migration replacement for `object akka.config.Config`. 
+ */ +@deprecated("use ActorSystem.settings.config instead", "2.0") +object OldConfig { + + val config = new OldConfiguration(GlobalActorSystem.settings.config) + +} + +/** + * Migration adapter for `akka.config.Configuration` + */ +@deprecated("use ActorSystem.settings.config (com.typesafe.config.Config) instead", "2.0") +class OldConfiguration(config: Config) { + + import scala.collection.JavaConverters._ + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def contains(key: String): Boolean = config.hasPath(key) + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def keys: Iterable[String] = config.root.keySet.asScala + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getAny(key: String): Option[Any] = { + try { + Option(config.getAnyRef(key)) + } catch { + case _ ⇒ None + } + } + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getAny(key: String, defaultValue: Any): Any = getAny(key).getOrElse(defaultValue) + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getSeqAny(key: String): Seq[Any] = { + try { + config.getAnyRefList(key).asScala + } catch { + case _ ⇒ Seq.empty[Any] + } + } + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getString(key: String): Option[String] = + try { + Option(config.getString(key)) + } catch { + case _ ⇒ None + } + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getString(key: String, defaultValue: String): String = getString(key).getOrElse(defaultValue) + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getList(key: String): Seq[String] = { + try { + config.getStringList(key).asScala + } catch { + case _ ⇒ Seq.empty[String] + } + } + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getInt(key: String): Option[Int] = { + try { + Option(config.getInt(key)) + } catch { + case _ ⇒ None + } + } + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getInt(key: String, defaultValue: Int): Int = getInt(key).getOrElse(defaultValue) + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getLong(key: String): Option[Long] = { + try { + Option(config.getLong(key)) + } catch { + case _ ⇒ None + } + } + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getLong(key: String, defaultValue: Long): Long = getLong(key).getOrElse(defaultValue) + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getFloat(key: String): Option[Float] = { + try { + Option(config.getDouble(key).toFloat) + } catch { + case _ ⇒ None + } + } + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getFloat(key: String, defaultValue: Float): Float = getFloat(key).getOrElse(defaultValue) + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getDouble(key: String): Option[Double] = { + try { + Option(config.getDouble(key)) + } catch { + case _ ⇒ None + } + } + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getDouble(key: String, defaultValue: Double): Double = getDouble(key).getOrElse(defaultValue) + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getBoolean(key: String): Option[Boolean] = { + try { + Option(config.getBoolean(key)) + } catch { + case _ ⇒ None + } + } + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getBoolean(key: 
String, defaultValue: Boolean): Boolean = getBoolean(key).getOrElse(defaultValue) + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getBool(key: String): Option[Boolean] = getBoolean(key) + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getBool(key: String, defaultValue: Boolean): Boolean = getBoolean(key, defaultValue) + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def apply(key: String): String = getString(key) match { + case None ⇒ throw new ConfigurationException("undefined config: " + key) + case Some(v) ⇒ v + } + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def apply(key: String, defaultValue: String) = getString(key, defaultValue) + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def apply(key: String, defaultValue: Int) = getInt(key, defaultValue) + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def apply(key: String, defaultValue: Long) = getLong(key, defaultValue) + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def apply(key: String, defaultValue: Boolean) = getBool(key, defaultValue) + + @deprecated("use new com.typesafe.config.Config API instead", "2.0") + def getSection(name: String): Option[OldConfiguration] = { + try { + Option(new OldConfiguration(config.getConfig(name))) + } catch { + case _ ⇒ None + } + } +} \ No newline at end of file diff --git a/akka-actor-migration/src/main/scala/akka/dispatch/OldFuture.scala b/akka-actor-migration/src/main/scala/akka/dispatch/OldFuture.scala new file mode 100644 index 0000000000..f53a3dd11b --- /dev/null +++ b/akka-actor-migration/src/main/scala/akka/dispatch/OldFuture.scala @@ -0,0 +1,65 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.dispatch + +import java.util.concurrent.TimeoutException +import akka.util.duration._ +import akka.AkkaException +import akka.util.BoxedType +import akka.util.Duration +import akka.actor.GlobalActorSystem + +/** + * Some old methods made available through implicit conversion in + * [[akka.migration]]. 
+ */ +@deprecated("use new Future api instead", "2.0") +class OldFuture[T](future: Future[T]) { + + @deprecated("use akka.dispatch.Await.result instead", "2.0") + def get: T = try { + Await.result(future, GlobalActorSystem.settings.ActorTimeout.duration) + } catch { + case e: TimeoutException ⇒ throw new FutureTimeoutException(e.getMessage, e) + } + + @deprecated("use akka.dispatch.Await.ready instead", "2.0") + def await: Future[T] = await(GlobalActorSystem.settings.ActorTimeout.duration) + + @deprecated("use akka.dispatch.Await.ready instead", "2.0") + def await(atMost: Duration) = try { + Await.ready(future, atMost) + future + } catch { + case e: TimeoutException ⇒ throw new FutureTimeoutException(e.getMessage, e) + } + + @deprecated("use new Future api instead", "2.0") + def as[A](implicit m: Manifest[A]): Option[A] = { + try await catch { case _: FutureTimeoutException ⇒ } + future.value match { + case None ⇒ None + case Some(Left(ex)) ⇒ throw ex + case Some(Right(v)) ⇒ Some(BoxedType(m.erasure).cast(v).asInstanceOf[A]) + } + } + + @deprecated("use new Future api instead", "2.0") + def asSilently[A](implicit m: Manifest[A]): Option[A] = { + try await catch { case _: FutureTimeoutException ⇒ } + future.value match { + case None ⇒ None + case Some(Left(ex)) ⇒ throw ex + case Some(Right(v)) ⇒ + try Some(BoxedType(m.erasure).cast(v).asInstanceOf[A]) + catch { case _: ClassCastException ⇒ None } + } + } + +} + +@deprecated("Await throws java.util.concurrent.TimeoutException", "2.0") +class FutureTimeoutException(message: String, cause: Throwable = null) extends AkkaException(message, cause) { + def this(message: String) = this(message, null) +} \ No newline at end of file diff --git a/akka-actor-migration/src/main/scala/akka/event/OldEventHandler.scala b/akka-actor-migration/src/main/scala/akka/event/OldEventHandler.scala new file mode 100644 index 0000000000..ef5846bc5c --- /dev/null +++ b/akka-actor-migration/src/main/scala/akka/event/OldEventHandler.scala @@ -0,0 +1,81 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.event + +import akka.actor.GlobalActorSystem + +/** + * Migration replacement for `akka.event.EventHandler` + */ +@deprecated("use Logging instead", "2.0") +object OldEventHandler { + + @deprecated("use Logging instead", "2.0") + def error(cause: Throwable, instance: AnyRef, message: ⇒ String) { + val log = Logging.getLogger(GlobalActorSystem, instance) + if (log.isErrorEnabled) log.error(cause, message) + } + + @deprecated("use Logging instead", "2.0") + def error(cause: Throwable, instance: AnyRef, message: Any) { + val log = Logging.getLogger(GlobalActorSystem, instance) + if (log.isErrorEnabled) log.error(cause, message.toString) + } + + @deprecated("use Logging instead", "2.0") + def error(instance: AnyRef, message: ⇒ String) { + val log = Logging.getLogger(GlobalActorSystem, instance) + if (log.isErrorEnabled) log.error(message.toString) + } + + @deprecated("use Logging instead", "2.0") + def error(instance: AnyRef, message: Any) { + val log = Logging.getLogger(GlobalActorSystem, instance) + if (log.isErrorEnabled) log.error(message.toString) + } + + @deprecated("use Logging instead", "2.0") + def warning(instance: AnyRef, message: ⇒ String) { + val log = Logging.getLogger(GlobalActorSystem, instance) + if (log.isWarningEnabled) log.warning(message) + } + + @deprecated("use Logging instead", "2.0") + def warning(instance: AnyRef, message: Any) { + val log = Logging.getLogger(GlobalActorSystem, instance) + if (log.isWarningEnabled) log.warning(message.toString) + } + + @deprecated("use Logging instead", "2.0") + def info(instance: AnyRef, message: ⇒ String) { + val log = Logging.getLogger(GlobalActorSystem, instance) + if (log.isInfoEnabled) log.info(message) + } + + @deprecated("use Logging instead", "2.0") + def info(instance: AnyRef, message: Any) { + val log = Logging.getLogger(GlobalActorSystem, instance) + if (log.isInfoEnabled) log.info(message.toString) + } + + @deprecated("use Logging instead", "2.0") + def debug(instance: AnyRef, message: ⇒ String) { + val log = Logging.getLogger(GlobalActorSystem, instance) + if (log.isDebugEnabled) log.debug(message) + } + + @deprecated("use Logging instead", "2.0") + def debug(instance: AnyRef, message: Any) { + val log = Logging.getLogger(GlobalActorSystem, instance) + if (log.isDebugEnabled) log.debug(message.toString) + } + + @deprecated("use Logging instead", "2.0") + def isInfoEnabled = Logging.getLogger(GlobalActorSystem, this).isInfoEnabled + + @deprecated("use Logging instead", "2.0") + def isDebugEnabled = Logging.getLogger(GlobalActorSystem, this).isDebugEnabled + +} diff --git a/akka-actor-migration/src/main/scala/akka/migration/package.scala b/akka-actor-migration/src/main/scala/akka/migration/package.scala new file mode 100644 index 0000000000..319fdd997e --- /dev/null +++ b/akka-actor-migration/src/main/scala/akka/migration/package.scala @@ -0,0 +1,34 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka + +import akka.dispatch.Future +import akka.dispatch.OldFuture +import akka.util.Timeout +import akka.actor.GlobalActorSystem +import akka.dispatch.MessageDispatcher +import akka.actor.ActorRef + +package object migration { + + implicit def future2OldFuture[T](future: Future[T]): OldFuture[T] = new OldFuture[T](future) + + implicit def askTimeout: Timeout = GlobalActorSystem.settings.ActorTimeout + + implicit def defaultDispatcher: MessageDispatcher = GlobalActorSystem.dispatcher + + implicit def actorRef2OldActorRef(actorRef: ActorRef) = new OldActorRef(actorRef) + + class OldActorRef(actorRef: ActorRef) { + @deprecated("Actors are automatically started when creatd, i.e. remove old call to start()", "2.0") + def start(): ActorRef = actorRef + + @deprecated("Stop with ActorSystem or ActorContext instead", "2.0") + def exit() = stop() + + @deprecated("Stop with ActorSystem or ActorContext instead", "2.0") + def stop(): Unit = GlobalActorSystem.stop(actorRef) + } + +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 69589ae651..5e7c9ae701 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -515,7 +515,7 @@ object ScatterGatherFirstCompletedRouter { * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. */ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, within: Duration, - override val resizer: Option[Resizer] = None) + override val resizer: Option[Resizer] = None) extends RouterConfig with ScatterGatherFirstCompletedLike { /** @@ -593,57 +593,57 @@ case class DefaultResizer( */ lowerBound: Int = 1, /** - * The most number of routees the router should ever have. - * Must be greater than or equal to `lowerBound`. - */ + * The most number of routees the router should ever have. + * Must be greater than or equal to `lowerBound`. + */ upperBound: Int = 10, /** - * Threshold to evaluate if routee is considered to be busy (under pressure). - * Implementation depends on this value (default is 1). - *

<ul>
- * <li> 0: number of routees currently processing a message.</li>
- * <li> 1: number of routees currently processing a message has
- *      some messages in mailbox.</li>
- * <li> > 1: number of routees with at least the configured `pressureThreshold`
- *      messages in their mailbox. Note that estimating mailbox size of
- *      default UnboundedMailbox is O(N) operation.</li>
- * </ul>
- */
+ * Threshold to evaluate if routee is considered to be busy (under pressure).
+ * Implementation depends on this value (default is 1).
+ * <ul>
+ * <li> 0: number of routees currently processing a message.</li>
+ * <li> 1: number of routees currently processing a message has
+ *      some messages in mailbox.</li>
+ * <li> > 1: number of routees with at least the configured `pressureThreshold`
+ *      messages in their mailbox. Note that estimating mailbox size of
+ *      default UnboundedMailbox is O(N) operation.</li>
+ * </ul>
    + */ pressureThreshold: Int = 1, /** - * Percentage to increase capacity whenever all routees are busy. - * For example, 0.2 would increase 20% (rounded up), i.e. if current - * capacity is 6 it will request an increase of 2 more routees. - */ + * Percentage to increase capacity whenever all routees are busy. + * For example, 0.2 would increase 20% (rounded up), i.e. if current + * capacity is 6 it will request an increase of 2 more routees. + */ rampupRate: Double = 0.2, /** - * Minimum fraction of busy routees before backing off. - * For example, if this is 0.3, then we'll remove some routees only when - * less than 30% of routees are busy, i.e. if current capacity is 10 and - * 3 are busy then the capacity is unchanged, but if 2 or less are busy - * the capacity is decreased. - * - * Use 0.0 or negative to avoid removal of routees. - */ + * Minimum fraction of busy routees before backing off. + * For example, if this is 0.3, then we'll remove some routees only when + * less than 30% of routees are busy, i.e. if current capacity is 10 and + * 3 are busy then the capacity is unchanged, but if 2 or less are busy + * the capacity is decreased. + * + * Use 0.0 or negative to avoid removal of routees. + */ backoffThreshold: Double = 0.3, /** - * Fraction of routees to be removed when the resizer reaches the - * backoffThreshold. - * For example, 0.1 would decrease 10% (rounded up), i.e. if current - * capacity is 9 it will request an decrease of 1 routee. - */ + * Fraction of routees to be removed when the resizer reaches the + * backoffThreshold. + * For example, 0.1 would decrease 10% (rounded up), i.e. if current + * capacity is 9 it will request an decrease of 1 routee. + */ backoffRate: Double = 0.1, /** - * When the resizer reduce the capacity the abandoned routee actors are stopped - * with PoisonPill after this delay. The reason for the delay is to give concurrent - * messages a chance to be placed in mailbox before sending PoisonPill. - * Use 0 seconds to skip delay. - */ + * When the resizer reduce the capacity the abandoned routee actors are stopped + * with PoisonPill after this delay. The reason for the delay is to give concurrent + * messages a chance to be placed in mailbox before sending PoisonPill. + * Use 0 seconds to skip delay. + */ stopDelay: Duration = 1.second, /** - * Number of messages between resize operation. - * Use 1 to resize before each message. - */ + * Number of messages between resize operation. + * Use 1 to resize before each message. + */ messagesPerResize: Int = 10) extends Resizer { /** diff --git a/akka-docs/general/jmm.rst b/akka-docs/general/jmm.rst index 23871449ef..7d806f9ac8 100644 --- a/akka-docs/general/jmm.rst +++ b/akka-docs/general/jmm.rst @@ -1,3 +1,5 @@ +.. _jmm: + Akka and the Java Memory Model ================================ diff --git a/akka-docs/project/migration-guide-1.3.x-2.0.x.rst b/akka-docs/project/migration-guide-1.3.x-2.0.x.rst index 44fd51884c..0c8d239d03 100644 --- a/akka-docs/project/migration-guide-1.3.x-2.0.x.rst +++ b/akka-docs/project/migration-guide-1.3.x-2.0.x.rst @@ -4,6 +4,10 @@ Migration Guide 1.3.x to 2.0.x ################################ +.. sidebar:: Contents + + .. contents:: :local: + Actors ====== @@ -13,9 +17,177 @@ significant amount of time. Detailed migration guide will be written. +Migration Kit +============= + +Nobody likes a big refactoring that takes several days to complete until +anything is able to run again. 
Therefore we provide a migration kit that +makes it possible to do the migration changes in smaller steps. + +The migration kit only covers the most common usage of Akka. It is not intended +as a final solution. The whole migration kit is deprecated and will be removed in +Akka 2.1. + +The migration kit is provided in separate jar files. Add the following dependency:: + + "com.typesafe.akka" % "akka-actor-migration" % "2.0-SNAPSHOT" + +The first step of the migration is to do some trivial replacements. +Search and replace the following (be careful with the non qualified names): + +==================================== ==================================== +Search Replace with +==================================== ==================================== +``akka.actor.Actor`` ``akka.actor.OldActor`` +``extends Actor`` ``extends OldActor`` +``akka.actor.Scheduler`` ``akka.actor.OldScheduler`` +``Scheduler`` ``OldScheduler`` +``akka.event.EventHandler`` ``akka.event.OldEventHandler`` +``EventHandler`` ``OldEventHandler`` +``akka.config.Config`` ``akka.config.OldConfig`` +``Config`` ``OldConfig`` +==================================== ==================================== + +For Scala users the migration kit also contains some implicit conversions to be +able to use some old methods. These conversions are useful from tests or other +code used outside actors. + +:: + + import akka.migration._ + +Thereafter you need to fix compilation errors that are not handled by the migration +kit, such as: + +* Definition of supervisors +* Definition of dispatchers +* ActorRegistry + +When everything compiles you continue by replacing/removing the ``OldXxx`` classes +one-by-one from the migration kit with appropriate migration. + +When using the migration kit there will be one global actor system, which loads +the configuration ``akka.conf`` from the same locations as in Akka 1.x. +This means that while you are using the migration kit you should not create your +own ``ActorSystem``, but instead use the ``akka.actor.GlobalActorSystem``. +In order to voluntarily exit the JVM you must ``shutdown`` the ``GlobalActorSystem`` +Last task of the migration would be to create your own ``ActorSystem``. + + Unordered Collection of Migration Items ======================================= +Creating and starting actors +---------------------------- + +Actors are created by passing in a ``Props`` instance into the actorOf factory method in +a ``ActorRefProvider``, which is the ``ActorSystem`` or ``ActorContext``. +Use the system to create top level actors. Use the context to +create actors from other actors. The difference is how the supervisor hierarchy is arranged. +When using the context the current actor will be supervisor of the created child actor. +When using the system it will be a top level actor, that is supervised by the system +(internal guardian actor). + +``ActorRef.start()`` has been removed. Actors are now started automatically when created. +Remove all invocations of ``ActorRef.start()``. + +v1.3:: + + val myActor = Actor.actorOf[MyActor] + myActor.start() + +v2.0:: + + // top level actor + val firstActor = system.actorOf(Props[FirstActor], name = "first") + + // child actor + class FirstActor extends Actor { + val myActor = context.actorOf(Props[MyActor], name = "myactor") + +Documentation: + + * :ref:`actors-scala` + * :ref:`untyped-actors-java` + +Stopping actors +--------------- + +``ActorRef.stop()`` has been moved. Use ``ActorSystem`` or ``ActorContext`` to stop actors. 
+ +v1.3:: + + actorRef.stop() + self.stop() + actorRef ! PoisonPill + +v2.0:: + + context.stop(someChild) + context.stop(self) + system.stop(actorRef) + actorRef ! PoisonPill + +*Stop all actors* + +v1.3:: + + ActorRegistry.shutdownAll() + +v2.0:: + + system.shutdown() + +Documentation: + + * :ref:`actors-scala` + * :ref:`untyped-actors-java` + +Identifying Actors +------------------ + +In v1.3 actors have ``uuid`` and ``id`` field. In v2.0 each actor has a unique logical ``path``. + +The ``ActorRegistry`` has been replaced by actor paths and lookup with +``actorFor`` in ``ActorRefProvider`` (``ActorSystem`` or ``ActorContext``). + +v1.3:: + + val actor = Actor.registry.actorFor(uuid) + val actors = Actor.registry.actorsFor(id) + +v2.0:: + + val actor = context.actorFor("/user/serviceA/aggregator") + +Documentation: + + * :ref:`addressing` + * :ref:`actors-scala` + * :ref:`untyped-actors-java` + +Reply to messages +----------------- + +``self.channel`` has been replaced with unified reply mechanism using ``sender`` (Scala) +or ``getSender()`` (Java). This works for both tell (!) and ask (?). + +v1.3:: + + self.channel ! result + self.channel tryTell result + self.reply(result) + self.tryReply(result) + +v2.0:: + + sender ! result + +Documentation: + + * :ref:`actors-scala` + * :ref:`untyped-actors-java` + ``ActorRef.ask()`` ------------------ @@ -28,7 +200,185 @@ reply to be received; it is independent of the timeout applied when awaiting completion of the :class:`Future`, however, the actor will complete the :class:`Future` with an :class:`AskTimeoutException` when it stops itself. +Documentation: + + * :ref:`actors-scala` + * :ref:`untyped-actors-java` + ActorPool --------- The ActorPool has been replaced by dynamically resizable routers. + +Documentation: + + * :ref:`routing-scala` + * :ref:`routing-java` + +``UntypedActor.getContext()`` (Java API only) +--------------------------------------------- + +``getContext()`` in the Java API for UntypedActor is renamed to +``getSelf()``. + +v1.3:: + + actorRef.tell("Hello", getContext()); + +v2.0:: + + actorRef.tell("Hello", getSelf()); + +Documentation: + + * :ref:`untyped-actors-java` + +Logging +------- + +EventHandler API has been replaced by LoggingAdapter, which publish log messages +to the event bus. You can still plugin your own actor as event listener with the +``akka.event-handlers`` configuration property. + +v1.3:: + + EventHandler.error(exception, this, message) + EventHandler.warning(this, message) + EventHandler.info(this, message) + EventHandler.debug(this, message) + EventHandler.debug(this, "Processing took %s ms".format(duration)) + +v2.0:: + + import akka.event.Logging + + val log = Logging(context.system, this) + log.error(exception, this, message) + log.warning(this, message) + log.info(this, message) + log.debug(this, message) + log.debug(this, "Processing took {} ms", duration) + +Documentation: + + * :ref:`logging-scala` + * :ref:`logging-java` + * :ref:`event-bus-scala` + * :ref:`event-bus-java` + +Supervision +----------- + +Akka v2.0 implements parental supervision. Actors can only be created by other actors — where the top-level +actor is provided by the library — and each created actor is supervised by its parent. +In contrast to the special supervision relationship between parent and child, each actor may monitor any +other actor for termination. + +v1.3:: + + self.link(actorRef) + self.unlink(actorRef) + +v2.0:: + + class WatchActor extends Actor { + val actorRef = ... 
+ // Terminated message will be delivered when the actorRef actor + // is stopped + context.watch(actorRef) + + val supervisedChild = context.actorOf(Props[ChildActor]) + + def receive = { + case Terminated(`actorRef`) ⇒ ... + } + } + +Note that ``link`` in v1.3 established a supervision relation, which ``watch`` doesn't. +``watch`` is only a way to get notification, ``Terminated`` message, when the monitored +actor has been stopped. + +*Refererence to the supervisor* + +v1.3:: + + self.supervisor + +v2.0:: + + context.parent + +*Fault handling strategy* + +v1.3:: + + val supervisor = Supervisor( + SupervisorConfig( + AllForOneStrategy(List(classOf[Exception]), 3, 1000), + Supervise( + actorOf[MyActor1], + Permanent) :: + Supervise( + actorOf[MyActor2], + Permanent) :: + Nil)) + +v2.0:: + + val strategy = OneForOneStrategy({ + case _: ArithmeticException ⇒ Resume + case _: NullPointerException ⇒ Restart + case _: IllegalArgumentException ⇒ Stop + case _: Exception ⇒ Escalate + }: Decider, maxNrOfRetries = Some(10), withinTimeRange = Some(60000)) + + val supervisor = system.actorOf(Props[Supervisor].withFaultHandler(strategy), "supervisor") + +Documentation: + + * :ref:`supervision` + * :ref:`fault-tolerance-java` + * :ref:`fault-tolerance-scala` + * :ref:`actors-scala` + * :ref:`untyped-actors-java` + +Spawn +----- + +``spawn`` has been removed and can be implemented like this, if needed. Be careful to not +access any shared mutable state closed over by the body. + +:: + + def spawn(body: ⇒ Unit) { + system.actorOf(Props(ctx ⇒ { case "go" ⇒ try body finally ctx.stop(ctx.self) })) ! "go" + } + +Documentation: + + * :ref:`jmm` + +HotSwap +------- + +In v2.0 ``become`` and ``unbecome`` metods are located in ``ActorContext``, i.e. ``context.become`` and ``context.unbecome``. + +The special ``HotSwap`` and ``RevertHotswap`` messages in v1.3 has been removed. Similar can be +implemented with your own message and using ``context.become`` and ``context.unbecome`` +in the actor receiving the message. + + * :ref:`actors-scala` + * :ref:`untyped-actors-java` + +More to be written +------------------ + +* Futures +* Dispatchers +* STM +* TypedActors +* Routing +* Remoting +* Scheduler +* Configuration +* ...? 
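Picking up the HotSwap item above: the removed ``HotSwap``/``RevertHotswap`` messages can be
approximated with a message of your own plus ``context.become``/``context.unbecome``. A minimal
sketch only, under the assumption of a user-defined protocol; the ``Swap`` message and the
``Toggler`` actor are illustrative names, not part of the migration kit::

    import akka.actor.Actor

    case object Swap

    class Toggler extends Actor {
      // each Swap message flips the actor between the two behaviors
      def on: Receive = {
        case Swap ⇒ context become off
      }
      def off: Receive = {
        case Swap ⇒ context become on
      }
      def receive = off
    }

If the previous behavior should be restored rather than replaced, push the new one with
``context.become(..., discardOld = false)`` and pop it again with ``context.unbecome``.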
\ No newline at end of file diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala index 20ac33480b..36439d2a16 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala @@ -7,6 +7,9 @@ package akka.docs.actor import akka.actor.Actor import akka.actor.Props import akka.event.Logging + +//#imports1 + import akka.dispatch.Future import akka.actor.ActorSystem import org.scalatest.{ BeforeAndAfterAll, WordSpec } diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 8f6e656e46..7b6808a944 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -30,7 +30,7 @@ object AkkaBuild extends Build { Unidoc.unidocExclude := Seq(samples.id, tutorials.id), Dist.distExclude := Seq(actorTests.id, akkaSbtPlugin.id, docs.id) ), - aggregate = Seq(actor, testkit, actorTests, remote, slf4j, agent, transactor, mailboxes, kernel, akkaSbtPlugin, samples, tutorials, docs) + aggregate = Seq(actor, testkit, actorTests, remote, slf4j, agent, transactor, mailboxes, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs) ) lazy val actor = Project( @@ -212,6 +212,13 @@ object AkkaBuild extends Build { ) ) + lazy val actorMigration = Project( + id = "akka-actor-migration", + base = file("akka-actor-migration"), + dependencies = Seq(actor, testkit % "test->test"), + settings = defaultSettings + ) + lazy val akkaSbtPlugin = Project( id = "akka-sbt-plugin", base = file("akka-sbt-plugin"), From d0498eb32efeed165eeda9c5be10e3147316ca8b Mon Sep 17 00:00:00 2001 From: Roland Date: Wed, 11 Jan 2012 14:14:08 +0100 Subject: [PATCH 39/84] add Class[_] to LogEvent - it is customary to use class name for categorizing logs, hence we should support it; class is taken from logSource.getClass - update SLF4J module to use logClass as category and set logSource in MDC "akkaSource" - add docs --- .../test/scala/akka/actor/FSMActorSpec.scala | 12 +- .../akka/actor/dispatch/ActorModelSpec.scala | 12 +- .../scala/akka/event/EventStreamSpec.scala | 2 +- .../scala/akka/event/LoggingReceiveSpec.scala | 36 ++-- .../src/main/scala/akka/actor/ActorCell.scala | 43 ++-- .../main/scala/akka/actor/ActorSystem.scala | 2 +- .../akka/dispatch/AbstractDispatcher.scala | 2 +- .../main/scala/akka/dispatch/Dispatcher.scala | 2 +- .../scala/akka/dispatch/Dispatchers.scala | 2 +- .../src/main/scala/akka/dispatch/Future.scala | 4 +- .../main/scala/akka/dispatch/Mailbox.scala | 2 +- .../main/scala/akka/event/EventStream.scala | 6 +- .../src/main/scala/akka/event/Logging.scala | 189 ++++++++++++++---- .../scala/akka/event/LoggingReceive.scala | 2 +- akka-actor/src/main/scala/akka/util/JMX.scala | 4 +- akka-docs/java/logging.rst | 17 +- .../code/akka/docs/actor/ActorDocSpec.scala | 4 +- .../code/akka/docs/event/LoggingDocSpec.scala | 10 +- akka-docs/scala/logging.rst | 17 +- .../src/main/scala/akka/remote/Remote.scala | 2 +- .../remote/netty/NettyRemoteSupport.scala | 2 +- .../akka/remote/RemoteDeathWatchSpec.scala | 2 + .../main/scala/akka/event/slf4j/SLF4J.scala | 36 ++-- .../akka/testkit/TestEventListener.scala | 20 +- .../scala/akka/testkit/TestActorRefSpec.scala | 2 +- 25 files changed, 295 insertions(+), 137 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala index a6d6a7df98..c5ed765aab 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala +++ 
b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala @@ -171,7 +171,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im system.eventStream.subscribe(testActor, classOf[Logging.Error]) fsm ! "go" expectMsgPF(1 second, hint = "Next state 2 does not exist") { - case Logging.Error(_, `name`, "Next state 2 does not exist") ⇒ true + case Logging.Error(_, `name`, _, "Next state 2 does not exist") ⇒ true } system.eventStream.unsubscribe(testActor) } @@ -221,15 +221,15 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im system.eventStream.subscribe(testActor, classOf[Logging.Debug]) fsm ! "go" expectMsgPF(1 second, hint = "processing Event(go,null)") { - case Logging.Debug(`name`, s: String) if s.startsWith("processing Event(go,null) from Actor[") ⇒ true + case Logging.Debug(`name`, _, s: String) if s.startsWith("processing Event(go,null) from Actor[") ⇒ true } - expectMsg(1 second, Logging.Debug(name, "setting timer 't'/1500 milliseconds: Shutdown")) - expectMsg(1 second, Logging.Debug(name, "transition 1 -> 2")) + expectMsg(1 second, Logging.Debug(name, fsm.underlyingActor.getClass, "setting timer 't'/1500 milliseconds: Shutdown")) + expectMsg(1 second, Logging.Debug(name, fsm.underlyingActor.getClass, "transition 1 -> 2")) fsm ! "stop" expectMsgPF(1 second, hint = "processing Event(stop,null)") { - case Logging.Debug(`name`, s: String) if s.startsWith("processing Event(stop,null) from Actor[") ⇒ true + case Logging.Debug(`name`, _, s: String) if s.startsWith("processing Event(stop,null) from Actor[") ⇒ true } - expectMsgAllOf(1 second, Logging.Debug(name, "canceling timer 't'"), Normal) + expectMsgAllOf(1 second, Logging.Debug(name, fsm.underlyingActor.getClass, "canceling timer 't'"), Normal) expectNoMsg(1 second) system.eventStream.unsubscribe(testActor) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index 9debbd053c..b0d831dc77 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -151,7 +151,7 @@ object ActorModelSpec { await(deadline)(stops == dispatcher.stops.get) } catch { case e ⇒ - system.eventStream.publish(Error(e, dispatcher.toString, "actual: stops=" + dispatcher.stops.get + + system.eventStream.publish(Error(e, dispatcher.toString, dispatcher.getClass, "actual: stops=" + dispatcher.stops.get + " required: stops=" + stops)) throw e } @@ -208,9 +208,11 @@ object ActorModelSpec { await(deadline)(stats.restarts.get() == restarts) } catch { case e ⇒ - system.eventStream.publish(Error(e, Option(dispatcher).toString, "actual: " + stats + ", required: InterceptorStats(susp=" + suspensions + - ",res=" + resumes + ",reg=" + registers + ",unreg=" + unregisters + - ",recv=" + msgsReceived + ",proc=" + msgsProcessed + ",restart=" + restarts)) + system.eventStream.publish(Error(e, Option(dispatcher).toString, + if (dispatcher ne null) dispatcher.getClass else this.getClass, + "actual: " + stats + ", required: InterceptorStats(susp=" + suspensions + + ",res=" + resumes + ",reg=" + registers + ",unreg=" + unregisters + + ",recv=" + msgsReceived + ",proc=" + msgsProcessed + ",restart=" + restarts)) throw e } } @@ -311,7 +313,7 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa try { f } catch { - case e ⇒ system.eventStream.publish(Error(e, "spawn", "error in 
spawned thread")) + case e ⇒ system.eventStream.publish(Error(e, "spawn", this.getClass, "error in spawned thread")) } } } diff --git a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala index f6e5b92201..9a41c80f6d 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala @@ -108,7 +108,7 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { private def verifyLevel(bus: LoggingBus, level: Logging.LogLevel) { import Logging._ - val allmsg = Seq(Debug("", "debug"), Info("", "info"), Warning("", "warning"), Error("", "error")) + val allmsg = Seq(Debug("", null, "debug"), Info("", null, "info"), Warning("", null, "warning"), Error("", null, "error")) val msg = allmsg filter (_.level <= level) allmsg foreach bus.publish msg foreach (x ⇒ expectMsg(x)) diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala index 15f8646d4b..6d524729dd 100644 --- a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala @@ -59,7 +59,7 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd } val log = LoggingReceive("funky")(r) log.isDefinedAt("hallo") - expectMsg(1 second, Logging.Debug("funky", "received unhandled message hallo")) + expectMsg(1 second, Logging.Debug("funky", classOf[String], "received unhandled message hallo")) } } @@ -83,7 +83,7 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd val name = actor.path.toString actor ! "buh" within(1 second) { - expectMsg(Logging.Debug(name, "received handled message buh")) + expectMsg(Logging.Debug(name, actor.underlyingActor.getClass, "received handled message buh")) expectMsg("x") } @@ -109,7 +109,7 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd }) actor ! "buh" within(1 second) { - expectMsg(Logging.Debug(actor.path.toString, "received handled message buh")) + expectMsg(Logging.Debug(actor.path.toString, actor.underlyingActor.getClass, "received handled message buh")) expectMsg("x") } } @@ -130,7 +130,7 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd val name = actor.path.toString actor ! 
PoisonPill expectMsgPF() { - case Logging.Debug(`name`, msg: String) if msg startsWith "received AutoReceiveMessage Envelope(PoisonPill" ⇒ true + case Logging.Debug(`name`, _, msg: String) if msg startsWith "received AutoReceiveMessage Envelope(PoisonPill" ⇒ true } awaitCond(actor.isTerminated, 100 millis) } @@ -142,7 +142,7 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd val sys = impl.systemGuardian.path.toString ignoreMute(this) ignoreMsg { - case Logging.Debug(s, _) ⇒ s.contains("MainBusReaper") || s == sys + case Logging.Debug(`sys`, _, _) ⇒ true } system.eventStream.subscribe(testActor, classOf[Logging.Debug]) system.eventStream.subscribe(testActor, classOf[Logging.Error]) @@ -151,51 +151,53 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd val lname = lifecycleGuardian.path.toString val supervisor = TestActorRef[TestLogActor](Props[TestLogActor].withFaultHandler(OneForOneStrategy(List(classOf[Throwable]), 5, 5000))) val sname = supervisor.path.toString + val sclass = classOf[TestLogActor] val supervisorSet = receiveWhile(messages = 2) { - case Logging.Debug(`lname`, msg: String) if msg startsWith "now supervising" ⇒ 1 - case Logging.Debug(`sname`, msg: String) if msg startsWith "started" ⇒ 2 + case Logging.Debug(`lname`, _, msg: String) if msg startsWith "now supervising" ⇒ 1 + case Logging.Debug(`sname`, `sclass`, msg: String) if msg startsWith "started" ⇒ 2 }.toSet expectNoMsg(Duration.Zero) assert(supervisorSet == Set(1, 2), supervisorSet + " was not Set(1, 2)") val actor = TestActorRef[TestLogActor](Props[TestLogActor], supervisor, "none") val aname = actor.path.toString + val aclass = classOf[TestLogActor] val set = receiveWhile(messages = 2) { - case Logging.Debug(`sname`, msg: String) if msg startsWith "now supervising" ⇒ 1 - case Logging.Debug(`aname`, msg: String) if msg startsWith "started" ⇒ 2 + case Logging.Debug(`sname`, _, msg: String) if msg startsWith "now supervising" ⇒ 1 + case Logging.Debug(`aname`, `aclass`, msg: String) if msg startsWith "started" ⇒ 2 }.toSet expectNoMsg(Duration.Zero) assert(set == Set(1, 2), set + " was not Set(1, 2)") supervisor watch actor expectMsgPF(hint = "now monitoring") { - case Logging.Debug(ref, msg: String) ⇒ + case Logging.Debug(ref, `sclass`, msg: String) ⇒ ref == supervisor.underlyingActor && msg.startsWith("now monitoring") } supervisor unwatch actor expectMsgPF(hint = "stopped monitoring") { - case Logging.Debug(ref, msg: String) ⇒ + case Logging.Debug(ref, `sclass`, msg: String) ⇒ ref == supervisor.underlyingActor && msg.startsWith("stopped monitoring") } EventFilter[ActorKilledException](occurrences = 1) intercept { actor ! 
Kill val set = receiveWhile(messages = 3) { - case Logging.Error(_: ActorKilledException, `aname`, "Kill") ⇒ 1 - case Logging.Debug(`aname`, "restarting") ⇒ 2 - case Logging.Debug(`aname`, "restarted") ⇒ 3 + case Logging.Error(_: ActorKilledException, `aname`, `aclass`, "Kill") ⇒ 1 + case Logging.Debug(`aname`, `aclass`, "restarting") ⇒ 2 + case Logging.Debug(`aname`, `aclass`, "restarted") ⇒ 3 }.toSet expectNoMsg(Duration.Zero) assert(set == Set(1, 2, 3), set + " was not Set(1, 2, 3)") } system.stop(supervisor) - expectMsg(Logging.Debug(sname, "stopping")) - expectMsg(Logging.Debug(aname, "stopped")) - expectMsg(Logging.Debug(sname, "stopped")) + expectMsg(Logging.Debug(sname, `sclass`, "stopping")) + expectMsg(Logging.Debug(aname, `aclass`, "stopped")) + expectMsg(Logging.Debug(sname, `sclass`, "stopped")) } } } diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index eaad8d0610..5454d54d23 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -358,12 +358,12 @@ private[akka] class ActorCell( actor = created created.preStart() checkReceiveTimeout - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "started (" + actor + ")")) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(created), "started (" + created + ")")) } catch { // FIXME catching all and continue isn't good for OOME, ticket #1418 case e ⇒ try { - system.eventStream.publish(Error(e, self.path.toString, "error while creating actor")) + system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "error while creating actor")) // prevent any further messages to be processed until the actor has been restarted dispatcher.suspend(this) } finally { @@ -373,7 +373,7 @@ private[akka] class ActorCell( def recreate(cause: Throwable): Unit = try { val failedActor = actor - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "restarting")) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(failedActor), "restarting")) val freshActor = newActor() if (failedActor ne null) { val c = currentMessage //One read only plz @@ -388,7 +388,7 @@ private[akka] class ActorCell( actor = freshActor // assign it here so if preStart fails, we can null out the sef-refs next call hotswap = Props.noHotSwap // Reset the behavior freshActor.postRestart(cause) - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "restarted")) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(freshActor), "restarted")) dispatcher.resume(this) //FIXME should this be moved down? 
@@ -396,7 +396,7 @@ private[akka] class ActorCell( } catch { // FIXME catching all and continue isn't good for OOME, ticket #1418 case e ⇒ try { - system.eventStream.publish(Error(e, self.path.toString, "error while creating actor")) + system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "error while creating actor")) // prevent any further messages to be processed until the actor has been restarted dispatcher.suspend(this) } finally { @@ -417,7 +417,7 @@ private[akka] class ActorCell( else { // do not process normal messages while waiting for all children to terminate dispatcher suspend this - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "stopping")) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopping")) // do not use stop(child) because that would dissociate the children from us, but we still want to wait for them for (child ← c) child.asInstanceOf[InternalActorRef].stop() stopping = true @@ -428,12 +428,12 @@ private[akka] class ActorCell( childrenRefs.get(child.path.name) match { case None ⇒ childrenRefs = childrenRefs.updated(child.path.name, ChildRestartStats(child)) - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "now supervising " + child)) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) case Some(ChildRestartStats(`child`, _, _)) ⇒ // this is the nominal case where we created the child and entered it in actorCreated() above - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "now supervising " + child)) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) case Some(ChildRestartStats(c, _, _)) ⇒ - system.eventStream.publish(Warning(self.path.toString, "Already supervising other child with same name '" + child.path.name + "', old: " + c + " new: " + child)) + system.eventStream.publish(Warning(self.path.toString, clazz(actor), "Already supervising other child with same name '" + child.path.name + "', old: " + c + " new: " + child)) } } @@ -448,10 +448,10 @@ private[akka] class ActorCell( case Recreate(cause) ⇒ recreate(cause) case Link(subject) ⇒ system.deathWatch.subscribe(self, subject) - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "now monitoring " + subject)) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now monitoring " + subject)) case Unlink(subject) ⇒ system.deathWatch.unsubscribe(self, subject) - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "stopped monitoring " + subject)) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped monitoring " + subject)) case Suspend() ⇒ suspend() case Resume() ⇒ resume() case Terminate() ⇒ terminate() @@ -460,7 +460,7 @@ private[akka] class ActorCell( } } catch { case e ⇒ //Should we really catch everything here? - system.eventStream.publish(Error(e, self.path.toString, "error while processing " + message)) + system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "error while processing " + message)) //TODO FIXME How should problems here be handled??? 
throw e } @@ -480,7 +480,7 @@ private[akka] class ActorCell( currentMessage = null // reset current message after successful invocation } catch { case e ⇒ - system.eventStream.publish(Error(e, self.path.toString, e.getMessage)) + system.eventStream.publish(Error(e, self.path.toString, clazz(actor), e.getMessage)) // prevent any further messages to be processed until the actor has been restarted dispatcher.suspend(this) @@ -500,7 +500,7 @@ private[akka] class ActorCell( } } catch { case e ⇒ - system.eventStream.publish(Error(e, self.path.toString, e.getMessage)) + system.eventStream.publish(Error(e, self.path.toString, clazz(actor), e.getMessage)) throw e } } @@ -530,7 +530,8 @@ private[akka] class ActorCell( } def autoReceiveMessage(msg: Envelope) { - if (system.settings.DebugAutoReceive) system.eventStream.publish(Debug(self.path.toString, "received AutoReceiveMessage " + msg)) + if (system.settings.DebugAutoReceive) + system.eventStream.publish(Debug(self.path.toString, clazz(actor), "received AutoReceiveMessage " + msg)) msg.message match { case Failed(cause) ⇒ handleFailure(sender, cause) @@ -554,7 +555,8 @@ private[akka] class ActorCell( try { parent.sendSystemMessage(ChildTerminated(self)) system.deathWatch.publish(Terminated(self)) - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "stopped")) + if (system.settings.DebugLifecycle) + system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped")) // FIXME: can actor be null? } finally { currentMessage = null clearActorFields() @@ -565,8 +567,8 @@ private[akka] class ActorCell( final def handleFailure(child: ActorRef, cause: Throwable): Unit = childrenRefs.get(child.path.name) match { case Some(stats) if stats.child == child ⇒ if (!props.faultHandler.handleFailure(this, child, cause, stats, childrenRefs.values)) throw cause - case Some(stats) ⇒ system.eventStream.publish(Warning(self.path.toString, "dropping Failed(" + cause + ") from unknown child " + child + " matching names but not the same, was: " + stats.child)) - case None ⇒ system.eventStream.publish(Warning(self.path.toString, "dropping Failed(" + cause + ") from unknown child " + child)) + case Some(stats) ⇒ system.eventStream.publish(Warning(self.path.toString, clazz(actor), "dropping Failed(" + cause + ") from unknown child " + child + " matching names but not the same, was: " + stats.child)) + case None ⇒ system.eventStream.publish(Warning(self.path.toString, clazz(actor), "dropping Failed(" + cause + ") from unknown child " + child)) } final def handleChildTerminated(child: ActorRef): Unit = { @@ -625,4 +627,9 @@ private[akka] class ActorCell( lookupAndSetField(a.getClass, a, "self", self) } } + + private def clazz(o: AnyRef): Class[_] = { + if (o eq null) this.getClass + else o.getClass + } } diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index aafbe1f0e3..a4f1c2c37c 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -330,7 +330,7 @@ class ActorSystemImpl(val name: String, applicationConfig: Config) extends Actor // this provides basic logging (to stdout) until .start() is called below val eventStream = new EventStream(DebugEventStream) eventStream.startStdoutLogger(settings) - val log = new BusLogging(eventStream, "ActorSystem") // “this” used only for .getClass in tagging messages + val log = new BusLogging(eventStream, "ActorSystem", this.getClass) val 
scheduler = createScheduler() diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 52f35fd952..d9b45ea7c8 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -80,7 +80,7 @@ final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cl runnable.run() } catch { // FIXME catching all and continue isn't good for OOME, ticket #1418 - case e ⇒ eventStream.publish(Error(e, "TaskInvocation", e.getMessage)) + case e ⇒ eventStream.publish(Error(e, "TaskInvocation", this.getClass, e.getMessage)) } finally { cleanup() } diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index 79331e0397..2511dbc8e2 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -59,7 +59,7 @@ class Dispatcher( executorService.get() execute invocation } catch { case e2: RejectedExecutionException ⇒ - prerequisites.eventStream.publish(Warning("Dispatcher", e2.toString)) + prerequisites.eventStream.publish(Warning("Dispatcher", this.getClass, e2.toString)) throw e2 } } diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 491d79a63b..942bd25a65 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -77,7 +77,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc } else { // Note that the configurator of the default dispatcher will be registered for this id, // so this will only be logged once, which is crucial. 
- prerequisites.eventStream.publish(Warning("Dispatchers", + prerequisites.eventStream.publish(Warning("Dispatchers", this.getClass, "Dispatcher [%s] not configured, using default-dispatcher".format(id))) lookupConfigurator(DefaultDispatcherId) } diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 933a263732..0675f1c9f2 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -325,7 +325,7 @@ object Future { // FIXME catching all and continue isn't good for OOME, ticket #1418 executor match { case m: MessageDispatcher ⇒ - m.prerequisites.eventStream.publish(Error(e, "Future.dispatchTask", e.getMessage)) + m.prerequisites.eventStream.publish(Error(e, "Future.dispatchTask", this.getClass, e.getMessage)) case other ⇒ e.printStackTrace() } @@ -566,7 +566,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { protected def logError(msg: String, problem: Throwable): Unit = { executor match { - case m: MessageDispatcher ⇒ m.prerequisites.eventStream.publish(Error(problem, msg, problem.getMessage)) + case m: MessageDispatcher ⇒ m.prerequisites.eventStream.publish(Error(problem, msg, this.getClass, problem.getMessage)) case other ⇒ problem.printStackTrace() } } diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 3abd961d0f..0da0bf13af 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -214,7 +214,7 @@ private[akka] abstract class Mailbox(val actor: ActorCell) extends MessageQueue } } catch { case e ⇒ - actor.system.eventStream.publish(Error(e, actor.self.path.toString, "exception during processing system messages, dropping " + SystemMessage.size(nextMessage) + " messages!")) + actor.system.eventStream.publish(Error(e, actor.self.path.toString, actor.actor.getClass, "exception during processing system messages, dropping " + SystemMessage.size(nextMessage) + " messages!")) throw e } } diff --git a/akka-actor/src/main/scala/akka/event/EventStream.scala b/akka-actor/src/main/scala/akka/event/EventStream.scala index eea9deff35..6ad2d0fbdf 100644 --- a/akka-actor/src/main/scala/akka/event/EventStream.scala +++ b/akka-actor/src/main/scala/akka/event/EventStream.scala @@ -38,19 +38,19 @@ class EventStream(private val debug: Boolean = false) extends LoggingBus with Su } override def subscribe(subscriber: ActorRef, channel: Class[_]): Boolean = { - if (debug) publish(Logging.Debug(simpleName(this), "subscribing " + subscriber + " to channel " + channel)) + if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "subscribing " + subscriber + " to channel " + channel)) super.subscribe(subscriber, channel) } override def unsubscribe(subscriber: ActorRef, channel: Class[_]): Boolean = { val ret = super.unsubscribe(subscriber, channel) - if (debug) publish(Logging.Debug(simpleName(this), "unsubscribing " + subscriber + " from channel " + channel)) + if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "unsubscribing " + subscriber + " from channel " + channel)) ret } override def unsubscribe(subscriber: ActorRef) { super.unsubscribe(subscriber) - if (debug) publish(Logging.Debug(simpleName(this), "unsubscribing " + subscriber + " from all channels")) + if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "unsubscribing " + subscriber + " from all channels")) 
} } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index bfd0f2a184..83bff79617 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -16,10 +16,6 @@ import scala.util.control.NoStackTrace import java.util.concurrent.TimeoutException import akka.dispatch.Await -object LoggingBus { - implicit def fromActorSystem(system: ActorSystem): LoggingBus = system.eventStream -} - /** * This trait brings log level handling to the EventStream: it reads the log * levels for the initial logging (StandardOutLogger) and the loggers & level @@ -75,7 +71,7 @@ trait LoggingBus extends ActorEventBus { */ private[akka] def startStdoutLogger(config: Settings) { val level = levelFor(config.StdoutLogLevel) getOrElse { - StandardOutLogger.print(Error(new EventHandlerException, simpleName(this), "unknown akka.stdout-loglevel " + config.StdoutLogLevel)) + StandardOutLogger.print(Error(new EventHandlerException, simpleName(this), this.getClass, "unknown akka.stdout-loglevel " + config.StdoutLogLevel)) ErrorLevel } AllLogLevels filter (level >= _) foreach (l ⇒ subscribe(StandardOutLogger, classFor(l))) @@ -83,7 +79,7 @@ trait LoggingBus extends ActorEventBus { loggers = Seq(StandardOutLogger) _logLevel = level } - publish(Debug(simpleName(this), "StandardOutLogger started")) + publish(Debug(simpleName(this), this.getClass, "StandardOutLogger started")) } /** @@ -91,7 +87,7 @@ trait LoggingBus extends ActorEventBus { */ private[akka] def startDefaultLoggers(system: ActorSystemImpl) { val level = levelFor(system.settings.LogLevel) getOrElse { - StandardOutLogger.print(Error(new EventHandlerException, simpleName(this), "unknown akka.stdout-loglevel " + system.settings.LogLevel)) + StandardOutLogger.print(Error(new EventHandlerException, simpleName(this), this.getClass, "unknown akka.stdout-loglevel " + system.settings.LogLevel)) ErrorLevel } try { @@ -119,7 +115,7 @@ trait LoggingBus extends ActorEventBus { loggers = myloggers _logLevel = level } - publish(Debug(simpleName(this), "Default Loggers started")) + publish(Debug(simpleName(this), this.getClass, "Default Loggers started")) if (!(defaultLoggers contains StandardOutLoggerName)) { unsubscribe(StandardOutLogger) } @@ -138,7 +134,7 @@ trait LoggingBus extends ActorEventBus { val level = _logLevel // volatile access before reading loggers if (!(loggers contains StandardOutLogger)) { AllLogLevels filter (level >= _) foreach (l ⇒ subscribe(StandardOutLogger, classFor(l))) - publish(Debug(simpleName(this), "shutting down: StandardOutLogger started")) + publish(Debug(simpleName(this), this.getClass, "shutting down: StandardOutLogger started")) } for { logger ← loggers @@ -151,7 +147,7 @@ trait LoggingBus extends ActorEventBus { case _ ⇒ } } - publish(Debug(simpleName(this), "all default loggers stopped")) + publish(Debug(simpleName(this), this.getClass, "all default loggers stopped")) } private def addLogger(system: ActorSystemImpl, clazz: Class[_ <: Actor], level: LogLevel): ActorRef = { @@ -160,12 +156,12 @@ trait LoggingBus extends ActorEventBus { implicit val timeout = Timeout(3 seconds) val response = try Await.result(actor ? 
InitializeLogger(this), timeout.duration) catch { case _: TimeoutException ⇒ - publish(Warning(simpleName(this), "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)")) + publish(Warning(simpleName(this), this.getClass, "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)")) } if (response != LoggerInitialized) throw new LoggerInitializationException("Logger " + name + " did not respond with LoggerInitialized, sent instead " + response) AllLogLevels filter (level >= _) foreach (l ⇒ subscribe(actor, classFor(l))) - publish(Debug(simpleName(this), "logger " + name + " started")) + publish(Debug(simpleName(this), this.getClass, "logger " + name + " started")) actor } @@ -217,6 +213,11 @@ object LogSource { * ... * log.info("hello world!") * + * + * The source object is used in two fashions: its `Class[_]` will be part of + * all log events produced by this logger, plus a string representation is + * generated which may contain per-instance information, see `apply` or `create` + * below. * * Loggers are attached to the level-specific channels Error, * Warning, Info and Debug as @@ -305,13 +306,11 @@ object Logging { val debugFormat = "[DEBUG] [%s] [%s] [%s] %s".intern /** - * Obtain LoggingAdapter for the given event stream (system) and source object. - * Note that there is an implicit conversion from [[akka.actor.ActorSystem]] - * to [[akka.event.LoggingBus]]. + * Obtain LoggingAdapter for the given logging bus and source object. * * The source is used to identify the source of this logging channel and must have - * a corresponding LogSource[T] instance in scope; by default these are - * provided for Class[_], Actor, ActorRef and String types. The source + * a corresponding implicit LogSource[T] instance in scope; by default these are + * provided for Class[_], Actor, ActorRef and String types. By these, the source * object is translated to a String according to the following rules: *
      *
    • if it is an Actor or ActorRef, its path is used
    • @@ -319,13 +318,34 @@ object Logging { *
    • in case of a class an approximation of its simpleName *
    • and in all other cases the simpleName of its class
    • *
    + * + * You can add your own rules quite easily: + * + * {{{ + * trait MyType { // as an example + * def name: String + * } + * + * implicit val myLogSourceType: LogSource[MyType] = new LogSource { + * def genString(a: MyType) = a.name + * } + * + * class MyClass extends MyType { + * val log = Logging(eventStream, this) // will use "hallo" as logSource + * def name = "hallo" + * } + * }}} */ - def apply[T: LogSource](eventStream: LoggingBus, logSource: T): LoggingAdapter = - new BusLogging(eventStream, implicitly[LogSource[T]].genString(logSource)) + def apply[T: LogSource](bus: LoggingBus, logSource: T): LoggingAdapter = + new BusLogging(bus, implicitly[LogSource[T]].genString(logSource), logSource.getClass) /** - * Java API: Obtain LoggingAdapter for the given system and source object. The - * source object is used to identify the source of this logging channel. The source + * Obtain LoggingAdapter for the given actor system and source object. This + * will use the system’s event stream. + * + * The source is used to identify the source of this logging channel and must have + * a corresponding implicit LogSource[T] instance in scope; by default these are + * provided for Class[_], Actor, ActorRef and String types. By these, the source * object is translated to a String according to the following rules: *
      *
    • if it is an Actor or ActorRef, its path is used
    • @@ -333,12 +353,92 @@ object Logging { *
    • in case of a class an approximation of its simpleName *
    • and in all other cases the simpleName of its class
    • *
    + * + * You can add your own rules quite easily: + * + * {{{ + * trait MyType { // as an example + * def name: String + * } + * + * implicit val myLogSourceType: LogSource[MyType] = new LogSource { + * def genString(a: MyType) = a.name + * } + * + * class MyClass extends MyType { + * val log = Logging(eventStream, this) // will use "hallo" as logSource + * def name = "hallo" + * } + * }}} */ - def getLogger(system: ActorSystem, logSource: AnyRef): LoggingAdapter = apply(system.eventStream, LogSource.fromAnyRef(logSource)) + def apply[T: LogSource](system: ActorSystem, logSource: T): LoggingAdapter = + new BusLogging(system.eventStream, implicitly[LogSource[T]].genString(logSource), logSource.getClass) /** - * Java API: Obtain LoggingAdapter for the given event bus and source object. The - * source object is used to identify the source of this logging channel. + * Obtain LoggingAdapter for the given actor system and source object. This + * will use the system’s event stream. + * + * The source is used to identify the source of this logging channel and must have + * a corresponding implicit LogSource[T] instance in scope; by default these are + * provided for Class[_], Actor, ActorRef and String types. By these, the source + * object is translated to a String according to the following rules: + *
      + *
    • if it is an Actor or ActorRef, its path is used
    • + *
    • in case of a String it is used as is
    • + *
    • in case of a class an approximation of its simpleName + *
    • and in all other cases the simpleName of its class
    • + *
    + * + * You can add your own rules quite easily: + * + * {{{ + * trait MyType { // as an example + * def name: String + * } + * + * implicit val myLogSourceType: LogSource[MyType] = new LogSource { + * def genString(a: MyType) = a.name + * } + * + * class MyClass extends MyType { + * val log = Logging(eventStream, this) // will use "hallo" as logSource + * def name = "hallo" + * } + * }}} + */ + def getLogger(system: ActorSystem, logSource: AnyRef): LoggingAdapter = apply(system, LogSource.fromAnyRef(logSource)) + + /** + * Obtain LoggingAdapter for the given logging bus and source object. This + * will use the system’s event stream. + * + * The source is used to identify the source of this logging channel and must have + * a corresponding implicit LogSource[T] instance in scope; by default these are + * provided for Class[_], Actor, ActorRef and String types. By these, the source + * object is translated to a String according to the following rules: + *
      + *
    • if it is an Actor or ActorRef, its path is used
    • + *
    • in case of a String it is used as is
    • + *
    • in case of a class an approximation of its simpleName + *
    • and in all other cases the simpleName of its class
    • + *
    + * + * You can add your own rules quite easily: + * + * {{{ + * trait MyType { // as an example + * def name: String + * } + * + * implicit val myLogSourceType: LogSource[MyType] = new LogSource { + * def genString(a: MyType) = a.name + * } + * + * class MyClass extends MyType { + * val log = Logging(eventStream, this) // will use "hallo" as logSource + * def name = "hallo" + * } + * }}} */ def getLogger(bus: LoggingBus, logSource: AnyRef): LoggingAdapter = apply(bus, LogSource.fromAnyRef(logSource)) @@ -362,19 +462,34 @@ object Logging { * The LogLevel of this LogEvent */ def level: LogLevel + + /** + * The source of this event + */ + def logSource: String + + /** + * The class of the source of this event + */ + def logClass: Class[_] + + /** + * The message, may be any object or null. + */ + def message: Any } /** * For ERROR Logging */ - case class Error(cause: Throwable, logSource: String, message: Any = "") extends LogEvent { - def this(logSource: String, message: Any) = this(Error.NoCause, logSource, message) + case class Error(cause: Throwable, logSource: String, logClass: Class[_], message: Any = "") extends LogEvent { + def this(logSource: String, logClass: Class[_], message: Any) = this(Error.NoCause, logSource, logClass, message) override def level = ErrorLevel } object Error { - def apply(logSource: String, message: Any) = new Error(NoCause, logSource, message) + def apply(logSource: String, logClass: Class[_], message: Any) = new Error(NoCause, logSource, logClass, message) /** Null Object used for errors without cause Throwable */ object NoCause extends NoStackTrace @@ -383,21 +498,21 @@ object Logging { /** * For WARNING Logging */ - case class Warning(logSource: String, message: Any = "") extends LogEvent { + case class Warning(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent { override def level = WarningLevel } /** * For INFO Logging */ - case class Info(logSource: String, message: Any = "") extends LogEvent { + case class Info(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent { override def level = InfoLevel } /** * For DEBUG Logging */ - case class Debug(logSource: String, message: Any = "") extends LogEvent { + case class Debug(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent { override def level = DebugLevel } @@ -439,7 +554,7 @@ object Logging { case e: Warning ⇒ warning(e) case e: Info ⇒ info(e) case e: Debug ⇒ debug(e) - case e ⇒ warning(Warning(simpleName(this), "received unexpected event of class " + e.getClass + ": " + e)) + case e ⇒ warning(Warning(simpleName(this), this.getClass, "received unexpected event of class " + e.getClass + ": " + e)) } } @@ -626,7 +741,7 @@ trait LoggingAdapter { } } -class BusLogging(val bus: LoggingBus, val logSource: String) extends LoggingAdapter { +class BusLogging(val bus: LoggingBus, val logSource: String, val logClass: Class[_]) extends LoggingAdapter { import Logging._ @@ -635,14 +750,14 @@ class BusLogging(val bus: LoggingBus, val logSource: String) extends LoggingAdap def isInfoEnabled = bus.logLevel >= InfoLevel def isDebugEnabled = bus.logLevel >= DebugLevel - protected def notifyError(message: String) { bus.publish(Error(logSource, message)) } + protected def notifyError(message: String) { bus.publish(Error(logSource, logClass, message)) } - protected def notifyError(cause: Throwable, message: String) { bus.publish(Error(cause, logSource, message)) } + protected def notifyError(cause: Throwable, message: String) { bus.publish(Error(cause, 
logSource, logClass, message)) } - protected def notifyWarning(message: String) { bus.publish(Warning(logSource, message)) } + protected def notifyWarning(message: String) { bus.publish(Warning(logSource, logClass, message)) } - protected def notifyInfo(message: String) { bus.publish(Info(logSource, message)) } + protected def notifyInfo(message: String) { bus.publish(Info(logSource, logClass, message)) } - protected def notifyDebug(message: String) { bus.publish(Debug(logSource, message)) } + protected def notifyDebug(message: String) { bus.publish(Debug(logSource, logClass, message)) } } diff --git a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala index 250af89812..bb5a282856 100644 --- a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala +++ b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala @@ -36,7 +36,7 @@ object LoggingReceive { class LoggingReceive(source: AnyRef, r: Receive)(implicit system: ActorSystem) extends Receive { def isDefinedAt(o: Any) = { val handled = r.isDefinedAt(o) - system.eventStream.publish(Debug(LogSource.fromAnyRef(source), "received " + (if (handled) "handled" else "unhandled") + " message " + o)) + system.eventStream.publish(Debug(LogSource.fromAnyRef(source), source.getClass, "received " + (if (handled) "handled" else "unhandled") + " message " + o)) handled } def apply(o: Any): Unit = r(o) diff --git a/akka-actor/src/main/scala/akka/util/JMX.scala b/akka-actor/src/main/scala/akka/util/JMX.scala index bcfd5d2477..9a9f0530fb 100644 --- a/akka-actor/src/main/scala/akka/util/JMX.scala +++ b/akka-actor/src/main/scala/akka/util/JMX.scala @@ -21,7 +21,7 @@ object JMX { case e: InstanceAlreadyExistsException ⇒ Some(mbeanServer.getObjectInstance(name)) case e: Exception ⇒ - system.eventStream.publish(Error(e, "JMX", "Error when registering mbean [%s]".format(mbean))) + system.eventStream.publish(Error(e, "JMX", this.getClass, "Error when registering mbean [%s]".format(mbean))) None } @@ -29,6 +29,6 @@ object JMX { mbeanServer.unregisterMBean(mbean) } catch { case e: InstanceNotFoundException ⇒ {} - case e: Exception ⇒ system.eventStream.publish(Error(e, "JMX", "Error while unregistering mbean [%s]".format(mbean))) + case e: Exception ⇒ system.eventStream.publish(Error(e, "JMX", this.getClass, "Error while unregistering mbean [%s]".format(mbean))) } } diff --git a/akka-docs/java/logging.rst b/akka-docs/java/logging.rst index 20920d940b..aee644c175 100644 --- a/akka-docs/java/logging.rst +++ b/akka-docs/java/logging.rst @@ -83,8 +83,8 @@ creating the ``LoggingAdapter`` correspond to the name of the SL4FJ logger. loglevel = "DEBUG" } -Logging thread in MDC ---------------------- +Logging Thread and Akka Source in MDC +------------------------------------- Since the logging is done asynchronously the thread in which the logging was performed is captured in Mapped Diagnostic Context (MDC) with attribute name ``sourceThread``. @@ -96,3 +96,16 @@ With Logback the thread name is available with ``%X{sourceThread}`` specifier wi +Another helpful facility is that Akka captures the actor’s address when +instantiating a logger within it, meaning that the full instance identification +is available for associating log messages e.g. with members of a router. 
This +information is available in the MDC with attribute name ``akkaSource``:: + + + + %date{ISO8601} %-5level %logger{36} %X{akkaSource} - %msg%n + + + +For more details on what this attribute contains—also for non-actors—please see +`How to Log`_. diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala index 20ac33480b..dd25d9d820 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala @@ -162,10 +162,10 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { system.eventStream.subscribe(testActor, classOf[Logging.Info]) myActor ! "test" - expectMsgPF(1 second) { case Logging.Info(_, "received test") ⇒ true } + expectMsgPF(1 second) { case Logging.Info(_, _, "received test") ⇒ true } myActor ! "unknown" - expectMsgPF(1 second) { case Logging.Info(_, "received unknown message") ⇒ true } + expectMsgPF(1 second) { case Logging.Info(_, _, "received unknown message") ⇒ true } system.eventStream.unsubscribe(testActor) system.eventStream.publish(TestEvent.UnMute(filter)) diff --git a/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala b/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala index ffa56a3064..c3c070d374 100644 --- a/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala @@ -38,11 +38,11 @@ object LoggingDocSpec { class MyEventListener extends Actor { def receive = { - case InitializeLogger(_) ⇒ sender ! LoggerInitialized - case Error(cause, logSource, message) ⇒ // ... - case Warning(logSource, message) ⇒ // ... - case Info(logSource, message) ⇒ // ... - case Debug(logSource, message) ⇒ // ... + case InitializeLogger(_) ⇒ sender ! LoggerInitialized + case Error(cause, logSource, logClass, message) ⇒ // ... + case Warning(logSource, logClass, message) ⇒ // ... + case Info(logSource, logClass, message) ⇒ // ... + case Debug(logSource, logClass, message) ⇒ // ... } } //#my-event-listener diff --git a/akka-docs/scala/logging.rst b/akka-docs/scala/logging.rst index 35f4e838ff..f4272c5da0 100644 --- a/akka-docs/scala/logging.rst +++ b/akka-docs/scala/logging.rst @@ -85,8 +85,8 @@ creating the ``LoggingAdapter`` correspond to the name of the SL4FJ logger. loglevel = "DEBUG" } -Logging thread in MDC ---------------------- +Logging Thread and Akka Source in MDC +------------------------------------- Since the logging is done asynchronously the thread in which the logging was performed is captured in Mapped Diagnostic Context (MDC) with attribute name ``sourceThread``. @@ -98,3 +98,16 @@ With Logback the thread name is available with ``%X{sourceThread}`` specifier wi +Another helpful facility is that Akka captures the actor’s address when +instantiating a logger within it, meaning that the full instance identification +is available for associating log messages e.g. with members of a router. This +information is available in the MDC with attribute name ``akkaSource``:: + + + + %date{ISO8601} %-5level %logger{36} %X{akkaSource} - %msg%n + + + +For more details on what this attribute contains—also for non-actors—please see +`How to Log`_. 
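For illustration, a minimal sketch (not part of this patch) of how the ``akkaSource`` value comes about on the Scala side: a ``LoggingAdapter`` created inside an actor, here via ``ActorLogging``, uses the actor's path as its log source, which the ``Slf4jEventHandler`` then places in the MDC together with ``sourceThread``. The class name ``MyActor`` is illustrative only::

    import akka.actor.{ Actor, ActorLogging }

    class MyActor extends Actor with ActorLogging {
      def receive = {
        // logged with akkaSource = this actor's path
        case msg ⇒ log.info("received {}", msg)
      }
    }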
diff --git a/akka-remote/src/main/scala/akka/remote/Remote.scala b/akka-remote/src/main/scala/akka/remote/Remote.scala index 1af4802552..6efa542e0e 100644 --- a/akka-remote/src/main/scala/akka/remote/Remote.scala +++ b/akka-remote/src/main/scala/akka/remote/Remote.scala @@ -153,7 +153,7 @@ class RemoteSystemDaemon(system: ActorSystemImpl, remote: Remote, _path: ActorPa override def !(msg: Any)(implicit sender: ActorRef = null): Unit = msg match { case message: DaemonMsg ⇒ - log.debug("Received command [\n{}] to RemoteSystemDaemon on [{}]", message, remote.remoteSettings.NodeName) + log.debug("Received command [{}] to RemoteSystemDaemon on [{}]", message, remote.remoteSettings.NodeName) message match { case DaemonMsgCreate(factory, path, supervisor) ⇒ import remote.remoteAddress diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 4624c9dc73..719261a5b6 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -60,7 +60,7 @@ abstract class RemoteClient private[akka] ( * Converts the message to the wireprotocol and sends the message across the wire */ def send(message: Any, senderOption: Option[ActorRef], recipient: ActorRef): Unit = if (isRunning) { - log.debug("Sending message: {}", message) + log.debug("Sending message {} from {} to {}", message, senderOption, recipient) send((message, senderOption, recipient)) } else { val exception = new RemoteClientException("RemoteModule client is not running, make sure you have invoked 'RemoteClient.connect()' before using it.", remoteSupport, remoteAddress) diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala index b51720aa01..e585ade6d7 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala @@ -15,8 +15,10 @@ akka { deployment { /watchers.remote = "akka://other@127.0.0.1:2666" } + debug.lifecycle = on } cluster.nodename = buh + loglevel = DEBUG remote.server { hostname = "127.0.0.1" port = 2665 diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala index 91f3123634..4831d78270 100644 --- a/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala +++ b/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala @@ -19,6 +19,7 @@ trait SLF4JLogging { object Logger { def apply(logger: String): SLFLogger = SLFLoggerFactory getLogger logger + def apply(logClass: Class[_]): SLFLogger = SLFLoggerFactory getLogger logClass def root: SLFLogger = apply(SLFLogger.ROOT_LOGGER_NAME) } @@ -31,30 +32,31 @@ object Logger { class Slf4jEventHandler extends Actor with SLF4JLogging { val mdcThreadAttributeName = "sourceThread" + val mdcAkkaSourceAttributeName = "akkaSource" def receive = { - case event @ Error(cause, logSource, message) ⇒ - withMdc(mdcThreadAttributeName, event.thread.getName) { + case event @ Error(cause, logSource, logClass, message) ⇒ + withMdc(logSource, event.thread.getName) { cause match { - case Error.NoCause ⇒ Logger(logSource).error(message.toString) - case _ ⇒ Logger(logSource).error(message.toString, cause) + case Error.NoCause ⇒ Logger(logClass).error(message.toString) + case _ ⇒ Logger(logClass).error(message.toString, cause) } } - case event @ Warning(logSource, message) ⇒ - 
withMdc(mdcThreadAttributeName, event.thread.getName) { - Logger(logSource).warn("{}", message.asInstanceOf[AnyRef]) + case event @ Warning(logSource, logClass, message) ⇒ + withMdc(logSource, event.thread.getName) { + Logger(logClass).warn("{}", message.asInstanceOf[AnyRef]) } - case event @ Info(logSource, message) ⇒ - withMdc(mdcThreadAttributeName, event.thread.getName) { - Logger(logSource).info("{}", message.asInstanceOf[AnyRef]) + case event @ Info(logSource, logClass, message) ⇒ + withMdc(logSource, event.thread.getName) { + Logger(logClass).info("{}", message.asInstanceOf[AnyRef]) } - case event @ Debug(logSource, message) ⇒ - withMdc(mdcThreadAttributeName, event.thread.getName) { - Logger(logSource).debug("{}", message.asInstanceOf[AnyRef]) + case event @ Debug(logSource, logClass, message) ⇒ + withMdc(logSource, event.thread.getName) { + Logger(logClass).debug("{}", message.asInstanceOf[AnyRef]) } case InitializeLogger(_) ⇒ @@ -63,12 +65,14 @@ class Slf4jEventHandler extends Actor with SLF4JLogging { } @inline - final def withMdc(name: String, value: String)(logStatement: ⇒ Unit) { - MDC.put(name, value) + final def withMdc(logSource: String, thread: String)(logStatement: ⇒ Unit) { + MDC.put(mdcAkkaSourceAttributeName, logSource) + MDC.put(mdcThreadAttributeName, thread) try { logStatement } finally { - MDC.remove(name) + MDC.remove(mdcAkkaSourceAttributeName) + MDC.remove(mdcThreadAttributeName) } } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala index 7da8d84eba..3bee246e11 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala @@ -254,7 +254,7 @@ case class ErrorFilter( def matches(event: LogEvent) = { event match { - case Error(cause, src, msg) if throwable isInstance cause ⇒ + case Error(cause, src, _, msg) if throwable isInstance cause ⇒ (msg == null && cause.getMessage == null && cause.getStackTrace.length == 0) || doMatch(src, msg) || doMatch(src, cause.getMessage) case _ ⇒ false @@ -305,8 +305,8 @@ case class WarningFilter( def matches(event: LogEvent) = { event match { - case Warning(src, msg) ⇒ doMatch(src, msg) - case _ ⇒ false + case Warning(src, _, msg) ⇒ doMatch(src, msg) + case _ ⇒ false } } @@ -348,8 +348,8 @@ case class InfoFilter( def matches(event: LogEvent) = { event match { - case Info(src, msg) ⇒ doMatch(src, msg) - case _ ⇒ false + case Info(src, _, msg) ⇒ doMatch(src, msg) + case _ ⇒ false } } @@ -391,8 +391,8 @@ case class DebugFilter( def matches(event: LogEvent) = { event match { - case Debug(src, msg) ⇒ doMatch(src, msg) - case _ ⇒ false + case Debug(src, _, msg) ⇒ doMatch(src, msg) + case _ ⇒ false } } @@ -456,15 +456,15 @@ class TestEventListener extends Logging.DefaultLogger { case event: LogEvent ⇒ if (!filter(event)) print(event) case DeadLetter(msg: SystemMessage, _, rcp) ⇒ if (!msg.isInstanceOf[Terminate]) { - val event = Warning(rcp.path.toString, "received dead system message: " + msg) + val event = Warning(rcp.path.toString, rcp.getClass, "received dead system message: " + msg) if (!filter(event)) print(event) } case DeadLetter(msg, snd, rcp) ⇒ if (!msg.isInstanceOf[Terminated]) { - val event = Warning(rcp.path.toString, "received dead letter from " + snd + ": " + msg) + val event = Warning(rcp.path.toString, rcp.getClass, "received dead letter from " + snd + ": " + msg) if (!filter(event)) print(event) } - case m ⇒ print(Debug(context.system.name, m)) 
+ case m ⇒ print(Debug(context.system.name, this.getClass, m)) } def filter(event: LogEvent): Boolean = filters exists (f ⇒ try { f(event) } catch { case e: Exception ⇒ false }) diff --git a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala index 747a9c90e9..d5c9b1a151 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala @@ -81,7 +81,7 @@ object TestActorRefSpec { var count = 0 var msg: String = _ def receive = { - case Warning(_, m: String) ⇒ count += 1; msg = m + case Warning(_, _, m: String) ⇒ count += 1; msg = m } } From 255a8c74e968cbe2ee3e8a08a01042de0d7cf82d Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 12 Jan 2012 11:35:48 +0100 Subject: [PATCH 40/84] =?UTF-8?q?include=20actor=20system=E2=80=99s=20addr?= =?UTF-8?q?ess=20in=20akkaSource=20log=20property,=20see=20#1621?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../test/scala/akka/actor/FSMActorSpec.scala | 11 +- .../src/main/scala/akka/actor/Actor.scala | 2 +- .../scala/akka/actor/ActorRefProvider.scala | 2 +- .../main/scala/akka/actor/ActorSystem.scala | 9 +- .../src/main/scala/akka/actor/FSM.scala | 2 +- .../src/main/scala/akka/event/Logging.scala | 129 ++++++++++-------- .../akka/remote/RemoteActorRefProvider.scala | 4 +- 7 files changed, 91 insertions(+), 68 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala index c5ed765aab..67c94d3dd7 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala @@ -218,18 +218,19 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im } }) val name = fsm.path.toString + val fsmClass = fsm.underlyingActor.getClass system.eventStream.subscribe(testActor, classOf[Logging.Debug]) fsm ! "go" expectMsgPF(1 second, hint = "processing Event(go,null)") { - case Logging.Debug(`name`, _, s: String) if s.startsWith("processing Event(go,null) from Actor[") ⇒ true + case Logging.Debug(`name`, `fsmClass`, s: String) if s.startsWith("processing Event(go,null) from Actor[") ⇒ true } - expectMsg(1 second, Logging.Debug(name, fsm.underlyingActor.getClass, "setting timer 't'/1500 milliseconds: Shutdown")) - expectMsg(1 second, Logging.Debug(name, fsm.underlyingActor.getClass, "transition 1 -> 2")) + expectMsg(1 second, Logging.Debug(name, fsmClass, "setting timer 't'/1500 milliseconds: Shutdown")) + expectMsg(1 second, Logging.Debug(name, fsmClass, "transition 1 -> 2")) fsm ! 
"stop" expectMsgPF(1 second, hint = "processing Event(stop,null)") { - case Logging.Debug(`name`, _, s: String) if s.startsWith("processing Event(stop,null) from Actor[") ⇒ true + case Logging.Debug(`name`, `fsmClass`, s: String) if s.startsWith("processing Event(stop,null) from Actor[") ⇒ true } - expectMsgAllOf(1 second, Logging.Debug(name, fsm.underlyingActor.getClass, "canceling timer 't'"), Normal) + expectMsgAllOf(1 second, Logging.Debug(name, fsmClass, "canceling timer 't'"), Normal) expectNoMsg(1 second) system.eventStream.unsubscribe(testActor) } diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 8713df95b4..4681e88cfa 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -112,7 +112,7 @@ object Status { } trait ActorLogging { this: Actor ⇒ - val log = akka.event.Logging(context.system.eventStream, context.self) + val log = akka.event.Logging(context.system, context.self) } object Actor { diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index d940aa2c20..67a1603105 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -296,7 +296,7 @@ class LocalActorRefProvider( val nodename: String = "local" val clustername: String = "local" - val log = Logging(eventStream, "LocalActorRefProvider") + val log = Logging(eventStream, "LocalActorRefProvider(" + rootPath.address + ")") /* * generate name for temporary actor refs diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index a4f1c2c37c..e24a3a29f2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -330,7 +330,11 @@ class ActorSystemImpl(val name: String, applicationConfig: Config) extends Actor // this provides basic logging (to stdout) until .start() is called below val eventStream = new EventStream(DebugEventStream) eventStream.startStdoutLogger(settings) - val log = new BusLogging(eventStream, "ActorSystem", this.getClass) + + // unfortunately we need logging before we know the rootpath address, which wants to be inserted here + @volatile + private var _log = new BusLogging(eventStream, "ActorSystem(" + name + ")", this.getClass) + def log = _log val scheduler = createScheduler() @@ -383,6 +387,7 @@ class ActorSystemImpl(val name: String, applicationConfig: Config) extends Actor private lazy val _start: this.type = { // the provider is expected to start default loggers, LocalActorRefProvider does this provider.init(this) + _log = new BusLogging(eventStream, "ActorSystem(" + lookupRoot.path.address + ")", this.getClass) deadLetters.init(dispatcher, lookupRoot.path / "deadLetters") // this starts the reaper actor and the user-configured logging subscribers, which are also actors registerOnTermination(stopScheduler()) @@ -498,4 +503,6 @@ class ActorSystemImpl(val name: String, applicationConfig: Config) extends Actor } } + + override def toString = lookupRoot.path.root.address.toString } diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index b49be8c0b5..eb573df767 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -190,7 +190,7 @@ trait FSM[S, D] extends Listeners 
{ type Timeout = Option[Duration] type TransitionHandler = PartialFunction[(S, S), Unit] - val log = Logging(context.system, context.self) + val log = Logging(context.system, this) /** * **************************************** diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 83bff79617..33c4b1339e 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -86,8 +86,9 @@ trait LoggingBus extends ActorEventBus { * Internal Akka use only */ private[akka] def startDefaultLoggers(system: ActorSystemImpl) { + val logName = simpleName(this) + "(" + system + ")" val level = levelFor(system.settings.LogLevel) getOrElse { - StandardOutLogger.print(Error(new EventHandlerException, simpleName(this), this.getClass, "unknown akka.stdout-loglevel " + system.settings.LogLevel)) + StandardOutLogger.print(Error(new EventHandlerException, logName, this.getClass, "unknown akka.stdout-loglevel " + system.settings.LogLevel)) ErrorLevel } try { @@ -101,7 +102,7 @@ trait LoggingBus extends ActorEventBus { } yield { try { ReflectiveAccess.getClassFor[Actor](loggerName) match { - case Right(actorClass) ⇒ addLogger(system, actorClass, level) + case Right(actorClass) ⇒ addLogger(system, actorClass, level, logName) case Left(exception) ⇒ throw exception } } catch { @@ -115,7 +116,7 @@ trait LoggingBus extends ActorEventBus { loggers = myloggers _logLevel = level } - publish(Debug(simpleName(this), this.getClass, "Default Loggers started")) + publish(Debug(logName, this.getClass, "Default Loggers started")) if (!(defaultLoggers contains StandardOutLoggerName)) { unsubscribe(StandardOutLogger) } @@ -150,18 +151,18 @@ trait LoggingBus extends ActorEventBus { publish(Debug(simpleName(this), this.getClass, "all default loggers stopped")) } - private def addLogger(system: ActorSystemImpl, clazz: Class[_ <: Actor], level: LogLevel): ActorRef = { + private def addLogger(system: ActorSystemImpl, clazz: Class[_ <: Actor], level: LogLevel, logName: String): ActorRef = { val name = "log" + Extension(system).id() + "-" + simpleName(clazz) val actor = system.systemActorOf(Props(clazz), name) implicit val timeout = Timeout(3 seconds) val response = try Await.result(actor ? 
InitializeLogger(this), timeout.duration) catch { case _: TimeoutException ⇒ - publish(Warning(simpleName(this), this.getClass, "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)")) + publish(Warning(logName, this.getClass, "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)")) } if (response != LoggerInitialized) throw new LoggerInitializationException("Logger " + name + " did not respond with LoggerInitialized, sent instead " + response) AllLogLevels filter (level >= _) foreach (l ⇒ subscribe(actor, classFor(l))) - publish(Debug(simpleName(this), this.getClass, "logger " + name + " started")) + publish(Debug(logName, this.getClass, "logger " + name + " started")) actor } @@ -169,11 +170,13 @@ trait LoggingBus extends ActorEventBus { trait LogSource[-T] { def genString(t: T): String + def genString(t: T, system: ActorSystem): String = genString(t) } object LogSource { implicit val fromString: LogSource[String] = new LogSource[String] { def genString(s: String) = s + override def genString(s: String, system: ActorSystem) = s + "(" + system + ")" } implicit val fromActor: LogSource[Actor] = new LogSource[Actor] { @@ -187,10 +190,13 @@ object LogSource { // this one unfortunately does not work as implicit, because existential types have some weird behavior val fromClass: LogSource[Class[_]] = new LogSource[Class[_]] { def genString(c: Class[_]) = simpleName(c) + override def genString(c: Class[_], system: ActorSystem) = simpleName(c) + "(" + system + ")" } implicit def fromAnyClass[T]: LogSource[Class[T]] = fromClass.asInstanceOf[LogSource[Class[T]]] - def apply[T: LogSource](o: T) = implicitly[LogSource[T]].genString(o) + def apply[T: LogSource](o: T): String = implicitly[LogSource[T]].genString(o) + + def apply[T: LogSource](o: T, system: ActorSystem): String = implicitly[LogSource[T]].genString(o, system) def fromAnyRef(o: AnyRef): String = o match { @@ -200,6 +206,15 @@ object LogSource { case s: String ⇒ s case x ⇒ simpleName(x) } + + def fromAnyRef(o: AnyRef, system: ActorSystem): String = + o match { + case c: Class[_] ⇒ fromClass.genString(c, system) + case a: Actor ⇒ fromActor.genString(a, system) + case a: ActorRef ⇒ fromActorRef.genString(a, system) + case s: String ⇒ fromString.genString(s, system) + case x ⇒ simpleName(x) + "(" + system + ")" + } } /** @@ -213,8 +228,8 @@ object LogSource { * ... * log.info("hello world!") * - * - * The source object is used in two fashions: its `Class[_]` will be part of + * + * The source object is used in two fashions: its `Class[_]` will be part of * all log events produced by this logger, plus a string representation is * generated which may contain per-instance information, see `apply` or `create` * below. @@ -305,6 +320,41 @@ object Logging { val infoFormat = "[INFO] [%s] [%s] [%s] %s".intern val debugFormat = "[DEBUG] [%s] [%s] [%s] %s".intern + /** + * Obtain LoggingAdapter for the given actor system and source object. This + * will use the system’s event stream. + * + * The source is used to identify the source of this logging channel and must have + * a corresponding implicit LogSource[T] instance in scope; by default these are + * provided for Class[_], Actor, ActorRef and String types. By these, the source + * object is translated to a String according to the following rules: + *
+   * <ul>
+   * <li>if it is an Actor or ActorRef, its path is used</li>
+   * <li>in case of a String it is used as is</li>
+   * <li>in case of a class an approximation of its simpleName
+   * and in all other cases the simpleName of its class</li>
+   * </ul>
    + * + * You can add your own rules quite easily: + * + * {{{ + * trait MyType { // as an example + * def name: String + * } + * + * implicit val myLogSourceType: LogSource[MyType] = new LogSource { + * def genString(a: MyType) = a.name + * } + * + * class MyClass extends MyType { + * val log = Logging(eventStream, this) // will use "hallo" as logSource + * def name = "hallo" + * } + * }}} + */ + def apply[T: LogSource](system: ActorSystem, logSource: T): LoggingAdapter = + new BusLogging(system.eventStream, LogSource(logSource, system), logSource.getClass) + /** * Obtain LoggingAdapter for the given logging bus and source object. * @@ -318,18 +368,18 @@ object Logging { *
   * <ul>
   * <li>if it is an Actor or ActorRef, its path is used</li>
   * <li>in case of a String it is used as is</li>
   * <li>in case of a class an approximation of its simpleName
   * and in all other cases the simpleName of its class</li>
   * </ul>
- * + * * You can add your own rules quite easily: - * + * * {{{ * trait MyType { // as an example * def name: String * } - * + * * implicit val myLogSourceType: LogSource[MyType] = new LogSource { * def genString(a: MyType) = a.name * } - * + * * class MyClass extends MyType { * val log = Logging(eventStream, this) // will use "hallo" as logSource * def name = "hallo" @@ -353,60 +403,25 @@ object Logging { *
   * <ul>
   * <li>if it is an Actor or ActorRef, its path is used</li>
   * <li>in case of a String it is used as is</li>
   * <li>in case of a class an approximation of its simpleName
   * and in all other cases the simpleName of its class</li>
   * </ul>
  • * - * - * You can add your own rules quite easily: - * - * {{{ - * trait MyType { // as an example - * def name: String - * } - * - * implicit val myLogSourceType: LogSource[MyType] = new LogSource { - * def genString(a: MyType) = a.name - * } - * - * class MyClass extends MyType { - * val log = Logging(eventStream, this) // will use "hallo" as logSource - * def name = "hallo" - * } - * }}} - */ - def apply[T: LogSource](system: ActorSystem, logSource: T): LoggingAdapter = - new BusLogging(system.eventStream, implicitly[LogSource[T]].genString(logSource), logSource.getClass) - - /** - * Obtain LoggingAdapter for the given actor system and source object. This - * will use the system’s event stream. * - * The source is used to identify the source of this logging channel and must have - * a corresponding implicit LogSource[T] instance in scope; by default these are - * provided for Class[_], Actor, ActorRef and String types. By these, the source - * object is translated to a String according to the following rules: - *
-   * <ul>
-   * <li>if it is an Actor or ActorRef, its path is used</li>
-   * <li>in case of a String it is used as is</li>
-   * <li>in case of a class an approximation of its simpleName
-   * and in all other cases the simpleName of its class</li>
-   * </ul>
    - * * You can add your own rules quite easily: - * + * * {{{ * trait MyType { // as an example * def name: String * } - * + * * implicit val myLogSourceType: LogSource[MyType] = new LogSource { * def genString(a: MyType) = a.name * } - * + * * class MyClass extends MyType { * val log = Logging(eventStream, this) // will use "hallo" as logSource * def name = "hallo" * } * }}} */ - def getLogger(system: ActorSystem, logSource: AnyRef): LoggingAdapter = apply(system, LogSource.fromAnyRef(logSource)) + def getLogger(system: ActorSystem, logSource: AnyRef): LoggingAdapter = apply(system, LogSource.fromAnyRef(logSource, system)) /** * Obtain LoggingAdapter for the given logging bus and source object. This @@ -422,25 +437,25 @@ object Logging { *
   * <ul>
   * <li>if it is an Actor or ActorRef, its path is used</li>
   * <li>in case of a String it is used as is</li>
   * <li>in case of a class an approximation of its simpleName
   * and in all other cases the simpleName of its class</li>
   * </ul>
  • * - * + * * You can add your own rules quite easily: - * + * * {{{ * trait MyType { // as an example * def name: String * } - * + * * implicit val myLogSourceType: LogSource[MyType] = new LogSource { * def genString(a: MyType) = a.name * } - * + * * class MyClass extends MyType { * val log = Logging(eventStream, this) // will use "hallo" as logSource * def name = "hallo" * } * }}} */ - def getLogger(bus: LoggingBus, logSource: AnyRef): LoggingAdapter = apply(bus, LogSource.fromAnyRef(logSource)) + //def getLogger(bus: LoggingBus, logSource: AnyRef): LoggingAdapter = apply(bus, LogSource.fromAnyRef(logSource)) /** * Artificial exception injected into Error events if no Throwable is diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 5b748098ac..037f9d594a 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -27,8 +27,6 @@ class RemoteActorRefProvider( val scheduler: Scheduler, _deadLetters: InternalActorRef) extends ActorRefProvider { - val log = Logging(eventStream, "RemoteActorRefProvider") - val remoteSettings = new RemoteSettings(settings.config, systemName) def rootGuardian = local.rootGuardian @@ -44,6 +42,8 @@ class RemoteActorRefProvider( val remote = new Remote(settings, remoteSettings) implicit val transports = remote.transports + val log = Logging(eventStream, "RemoteActorRefProvider(" + remote.remoteAddress + ")") + val rootPath: ActorPath = RootActorPath(remote.remoteAddress) private val local = new LocalActorRefProvider(systemName, settings, eventStream, scheduler, _deadLetters, rootPath, deployer) From 358da77ae306a07dcbd03c32d58ef4493b75fd98 Mon Sep 17 00:00:00 2001 From: Roland Date: Wed, 11 Jan 2012 14:12:10 +0100 Subject: [PATCH 41/84] make failing test deterministic for RemoteDeathWatchSpec --- .../test/scala/akka/actor/DeathWatchSpec.scala | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala index 30828c1014..59fe72cc07 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala @@ -34,16 +34,23 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout "notify with one Terminated message when an Actor is stopped" in { val terminal = system.actorOf(Props(context ⇒ { case _ ⇒ })) - startWatching(terminal) - - testActor ! "ping" - expectMsg("ping") + startWatching(terminal) ! "hallo" + expectMsg("hallo") // this ensures that the DaemonMsgWatch has been received before we send the PoisonPill terminal ! PoisonPill expectTerminationOf(terminal) } + "notify with one Terminated message when an Actor is already dead" in { + val terminal = system.actorOf(Props(context ⇒ { case _ ⇒ })) + + terminal ! 
PoisonPill + + startWatching(terminal) + expectTerminationOf(terminal) + } + "notify with all monitors with one Terminated message when an Actor is stopped" in { val terminal = system.actorOf(Props(context ⇒ { case _ ⇒ })) val monitor1, monitor2, monitor3 = startWatching(terminal) From 7d0e0065476afde5e9c71ae3090d702a99775be0 Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 12 Jan 2012 13:25:21 +0100 Subject: [PATCH 42/84] fix bug in EmptyLocalActorRef serialization, see #1591 - ELAR extended DeadLetterActorRef, which is serialized specially, not keeping the name - made deadletter behavior a trait, mix that into both and only override writeReplace in DLAR - remove extraneous debug settings from RemoteDeathWatchSpec --- akka-actor/src/main/scala/akka/actor/ActorRef.scala | 11 ++++++++--- .../test/scala/akka/remote/RemoteDeathWatchSpec.scala | 2 -- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 254b19e010..94ec966468 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -449,7 +449,10 @@ object DeadLetterActorRef { val serialized = new SerializedDeadLetterActorRef } -class DeadLetterActorRef(val eventStream: EventStream) extends MinimalActorRef { +trait DeadLetterActorRefLike extends MinimalActorRef { + + def eventStream: EventStream + @volatile private var brokenPromise: Future[Any] = _ @volatile @@ -477,7 +480,9 @@ class DeadLetterActorRef(val eventStream: EventStream) extends MinimalActorRef { assert(brokenPromise != null) brokenPromise } +} +class DeadLetterActorRef(val eventStream: EventStream) extends DeadLetterActorRefLike { @throws(classOf[java.io.ObjectStreamException]) override protected def writeReplace(): AnyRef = DeadLetterActorRef.serialized } @@ -486,8 +491,8 @@ class DeadLetterActorRef(val eventStream: EventStream) extends MinimalActorRef { * This special dead letter reference has a name: it is that which is returned * by a local look-up which is unsuccessful. 
*/ -class EmptyLocalActorRef(_eventStream: EventStream, _dispatcher: MessageDispatcher, _path: ActorPath) - extends DeadLetterActorRef(_eventStream) { +class EmptyLocalActorRef(val eventStream: EventStream, _dispatcher: MessageDispatcher, _path: ActorPath) + extends DeadLetterActorRefLike { init(_dispatcher, _path) override def !(message: Any)(implicit sender: ActorRef = null): Unit = message match { case d: DeadLetter ⇒ // do NOT form endless loops diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala index e585ade6d7..b51720aa01 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala @@ -15,10 +15,8 @@ akka { deployment { /watchers.remote = "akka://other@127.0.0.1:2666" } - debug.lifecycle = on } cluster.nodename = buh - loglevel = DEBUG remote.server { hostname = "127.0.0.1" port = 2665 From 62499b59011e6c82181a27420e143c1216ba492f Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 12 Jan 2012 13:45:53 +0100 Subject: [PATCH 43/84] quiesce logging of artifact resolution from Ivy --- project/AkkaBuild.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index ada3dbcbd2..b7e950fa8a 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -315,6 +315,8 @@ object AkkaBuild extends Build { if (true || (System getProperty "java.runtime.version" startsWith "1.7")) Seq() else Seq("-optimize")), // -optimize fails with jdk7 javacOptions ++= Seq("-Xlint:unchecked", "-Xlint:deprecation"), + ivyLoggingLevel in ThisBuild := UpdateLogging.Quiet, + parallelExecution in Test := System.getProperty("akka.parallelExecution", "false").toBoolean, // for excluding tests by name (or use system property: -Dakka.test.names.exclude=TimingSpec) From 8d10d44929c8443f93b4dae6eb1e6991bafbb7a9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 12 Jan 2012 16:37:08 +0100 Subject: [PATCH 44/84] Support config of custom router. See #1623 --- .../test/scala/akka/routing/ResizerSpec.scala | 2 +- .../test/scala/akka/routing/RoutingSpec.scala | 21 ++++++++++++++++ akka-actor/src/main/resources/reference.conf | 4 +++- .../src/main/scala/akka/actor/Deployer.scala | 24 ++++++++++--------- .../src/main/scala/akka/routing/Routing.scala | 15 ++++++++++++ akka-docs/java/routing.rst | 8 +++++++ akka-docs/scala/routing.rst | 8 +++++++ 7 files changed, 69 insertions(+), 13 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index 6ccad2a95f..35cc429fa6 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -178,7 +178,7 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with Await.result(router ? 
CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(2) def loop(loops: Int, t: Int, latch: TestLatch, count: AtomicInteger) = { - count.set(0) + (10 millis).dilated.sleep for (m ← 0 until loops) { router.!((t, latch, count)) (10 millis).dilated.sleep diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 077e69e5d9..a9ec39ff6e 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -13,6 +13,7 @@ import akka.util.Duration import akka.config.ConfigurationException import com.typesafe.config.ConfigFactory import java.util.concurrent.ConcurrentHashMap +import com.typesafe.config.Config object RoutingSpec { @@ -22,6 +23,10 @@ object RoutingSpec { router = round-robin nr-of-instances = 3 } + /myrouter { + router = "akka.routing.RoutingSpec$MyRouter" + foo = bar + } } """ @@ -38,6 +43,18 @@ object RoutingSpec { } } + class MyRouter(config: Config) extends RouterConfig { + val foo = config.getString("foo") + def createRoute(routeeProps: Props, actorContext: ActorContext): Route = { + val routees = IndexedSeq(actorContext.actorOf(Props[Echo])) + registerRoutees(actorContext, routees) + + { + case (sender, message) ⇒ Nil + } + } + } + } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) @@ -465,6 +482,10 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with sys.shutdown() } } + "support custom router" in { + val myrouter = system.actorOf(Props().withRouter(FromConfig), "myrouter") + myrouter.isTerminated must be(false) + } } "custom router" must { diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index 07e363fca9..02d1a49035 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -65,7 +65,9 @@ akka { # routing (load-balance) scheme to use # available: "from-code", "round-robin", "random", "smallest-mailbox", "scatter-gather", "broadcast" - # or: fully qualified class name of the router class + # or: Fully qualified class name of the router class. + # The router class must extend akka.routing.CustomRouterConfig and and have constructor + # with com.typesafe.config.Config parameter. # default is "from-code"; # Whether or not an actor is transformed to a Router is decided in code only (Props.withRouter). 
# The type of router can be overridden in the configuration; specifying "from-code" means diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 23c6da6661..5ac4c13391 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -13,6 +13,7 @@ import akka.event.EventStream import com.typesafe.config._ import akka.routing._ import java.util.concurrent.{ TimeUnit, ConcurrentHashMap } +import akka.util.ReflectiveAccess case class Deploy(path: String, config: Config, recipe: Option[ActorRecipe] = None, routing: RouterConfig = NoRouter, scope: Scope = LocalScope) @@ -56,16 +57,7 @@ class Deployer(val settings: ActorSystem.Settings) { val within = Duration(deployment.getMilliseconds("within"), TimeUnit.MILLISECONDS) val resizer: Option[Resizer] = if (config.hasPath("resizer")) { - val resizerConfig = deployment.getConfig("resizer") - Some(DefaultResizer( - lowerBound = resizerConfig.getInt("lower-bound"), - upperBound = resizerConfig.getInt("upper-bound"), - pressureThreshold = resizerConfig.getInt("pressure-threshold"), - rampupRate = resizerConfig.getDouble("rampup-rate"), - backoffThreshold = resizerConfig.getDouble("backoff-threshold"), - backoffRate = resizerConfig.getDouble("backoff-rate"), - stopDelay = Duration(resizerConfig.getMilliseconds("stop-delay"), TimeUnit.MILLISECONDS), - messagesPerResize = resizerConfig.getInt("messages-per-resize"))) + Some(DefaultResizer(deployment.getConfig("resizer"))) } else { None } @@ -77,7 +69,17 @@ class Deployer(val settings: ActorSystem.Settings) { case "smallest-mailbox" ⇒ SmallestMailboxRouter(nrOfInstances, routees, resizer) case "scatter-gather" ⇒ ScatterGatherFirstCompletedRouter(nrOfInstances, routees, within, resizer) case "broadcast" ⇒ BroadcastRouter(nrOfInstances, routees, resizer) - case x ⇒ throw new ConfigurationException("unknown router type " + x + " for path " + key) + case fqn ⇒ + val constructorSignature = Array[Class[_]](classOf[Config]) + ReflectiveAccess.createInstance[RouterConfig](fqn, constructorSignature, Array[AnyRef](deployment)) match { + case Right(router) ⇒ router + case Left(exception) ⇒ + throw new IllegalArgumentException( + ("Cannot instantiate router [%s], defined in [%s], " + + "make sure it extends [akka.routing.RouterConfig] and has constructor with " + + "[com.typesafe.config.Config] parameter") + .format(fqn, key), exception) + } } val recipe: Option[ActorRecipe] = diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index f3065788ec..0473f99fd6 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -7,8 +7,10 @@ import akka.actor._ import akka.dispatch.Future import java.util.concurrent.atomic.AtomicLong import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.TimeUnit import akka.util.{ Duration, Timeout } import akka.util.duration._ +import com.typesafe.config.Config import akka.config.ConfigurationException import scala.collection.JavaConversions.iterableAsScalaIterable @@ -760,6 +762,19 @@ trait Resizer { def resize(props: Props, actorContext: ActorContext, currentRoutees: IndexedSeq[ActorRef], routerConfig: RouterConfig) } +case object DefaultResizer { + def apply(resizerConfig: Config): DefaultResizer = + DefaultResizer( + lowerBound = resizerConfig.getInt("lower-bound"), + upperBound = 
resizerConfig.getInt("upper-bound"), + pressureThreshold = resizerConfig.getInt("pressure-threshold"), + rampupRate = resizerConfig.getDouble("rampup-rate"), + backoffThreshold = resizerConfig.getDouble("backoff-threshold"), + backoffRate = resizerConfig.getDouble("backoff-rate"), + stopDelay = Duration(resizerConfig.getMilliseconds("stop-delay"), TimeUnit.MILLISECONDS), + messagesPerResize = resizerConfig.getInt("messages-per-resize")) +} + case class DefaultResizer( /** * The fewest number of routees the router should ever have. diff --git a/akka-docs/java/routing.rst b/akka-docs/java/routing.rst index cdcc869b2a..42ad1108ea 100644 --- a/akka-docs/java/routing.rst +++ b/akka-docs/java/routing.rst @@ -256,6 +256,14 @@ If you are interested in how to use the VoteCountRouter it looks like this: .. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crTest +Configured Custom Router +************************ + +It is possible to define configuration properties for custom routers. In the ``router`` property of the deployment +configuration you define the fully qualified class name of the router class. The router class must extend +``akka.routing.CustomRouterConfig`` and and have constructor with ``com.typesafe.config.Config`` parameter. +The deployment section of the configuration is passed to the constructor. + Custom Resizer ************** diff --git a/akka-docs/scala/routing.rst b/akka-docs/scala/routing.rst index 4e75be8798..5b2ed24d28 100644 --- a/akka-docs/scala/routing.rst +++ b/akka-docs/scala/routing.rst @@ -255,6 +255,14 @@ All in all the custom router looks like this: If you are interested in how to use the VoteCountRouter you can have a look at the test class `RoutingSpec `_ +Configured Custom Router +************************ + +It is possible to define configuration properties for custom routers. In the ``router`` property of the deployment +configuration you define the fully qualified class name of the router class. The router class must extend +``akka.routing.RouterConfig`` and and have constructor with ``com.typesafe.config.Config`` parameter. +The deployment section of the configuration is passed to the constructor. 
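For illustration, a minimal sketch (not part of this patch) of a custom router configured this way, closely following the ``MyRouter`` used in ``RoutingSpec`` above; the package name, the ``foo`` property and the ``Echo`` routee are illustrative only::

    import akka.actor._
    import akka.routing._
    import com.typesafe.config.Config

    class Echo extends Actor {
      def receive = { case x ⇒ sender ! x }
    }

    // constructed by the Deployer with the deployment section of the
    // configuration, e.g.:
    //   akka.actor.deployment {
    //     /myrouter {
    //       router = "com.example.MyRouter"
    //       foo = bar
    //     }
    //   }
    class MyRouter(config: Config) extends RouterConfig {
      val foo = config.getString("foo")

      def createRoute(routeeProps: Props, actorContext: ActorContext): Route = {
        val routees = IndexedSeq(actorContext.actorOf(Props[Echo]))
        registerRoutees(actorContext, routees)

        {
          // as in RoutingSpec: no routing logic here, messages are simply dropped
          case (sender, message) ⇒ Nil
        }
      }
    }

The routed actor is then created from configuration, as in the test: ``system.actorOf(Props().withRouter(FromConfig), "myrouter")``.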
+ Custom Resizer ************** From b01640fddb9987ee305f49eaad0e935dec2af950 Mon Sep 17 00:00:00 2001 From: Roland Date: Fri, 13 Jan 2012 13:50:42 +0100 Subject: [PATCH 45/84] incorporate review feedback - tons of documentation added - lift extraction of logClass into LogSource type-class - prefer Props.empty --- .../scala/akka/actor/DeathWatchSpec.scala | 8 +- .../akka/actor/dispatch/ActorModelSpec.scala | 5 +- .../scala/akka/event/LoggingReceiveSpec.scala | 2 +- .../src/main/scala/akka/event/Logging.scala | 284 ++++++++++-------- .../scala/akka/event/LoggingReceive.scala | 3 +- akka-docs/java/logging.rst | 22 +- .../code/akka/docs/event/LoggingDocSpec.scala | 18 ++ akka-docs/scala/logging.rst | 49 ++- .../main/scala/akka/event/slf4j/SLF4J.scala | 16 +- 9 files changed, 260 insertions(+), 147 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala index 59fe72cc07..cd6dc58129 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala @@ -33,7 +33,7 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout } "notify with one Terminated message when an Actor is stopped" in { - val terminal = system.actorOf(Props(context ⇒ { case _ ⇒ })) + val terminal = system.actorOf(Props.empty) startWatching(terminal) ! "hallo" expectMsg("hallo") // this ensures that the DaemonMsgWatch has been received before we send the PoisonPill @@ -43,7 +43,7 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout } "notify with one Terminated message when an Actor is already dead" in { - val terminal = system.actorOf(Props(context ⇒ { case _ ⇒ })) + val terminal = system.actorOf(Props.empty) terminal ! PoisonPill @@ -52,7 +52,7 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout } "notify with all monitors with one Terminated message when an Actor is stopped" in { - val terminal = system.actorOf(Props(context ⇒ { case _ ⇒ })) + val terminal = system.actorOf(Props.empty) val monitor1, monitor2, monitor3 = startWatching(terminal) terminal ! 
PoisonPill @@ -67,7 +67,7 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout } "notify with _current_ monitors with one Terminated message when an Actor is stopped" in { - val terminal = system.actorOf(Props(context ⇒ { case _ ⇒ })) + val terminal = system.actorOf(Props.empty) val monitor1, monitor3 = startWatching(terminal) val monitor2 = system.actorOf(Props(new Actor { context.watch(terminal) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index b0d831dc77..fb75ab5593 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -208,8 +208,9 @@ object ActorModelSpec { await(deadline)(stats.restarts.get() == restarts) } catch { case e ⇒ - system.eventStream.publish(Error(e, Option(dispatcher).toString, - if (dispatcher ne null) dispatcher.getClass else this.getClass, + system.eventStream.publish(Error(e, + Option(dispatcher).toString, + (Option(dispatcher) getOrElse this).getClass, "actual: " + stats + ", required: InterceptorStats(susp=" + suspensions + ",res=" + resumes + ",reg=" + registers + ",unreg=" + unregisters + ",recv=" + msgsReceived + ",proc=" + msgsProcessed + ",restart=" + restarts)) diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala index 6d524729dd..bcfb9c391b 100644 --- a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala @@ -59,7 +59,7 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd } val log = LoggingReceive("funky")(r) log.isDefinedAt("hallo") - expectMsg(1 second, Logging.Debug("funky", classOf[String], "received unhandled message hallo")) + expectMsg(1 second, Logging.Debug("funky", classOf[DummyClassForStringSources], "received unhandled message hallo")) } } diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 33c4b1339e..07a1da1da5 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -168,15 +168,85 @@ trait LoggingBus extends ActorEventBus { } +/** + * This trait defines the interface to be provided by a “log source formatting + * rule” as used by [[akka.event.Logging]]’s `apply`/`create` method. + * + * See the companion object for default implementations. 
+ * + * Example: + * {{{ + * trait MyType { // as an example + * def name: String + * } + * + * implicit val myLogSourceType: LogSource[MyType] = new LogSource { + * def genString(a: MyType) = a.name + * } + * + * class MyClass extends MyType { + * val log = Logging(eventStream, this) // will use "hallo" as logSource + * def name = "hallo" + * } + * }}} + * + * The second variant is used for including the actor system’s address: + * {{{ + * trait MyType { // as an example + * def name: String + * } + * + * implicit val myLogSourceType: LogSource[MyType] = new LogSource { + * def genString(a: MyType) = a.name + * def genString(a: MyType, s: ActorSystem) = a.name + "," + s + * } + * + * class MyClass extends MyType { + * val sys = ActorSyste("sys") + * val log = Logging(sys, this) // will use "hallo,akka://sys" as logSource + * def name = "hallo" + * } + * }}} + * + * The default implementation of the second variant will just call the first. + */ trait LogSource[-T] { def genString(t: T): String def genString(t: T, system: ActorSystem): String = genString(t) + def getClazz(t: T): Class[_] = t.getClass } +/** + * This is a “marker” class which is inserted as originator class into + * [[akka.event.LogEvent]] when the string representation was supplied + * directly. + */ +class DummyClassForStringSources + +/** + * This object holds predefined formatting rules for log sources. + * + * In case an [[akka.actor.ActorSystem]] is provided, the following apply: + *
+ * <ul>
+ * <li>[[akka.actor.Actor]] and [[akka.actor.ActorRef]] will be represented by their absolute physical path</li>
+ * <li>providing a `String` as source will append "(<system address>)" and use the result</li>
+ * <li>providing a `Class` will extract its simple name, append "(<system address>)" and use the result</li>
+ * <li>anything else gives compile error unless implicit [[akka.event.LogSource]] is in scope for it</li>
+ * </ul>
+ *
+ * In case a [[akka.event.LoggingBus]] is provided, the following apply:
+ * <ul>
+ * <li>[[akka.actor.Actor]] and [[akka.actor.ActorRef]] will be represented by their absolute physical path</li>
+ * <li>providing a `String` as source will be used as is</li>
+ * <li>providing a `Class` will extract its simple name</li>
+ * <li>anything else gives compile error unless implicit [[akka.event.LogSource]] is in scope for it</li>
+ * </ul>
    + */ object LogSource { implicit val fromString: LogSource[String] = new LogSource[String] { def genString(s: String) = s override def genString(s: String, system: ActorSystem) = s + "(" + system + ")" + override def getClazz(s: String) = classOf[DummyClassForStringSources] } implicit val fromActor: LogSource[Actor] = new LogSource[Actor] { @@ -191,29 +261,54 @@ object LogSource { val fromClass: LogSource[Class[_]] = new LogSource[Class[_]] { def genString(c: Class[_]) = simpleName(c) override def genString(c: Class[_], system: ActorSystem) = simpleName(c) + "(" + system + ")" + override def getClazz(c: Class[_]) = c } implicit def fromAnyClass[T]: LogSource[Class[T]] = fromClass.asInstanceOf[LogSource[Class[T]]] - def apply[T: LogSource](o: T): String = implicitly[LogSource[T]].genString(o) + /** + * Convenience converter access: given an implicit `LogSource`, generate the + * string representation and originating class. + */ + def apply[T: LogSource](o: T): (String, Class[_]) = { + val ls = implicitly[LogSource[T]] + (ls.genString(o), ls.getClazz(o)) + } - def apply[T: LogSource](o: T, system: ActorSystem): String = implicitly[LogSource[T]].genString(o, system) + /** + * Convenience converter access: given an implicit `LogSource` and + * [[akka.actor.ActorSystem]], generate the string representation and + * originating class. + */ + def apply[T: LogSource](o: T, system: ActorSystem): (String, Class[_]) = { + val ls = implicitly[LogSource[T]] + (ls.genString(o, system), ls.getClazz(o)) + } - def fromAnyRef(o: AnyRef): String = + /** + * construct string representation for any object according to + * rules above with fallback to its `Class`’s simple name. + */ + def fromAnyRef(o: AnyRef): (String, Class[_]) = o match { - case c: Class[_] ⇒ fromClass.genString(c) - case a: Actor ⇒ fromActor.genString(a) - case a: ActorRef ⇒ fromActorRef.genString(a) - case s: String ⇒ s - case x ⇒ simpleName(x) + case c: Class[_] ⇒ apply(c) + case a: Actor ⇒ apply(a) + case a: ActorRef ⇒ apply(a) + case s: String ⇒ apply(s) + case x ⇒ (simpleName(x), x.getClass) } - def fromAnyRef(o: AnyRef, system: ActorSystem): String = + /** + * construct string representation for any object according to + * rules above (including the actor system’s address) with fallback to its + * `Class`’s simple name. + */ + def fromAnyRef(o: AnyRef, system: ActorSystem): (String, Class[_]) = o match { - case c: Class[_] ⇒ fromClass.genString(c, system) - case a: Actor ⇒ fromActor.genString(a, system) - case a: ActorRef ⇒ fromActorRef.genString(a, system) - case s: String ⇒ fromString.genString(s, system) - case x ⇒ simpleName(x) + "(" + system + ")" + case c: Class[_] ⇒ apply(c) + case a: Actor ⇒ apply(a) + case a: ActorRef ⇒ apply(a) + case s: String ⇒ apply(s) + case x ⇒ (simpleName(x) + "(" + system + ")", x.getClass) } } @@ -322,140 +417,79 @@ object Logging { /** * Obtain LoggingAdapter for the given actor system and source object. This - * will use the system’s event stream. + * will use the system’s event stream and include the system’s address in the + * log source string. * - * The source is used to identify the source of this logging channel and must have - * a corresponding implicit LogSource[T] instance in scope; by default these are - * provided for Class[_], Actor, ActorRef and String types. By these, the source - * object is translated to a String according to the following rules: - *
-   * <ul>
-   * <li>if it is an Actor or ActorRef, its path is used</li>
-   * <li>in case of a String it is used as is</li>
-   * <li>in case of a class an approximation of its simpleName
-   * and in all other cases the simpleName of its class</li>
-   * </ul>
    - * - * You can add your own rules quite easily: + * Do not use this if you want to supply a log category string (like + * “com.example.app.whatever”) unaltered, supply `system.eventStream` in this + * case or use * * {{{ - * trait MyType { // as an example - * def name: String - * } - * - * implicit val myLogSourceType: LogSource[MyType] = new LogSource { - * def genString(a: MyType) = a.name - * } - * - * class MyClass extends MyType { - * val log = Logging(eventStream, this) // will use "hallo" as logSource - * def name = "hallo" - * } + * Logging(system, this.getClass) * }}} + * + * The source is used to identify the source of this logging channel and + * must have a corresponding implicit LogSource[T] instance in scope; by + * default these are provided for Class[_], Actor, ActorRef and String types. + * See the companion object of [[akka.event.LogSource]] for details. + * + * You can add your own rules quite easily, see [[akka.event.LogSource]]. */ - def apply[T: LogSource](system: ActorSystem, logSource: T): LoggingAdapter = - new BusLogging(system.eventStream, LogSource(logSource, system), logSource.getClass) + def apply[T: LogSource](system: ActorSystem, logSource: T): LoggingAdapter = { + val (str, clazz) = LogSource(logSource, system) + new BusLogging(system.eventStream, str, clazz) + } /** * Obtain LoggingAdapter for the given logging bus and source object. * - * The source is used to identify the source of this logging channel and must have - * a corresponding implicit LogSource[T] instance in scope; by default these are - * provided for Class[_], Actor, ActorRef and String types. By these, the source - * object is translated to a String according to the following rules: - *
-   * <ul>
-   * <li>if it is an Actor or ActorRef, its path is used</li>
-   * <li>in case of a String it is used as is</li>
-   * <li>in case of a class an approximation of its simpleName
-   * and in all other cases the simpleName of its class</li>
-   * </ul>
    + * The source is used to identify the source of this logging channel and + * must have a corresponding implicit LogSource[T] instance in scope; by + * default these are provided for Class[_], Actor, ActorRef and String types. + * See the companion object of [[akka.event.LogSource]] for details. * - * You can add your own rules quite easily: - * - * {{{ - * trait MyType { // as an example - * def name: String - * } - * - * implicit val myLogSourceType: LogSource[MyType] = new LogSource { - * def genString(a: MyType) = a.name - * } - * - * class MyClass extends MyType { - * val log = Logging(eventStream, this) // will use "hallo" as logSource - * def name = "hallo" - * } - * }}} + * You can add your own rules quite easily, see [[akka.event.LogSource]]. */ - def apply[T: LogSource](bus: LoggingBus, logSource: T): LoggingAdapter = - new BusLogging(bus, implicitly[LogSource[T]].genString(logSource), logSource.getClass) + def apply[T: LogSource](bus: LoggingBus, logSource: T): LoggingAdapter = { + val (str, clazz) = LogSource(logSource) + new BusLogging(bus, str, clazz) + } /** * Obtain LoggingAdapter for the given actor system and source object. This - * will use the system’s event stream. + * will use the system’s event stream and include the system’s address in the + * log source string. * - * The source is used to identify the source of this logging channel and must have - * a corresponding implicit LogSource[T] instance in scope; by default these are - * provided for Class[_], Actor, ActorRef and String types. By these, the source - * object is translated to a String according to the following rules: - *
- * <ul>
- * <li>if it is an Actor or ActorRef, its path is used</li>
- * <li>in case of a String it is used as is</li>
- * <li>in case of a class an approximation of its simpleName</li>
- * <li>and in all other cases the simpleName of its class</li>
- * </ul>
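For illustration only (not part of the patch), the Actor rule from the list above in a minimal sketch, i.e. the usual in-actor usage:

{{{
import akka.actor.Actor
import akka.event.Logging

class MyActor extends Actor {
  // Actor source: this actor's path is used as the log source
  val log = Logging(context.system, this)

  def receive = {
    case msg ⇒ log.debug("received {}", msg)
  }
}
}}}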
    - * - * You can add your own rules quite easily: + * Do not use this if you want to supply a log category string (like + * “com.example.app.whatever”) unaltered, supply `system.eventStream` in this + * case or use * * {{{ - * trait MyType { // as an example - * def name: String - * } - * - * implicit val myLogSourceType: LogSource[MyType] = new LogSource { - * def genString(a: MyType) = a.name - * } - * - * class MyClass extends MyType { - * val log = Logging(eventStream, this) // will use "hallo" as logSource - * def name = "hallo" - * } + * Logging.getLogger(system, this.getClass()); * }}} + * + * The source is used to identify the source of this logging channel and + * must have a corresponding implicit LogSource[T] instance in scope; by + * default these are provided for Class[_], Actor, ActorRef and String types. + * See the companion object of [[akka.event.LogSource]] for details. */ - def getLogger(system: ActorSystem, logSource: AnyRef): LoggingAdapter = apply(system, LogSource.fromAnyRef(logSource, system)) + def getLogger(system: ActorSystem, logSource: AnyRef): LoggingAdapter = { + val (str, clazz) = LogSource.fromAnyRef(logSource, system) + new BusLogging(system.eventStream, str, clazz) + } /** - * Obtain LoggingAdapter for the given logging bus and source object. This - * will use the system’s event stream. + * Obtain LoggingAdapter for the given logging bus and source object. * - * The source is used to identify the source of this logging channel and must have - * a corresponding implicit LogSource[T] instance in scope; by default these are - * provided for Class[_], Actor, ActorRef and String types. By these, the source - * object is translated to a String according to the following rules: - *
- * <ul>
- * <li>if it is an Actor or ActorRef, its path is used</li>
- * <li>in case of a String it is used as is</li>
- * <li>in case of a class an approximation of its simpleName</li>
- * <li>and in all other cases the simpleName of its class</li>
- * </ul>
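For illustration only (not part of the patch), a hypothetical sketch of adding your own rule: a LogSource instance for a made-up domain type, kept in its companion object so the implicit is found automatically. Only genString should need implementing; compare the my-source snippet added to LoggingDocSpec further below, which also overrides getClazz:

{{{
import akka.actor.ActorSystem
import akka.event.{ Logging, LogSource }

class Connection(val id: String)

object Connection {
  // picked up implicitly whenever a LogSource[Connection] is required
  implicit val logSource: LogSource[Connection] = new LogSource[Connection] {
    def genString(c: Connection): String = "connection-" + c.id
  }
}

class ConnectionHandler(system: ActorSystem, connection: Connection) {
  // log events from this handler are tagged with "connection-<id>"
  val log = Logging(system, connection)
}
}}}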
    - * - * You can add your own rules quite easily: - * - * {{{ - * trait MyType { // as an example - * def name: String - * } - * - * implicit val myLogSourceType: LogSource[MyType] = new LogSource { - * def genString(a: MyType) = a.name - * } - * - * class MyClass extends MyType { - * val log = Logging(eventStream, this) // will use "hallo" as logSource - * def name = "hallo" - * } - * }}} + * The source is used to identify the source of this logging channel and + * must have a corresponding implicit LogSource[T] instance in scope; by + * default these are provided for Class[_], Actor, ActorRef and String types. + * See the companion object of [[akka.event.LogSource]] for details. */ - //def getLogger(bus: LoggingBus, logSource: AnyRef): LoggingAdapter = apply(bus, LogSource.fromAnyRef(logSource)) + def getLogger(bus: LoggingBus, logSource: AnyRef): LoggingAdapter = { + val (str, clazz) = LogSource.fromAnyRef(logSource) + new BusLogging(bus, str, clazz) + } /** * Artificial exception injected into Error events if no Throwable is diff --git a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala index bb5a282856..27d829de5e 100644 --- a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala +++ b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala @@ -36,7 +36,8 @@ object LoggingReceive { class LoggingReceive(source: AnyRef, r: Receive)(implicit system: ActorSystem) extends Receive { def isDefinedAt(o: Any) = { val handled = r.isDefinedAt(o) - system.eventStream.publish(Debug(LogSource.fromAnyRef(source), source.getClass, "received " + (if (handled) "handled" else "unhandled") + " message " + o)) + val (str, clazz) = LogSource.fromAnyRef(source) + system.eventStream.publish(Debug(str, clazz, "received " + (if (handled) "handled" else "unhandled") + " message " + o)) handled } def apply(o: Any): Unit = r(o) diff --git a/akka-docs/java/logging.rst b/akka-docs/java/logging.rst index aee644c175..ffee92d00e 100644 --- a/akka-docs/java/logging.rst +++ b/akka-docs/java/logging.rst @@ -17,8 +17,13 @@ as illustrated in this example: .. includecode:: code/akka/docs/event/LoggingDocTestBase.java :include: imports,my-actor -The second parameter to the ``Logging.getLogger`` is the source of this logging channel. -The source object is translated to a String according to the following rules: +The first parameter to ``Logging.getLogger`` could also be any +:class:`LoggingBus`, specifically ``system.eventStream()``; in the demonstrated +case, the actor system’s address is included in the ``akkaSource`` +representation of the log source (see `Logging Thread and Akka Source in MDC`_) +while in the second case this is not automatically done. The second parameter +to ``Logging.getLogger`` is the source of this logging channel. The source +object is translated to a String according to the following rules: * if it is an Actor or ActorRef, its path is used * in case of a String it is used as is @@ -28,6 +33,13 @@ The source object is translated to a String according to the following rules: The log message may contain argument placeholders ``{}``, which will be substituted if the log level is enabled. +The Java :class:`Class` of the log source is also included in the generated +:class:`LogEvent`. In case of a simple string this is replaced with a “marker” +class :class:`akka.event.DummyClassForStringSources` in order to allow special +treatment of this case, e.g. 
in the SLF4J event listener which will then use +the string instead of the class’ name for looking up the logger instance to +use. + Event Handler ============= @@ -96,6 +108,12 @@ With Logback the thread name is available with ``%X{sourceThread}`` specifier wi +.. note:: + + It will probably be a good idea to use the ``sourceThread`` MDC value also in + non-Akka parts of the application in order to have this property consistently + available in the logs. + Another helpful facility is that Akka captures the actor’s address when instantiating a logger within it, meaning that the full instance identification is available for associating log messages e.g. with members of a router. This diff --git a/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala b/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala index c3c070d374..652c36af3f 100644 --- a/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/event/LoggingDocSpec.scala @@ -47,6 +47,24 @@ object LoggingDocSpec { } //#my-event-listener + //#my-source + import akka.event.LogSource + import akka.actor.ActorSystem + + object MyType { + implicit val logSource: LogSource[AnyRef] = new LogSource[AnyRef] { + def genString(o: AnyRef): String = o.getClass.getName + override def getClazz(o: AnyRef): Class[_] = o.getClass + } + } + + class MyType(system: ActorSystem) { + import MyType._ + import akka.event.Logging + + val log = Logging(system, this) + } + //#my-source } class LoggingDocSpec extends AkkaSpec { diff --git a/akka-docs/scala/logging.rst b/akka-docs/scala/logging.rst index f4272c5da0..debafcedc5 100644 --- a/akka-docs/scala/logging.rst +++ b/akka-docs/scala/logging.rst @@ -22,6 +22,8 @@ For convenience you can mixin the ``log`` member into actors, instead of definin .. code-block:: scala class MyActor extends Actor with akka.actor.ActorLogging { + ... + } The second parameter to the ``Logging`` is the source of this logging channel. The source object is translated to a String according to the following rules: @@ -29,17 +31,46 @@ The source object is translated to a String according to the following rules: * if it is an Actor or ActorRef, its path is used * in case of a String it is used as is * in case of a class an approximation of its simpleName - * and in all other cases the simpleName of its class + * and in all other cases a compile error occurs unless and implicit + :class:`LogSource[T]` is in scope for the type in question. The log message may contain argument placeholders ``{}``, which will be substituted if the log level is enabled. +Translating Log Source to String and Class +------------------------------------------ + +The rules for translating the source object to the source string and class +which are inserted into the :class:`LogEvent` during runtime are implemented +using implicit parameters and thus fully customizable: simply create your own +instance of :class:`LogSource[T]` and have it in scope when creating the +logger. + +.. includecode:: code/akka/docs/event/LoggingDocSpec.scala#my-source + +This example creates a log source which mimics traditional usage of Java +loggers, which are based upon the originating object’s class name as log +category. The override of :meth:`getClazz` is only included for demonstration +purposes as it contains exactly the default behavior. + +.. 
note:: + + You may also create the string representation up front and pass that in as + the log source, but be aware that then the :class:`Class[_]` which will be + put in the :class:`LogEvent` is + :class:`akka.event.DummyClassForStringSources`. + + The SLF4J event listener treats this case specially (using the actual string + to look up the logger instance to use instead of the class’ name), and you + might want to do this also in case you implement your own loggin adapter. + Event Handler ============= -Logging is performed asynchronously through an event bus. You can configure which event handlers that should -subscribe to the logging events. That is done using the 'event-handlers' element in the :ref:`configuration`. -Here you can also define the log level. +Logging is performed asynchronously through an event bus. You can configure +which event handlers that should subscribe to the logging events. That is done +using the ``event-handlers`` element in the :ref:`configuration`. Here you can +also define the log level. .. code-block:: ruby @@ -50,7 +81,8 @@ Here you can also define the log level. loglevel = "DEBUG" } -The default one logs to STDOUT and is registered by default. It is not intended to be used for production. There is also an :ref:`slf4j-scala` +The default one logs to STDOUT and is registered by default. It is not intended +to be used for production. There is also an :ref:`slf4j-scala` event handler available in the 'akka-slf4j' module. Example of creating a listener: @@ -58,7 +90,6 @@ Example of creating a listener: .. includecode:: code/akka/docs/event/LoggingDocSpec.scala :include: my-event-listener - .. _slf4j-scala: SLF4J @@ -98,6 +129,12 @@ With Logback the thread name is available with ``%X{sourceThread}`` specifier wi +.. note:: + + It will probably be a good idea to use the ``sourceThread`` MDC value also in + non-Akka parts of the application in order to have this property consistently + available in the logs. + Another helpful facility is that Akka captures the actor’s address when instantiating a logger within it, meaning that the full instance identification is available for associating log messages e.g. with members of a router. This diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala index 4831d78270..91a6cd7bf2 100644 --- a/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala +++ b/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala @@ -8,6 +8,7 @@ import org.slf4j.{ Logger ⇒ SLFLogger, LoggerFactory ⇒ SLFLoggerFactory } import org.slf4j.MDC import akka.event.Logging._ import akka.actor._ +import akka.event.DummyClassForStringSources /** * Base trait for all classes that wants to be able use the SLF4J logging infrastructure. 
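For illustration only (not part of the patch), a sketch of what the string-source special case means in practice; it assumes the Slf4jEventHandler is configured as the event handler and that the APIs are the Akka 2.0-era ones shown above:

{{{
import akka.actor.ActorSystem
import akka.event.Logging

val system = ActorSystem("demo") // assumes akka.event-handlers includes the Slf4jEventHandler

// the string is used unaltered as the log category; because the LogEvent's class is
// DummyClassForStringSources, the SLF4J handler looks the logger up by this string
val billingLog = Logging(system.eventStream, "com.example.billing")
billingLog.info("invoice {} created", 42)
}}}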
@@ -19,7 +20,10 @@ trait SLF4JLogging { object Logger { def apply(logger: String): SLFLogger = SLFLoggerFactory getLogger logger - def apply(logClass: Class[_]): SLFLogger = SLFLoggerFactory getLogger logClass + def apply(logClass: Class[_], logSource: String): SLFLogger = logClass match { + case c if c == classOf[DummyClassForStringSources] ⇒ apply(logSource) + case _ ⇒ SLFLoggerFactory getLogger logClass + } def root: SLFLogger = apply(SLFLogger.ROOT_LOGGER_NAME) } @@ -39,24 +43,24 @@ class Slf4jEventHandler extends Actor with SLF4JLogging { case event @ Error(cause, logSource, logClass, message) ⇒ withMdc(logSource, event.thread.getName) { cause match { - case Error.NoCause ⇒ Logger(logClass).error(message.toString) - case _ ⇒ Logger(logClass).error(message.toString, cause) + case Error.NoCause ⇒ Logger(logClass, logSource).error(message.toString) + case _ ⇒ Logger(logClass, logSource).error(message.toString, cause) } } case event @ Warning(logSource, logClass, message) ⇒ withMdc(logSource, event.thread.getName) { - Logger(logClass).warn("{}", message.asInstanceOf[AnyRef]) + Logger(logClass, logSource).warn("{}", message.asInstanceOf[AnyRef]) } case event @ Info(logSource, logClass, message) ⇒ withMdc(logSource, event.thread.getName) { - Logger(logClass).info("{}", message.asInstanceOf[AnyRef]) + Logger(logClass, logSource).info("{}", message.asInstanceOf[AnyRef]) } case event @ Debug(logSource, logClass, message) ⇒ withMdc(logSource, event.thread.getName) { - Logger(logClass).debug("{}", message.asInstanceOf[AnyRef]) + Logger(logClass, logSource).debug("{}", message.asInstanceOf[AnyRef]) } case InitializeLogger(_) ⇒ From 6dc0880e52acddc3fe36ea80e0eab11cc5b2a25a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 13 Jan 2012 16:38:33 +0100 Subject: [PATCH 46/84] Fixed issue with cancel of scheduled recurring tasks. 
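For illustration only (not part of the patch), a minimal sketch of the usage this fix is about, assuming an Akka 2.0-era ActorSystem named `system`:

{{{
import akka.actor.ActorSystem
import akka.util.duration._

val system = ActorSystem("demo")

// first tick after 50 ms, then every 100 ms
val cancellable = system.scheduler.schedule(50 millis, 100 millis) {
  println("tick")
}

// previously the returned Cancellable only covered the initial timeout;
// with this change cancel() also stops the re-scheduled ticks
cancellable.cancel()
}}}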
See #1641 * The problem was that the Timeout in the returned Cancellable was only for the scheduled initial task, then task is scheduled for each tick with new Timeout, which is was never used * Solved it with another Cancellable implementation that always delegates to the valid Timeout instance * Added tests for it --- .../test/scala/akka/actor/SchedulerSpec.scala | 36 ++++- .../scala/akka/actor/ActorRefProvider.scala | 153 +++++++++++------- 2 files changed, 126 insertions(+), 63 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala index ba06a90023..6126911162 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala @@ -1,13 +1,11 @@ package akka.actor import org.scalatest.BeforeAndAfterEach -import akka.testkit.AkkaSpec -import akka.testkit.EventFilter import akka.util.duration._ import java.util.concurrent.{ CountDownLatch, ConcurrentLinkedQueue, TimeUnit } -import akka.testkit.DefaultTimeout -import akka.testkit.TestLatch +import akka.testkit._ import akka.dispatch.Await +import java.util.concurrent.atomic.AtomicInteger @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout { @@ -94,6 +92,36 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout assert(ticks.await(3, TimeUnit.SECONDS) == false) //No counting down should've been made } + "be cancellable during initial delay" in { + val ticks = new AtomicInteger + + val initialDelay = 200.milliseconds.dilated + val delay = 10.milliseconds.dilated + val timeout = collectCancellable(system.scheduler.schedule(initialDelay, delay) { + ticks.incrementAndGet() + }) + 10.milliseconds.dilated.sleep() + timeout.cancel() + (initialDelay + 100.milliseconds.dilated).sleep() + + ticks.get must be(0) + } + + "be cancellable after initial delay" in { + val ticks = new AtomicInteger + + val initialDelay = 20.milliseconds.dilated + val delay = 200.milliseconds.dilated + val timeout = collectCancellable(system.scheduler.schedule(initialDelay, delay) { + ticks.incrementAndGet() + }) + (initialDelay + 100.milliseconds.dilated).sleep() + timeout.cancel() + (delay + 100.milliseconds.dilated).sleep() + + ticks.get must be(1) + } + /** * ticket #307 */ diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 67a1603105..9abb0c2104 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -5,7 +5,10 @@ package akka.actor import java.util.concurrent.atomic.AtomicLong -import org.jboss.netty.akka.util.{ TimerTask, HashedWheelTimer } +import org.jboss.netty.akka.util.HashedWheelTimer +import org.jboss.netty.akka.util.TimerTask +import org.jboss.netty.akka.util.Timer +import org.jboss.netty.akka.util.{ Timeout ⇒ HWTimeout } import akka.util.Timeout.intToTimeout import akka.config.ConfigurationException import akka.dispatch._ @@ -538,79 +541,78 @@ class LocalDeathWatch(val mapSize: Int) extends DeathWatch with ActorClassificat */ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, dispatcher: ⇒ MessageDispatcher) extends Scheduler with Closeable { - import org.jboss.netty.akka.util.{ Timeout ⇒ HWTimeout } - - def schedule(initialDelay: Duration, delay: Duration, 
receiver: ActorRef, message: Any): Cancellable = - new DefaultCancellable(hashedWheelTimer.newTimeout(createContinuousTask(delay, receiver, message), initialDelay)) - - def schedule(initialDelay: Duration, delay: Duration)(f: ⇒ Unit): Cancellable = - new DefaultCancellable(hashedWheelTimer.newTimeout(createContinuousTask(delay, f), initialDelay)) - - def schedule(initialDelay: Duration, delay: Duration, runnable: Runnable): Cancellable = - new DefaultCancellable(hashedWheelTimer.newTimeout(createContinuousTask(delay, runnable), initialDelay)) - - def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable = - new DefaultCancellable(hashedWheelTimer.newTimeout(createSingleTask(runnable), delay)) - - def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable = - new DefaultCancellable(hashedWheelTimer.newTimeout(createSingleTask(receiver, message), delay)) - - def scheduleOnce(delay: Duration)(f: ⇒ Unit): Cancellable = - new DefaultCancellable(hashedWheelTimer.newTimeout(createSingleTask(f), delay)) - - private def createSingleTask(runnable: Runnable): TimerTask = - new TimerTask() { - def run(timeout: org.jboss.netty.akka.util.Timeout) { dispatcher.execute(runnable) } - } - - private def createSingleTask(receiver: ActorRef, message: Any): TimerTask = - new TimerTask { - def run(timeout: org.jboss.netty.akka.util.Timeout) { - receiver ! message - } - } - - private def createSingleTask(f: ⇒ Unit): TimerTask = - new TimerTask { - def run(timeout: org.jboss.netty.akka.util.Timeout) { - dispatcher.execute(new Runnable { def run = f }) - } - } - - private def createContinuousTask(delay: Duration, receiver: ActorRef, message: Any): TimerTask = { - new TimerTask { - def run(timeout: org.jboss.netty.akka.util.Timeout) { + def schedule(initialDelay: Duration, delay: Duration, receiver: ActorRef, message: Any): Cancellable = { + val continuousCancellable = new ContinuousCancellable + val task = new TimerTask with ContinuousScheduling { + def run(timeout: HWTimeout) { // Check if the receiver is still alive and kicking before sending it a message and reschedule the task if (!receiver.isTerminated) { receiver ! 
message - try timeout.getTimer.newTimeout(this, delay) catch { - case _: IllegalStateException ⇒ // stop recurring if timer is stopped - } + scheduleNext(timeout, delay, continuousCancellable) } else { log.warning("Could not reschedule message to be sent because receiving actor has been terminated.") } } } + continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay)) + continuousCancellable } - private def createContinuousTask(delay: Duration, f: ⇒ Unit): TimerTask = { - new TimerTask { - def run(timeout: org.jboss.netty.akka.util.Timeout) { + def schedule(initialDelay: Duration, delay: Duration)(f: ⇒ Unit): Cancellable = { + val continuousCancellable = new ContinuousCancellable + val task = new TimerTask with ContinuousScheduling { + def run(timeout: HWTimeout) { dispatcher.execute(new Runnable { def run = f }) - try timeout.getTimer.newTimeout(this, delay) catch { - case _: IllegalStateException ⇒ // stop recurring if timer is stopped - } + scheduleNext(timeout, delay, continuousCancellable) } } + continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay)) + continuousCancellable } - private def createContinuousTask(delay: Duration, runnable: Runnable): TimerTask = { - new TimerTask { - def run(timeout: org.jboss.netty.akka.util.Timeout) { + def schedule(initialDelay: Duration, delay: Duration, runnable: Runnable): Cancellable = { + val continuousCancellable = new ContinuousCancellable + val task = new TimerTask with ContinuousScheduling { + def run(timeout: HWTimeout) { dispatcher.execute(runnable) - try timeout.getTimer.newTimeout(this, delay) catch { - case _: IllegalStateException ⇒ // stop recurring if timer is stopped - } + scheduleNext(timeout, delay, continuousCancellable) + } + } + continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay)) + continuousCancellable + } + + def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable = { + val task = new TimerTask() { + def run(timeout: HWTimeout) { dispatcher.execute(runnable) } + } + new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay)) + } + + def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable = { + val task = new TimerTask { + def run(timeout: HWTimeout) { + receiver ! message + } + } + new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay)) + } + + def scheduleOnce(delay: Duration)(f: ⇒ Unit): Cancellable = { + val task = new TimerTask { + def run(timeout: HWTimeout) { + dispatcher.execute(new Runnable { def run = f }) + } + } + new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay)) + } + + private trait ContinuousScheduling { this: TimerTask ⇒ + def scheduleNext(timeout: HWTimeout, delay: Duration, delegator: ContinuousCancellable) { + try { + delegator.swap(timeout.getTimer.newTimeout(this, delay)) + } catch { + case _: IllegalStateException ⇒ // stop recurring if timer is stopped } } } @@ -628,7 +630,40 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, } } -class DefaultCancellable(val timeout: org.jboss.netty.akka.util.Timeout) extends Cancellable { +/** + * Wrapper of a [[org.jboss.netty.akka.util.Timeout]] that delegates all + * methods. Needed to be able to cancel continuous tasks, + * since they create new Timeout for each tick. 
+ */ +private[akka] class ContinuousCancellable extends Cancellable { + private var delegate: HWTimeout = _ + private var cancelled = false + + private[akka] def init(initialTimeout: HWTimeout): Unit = synchronized { + delegate = initialTimeout + } + + private[akka] def swap(newTimeout: HWTimeout): Unit = synchronized { + val wasCancelled = isCancelled + delegate = newTimeout + if (wasCancelled) cancel() + } + + def isCancelled(): Boolean = synchronized { + // delegate is initially null, but this object will not be exposed to the world until after init + cancelled || delegate.isCancelled() + } + + def cancel(): Unit = synchronized { + // the underlying Timeout will not become cancelled once the task has been started to run, + // therefore we keep a flag here to make sure that rescheduling doesn't occur when cancelled + cancelled = true + // delegate is initially null, but this object will not be exposed to the world until after init + delegate.cancel() + } +} + +class DefaultCancellable(val timeout: HWTimeout) extends Cancellable { def cancel() { timeout.cancel() } From fcbbc892cb948baab491eacaaf9d9c92b0eb9757 Mon Sep 17 00:00:00 2001 From: viktorklang Date: Fri, 13 Jan 2012 20:55:12 +0100 Subject: [PATCH 47/84] typo --- akka-docs/intro/deployment-scenarios.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/akka-docs/intro/deployment-scenarios.rst b/akka-docs/intro/deployment-scenarios.rst index c76284d62c..f8d6f6b197 100644 --- a/akka-docs/intro/deployment-scenarios.rst +++ b/akka-docs/intro/deployment-scenarios.rst @@ -1,4 +1,3 @@ - .. _deployment-scenarios: ################################### @@ -28,7 +27,7 @@ Actors as services ^^^^^^^^^^^^^^^^^^ The simplest way you can use Akka is to use the actors as services in your Web -application. All that’s needed to do that is to put the Akka charts as well as +application. All that’s needed to do that is to put the Akka jars as well as its dependency jars into ``WEB-INF/lib``. You also need to put the :ref:`configuration` file in the ``$AKKA_HOME/config`` directory. Now you can create your Actors as regular services referenced from your Web application. You should also From a26876f5c6cc70fab15cc9f46620391d131c536f Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 16 Jan 2012 10:15:55 +0100 Subject: [PATCH 48/84] Improvements from feedback. 
See #1641 --- .../scala/akka/actor/ActorRefProvider.scala | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 9abb0c2104..bdcbb3139f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -5,11 +5,7 @@ package akka.actor import java.util.concurrent.atomic.AtomicLong -import org.jboss.netty.akka.util.HashedWheelTimer -import org.jboss.netty.akka.util.TimerTask -import org.jboss.netty.akka.util.Timer -import org.jboss.netty.akka.util.{ Timeout ⇒ HWTimeout } -import akka.util.Timeout.intToTimeout +import org.jboss.netty.akka.util.{ Timer, TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout } import akka.config.ConfigurationException import akka.dispatch._ import akka.routing._ @@ -545,12 +541,12 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, val continuousCancellable = new ContinuousCancellable val task = new TimerTask with ContinuousScheduling { def run(timeout: HWTimeout) { - // Check if the receiver is still alive and kicking before sending it a message and reschedule the task - if (!receiver.isTerminated) { - receiver ! message - scheduleNext(timeout, delay, continuousCancellable) - } else { + receiver ! message + // Check if the receiver is still alive and kicking before reschedule the task + if (receiver.isTerminated) { log.warning("Could not reschedule message to be sent because receiving actor has been terminated.") + } else { + scheduleNext(timeout, delay, continuousCancellable) } } } @@ -559,10 +555,12 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, } def schedule(initialDelay: Duration, delay: Duration)(f: ⇒ Unit): Cancellable = { + val continuousCancellable = new ContinuousCancellable - val task = new TimerTask with ContinuousScheduling { + val task = new TimerTask with ContinuousScheduling with Runnable { + def run = f def run(timeout: HWTimeout) { - dispatcher.execute(new Runnable { def run = f }) + dispatcher execute this scheduleNext(timeout, delay, continuousCancellable) } } @@ -636,25 +634,27 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, * since they create new Timeout for each tick. 
*/ private[akka] class ContinuousCancellable extends Cancellable { + @volatile private var delegate: HWTimeout = _ + @volatile private var cancelled = false - private[akka] def init(initialTimeout: HWTimeout): Unit = synchronized { + private[akka] def init(initialTimeout: HWTimeout): Unit = { delegate = initialTimeout } - private[akka] def swap(newTimeout: HWTimeout): Unit = synchronized { + private[akka] def swap(newTimeout: HWTimeout): Unit = { val wasCancelled = isCancelled delegate = newTimeout - if (wasCancelled) cancel() + if (wasCancelled || isCancelled) cancel() } - def isCancelled(): Boolean = synchronized { + def isCancelled(): Boolean = { // delegate is initially null, but this object will not be exposed to the world until after init cancelled || delegate.isCancelled() } - def cancel(): Unit = synchronized { + def cancel(): Unit = { // the underlying Timeout will not become cancelled once the task has been started to run, // therefore we keep a flag here to make sure that rescheduling doesn't occur when cancelled cancelled = true From de151617f24069e2ddb7f50248962eaced09868c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 16 Jan 2012 14:11:29 +0100 Subject: [PATCH 49/84] Creating TypedProps and implementing support for wrapping an arbitrary ActorRef as a TypedActor --- .../scala/akka/actor/ActorTimeoutSpec.scala | 12 +- .../scala/akka/actor/SupervisorSpec.scala | 2 +- .../scala/akka/actor/TypedActorSpec.scala | 25 +- .../scala/akka/actor/ActorRefProvider.scala | 16 +- .../src/main/scala/akka/actor/Props.scala | 13 - .../main/scala/akka/actor/TypedActor.scala | 306 ++++++++++++------ .../scala/akka/camel/ConsumerScalaTest.scala | 2 +- .../docs/actor/TypedActorDocTestBase.java | 8 +- .../docs/actor/UntypedActorDocTestBase.java | 1 - .../code/akka/docs/actor/ActorDocSpec.scala | 4 +- .../akka/docs/actor/TypedActorDocSpec.scala | 11 +- 11 files changed, 239 insertions(+), 161 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala index 038e3fc9f1..ceb7bd0783 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala @@ -14,12 +14,6 @@ import akka.util.Timeout @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class ActorTimeoutSpec extends AkkaSpec with BeforeAndAfterAll with DefaultTimeout { - def actorWithTimeout(t: Timeout): ActorRef = system.actorOf(Props(creator = () ⇒ new Actor { - def receive = { - case x ⇒ - } - }, timeout = t)) - val defaultTimeout = system.settings.ActorTimeout.duration val testTimeout = if (system.settings.ActorTimeout.duration < 400.millis) 500 millis else 100 millis @@ -27,7 +21,7 @@ class ActorTimeoutSpec extends AkkaSpec with BeforeAndAfterAll with DefaultTimeo "use the global default timeout if no implicit in scope" in { within(defaultTimeout - 100.millis, defaultTimeout + 400.millis) { - val echo = actorWithTimeout(Timeout(12)) + val echo = system.actorOf(Props.empty) try { val d = system.settings.ActorTimeout.duration val f = echo ? "hallo" @@ -39,7 +33,7 @@ class ActorTimeoutSpec extends AkkaSpec with BeforeAndAfterAll with DefaultTimeo "use implicitly supplied timeout" in { implicit val timeout = Timeout(testTimeout) within(testTimeout - 100.millis, testTimeout + 300.millis) { - val echo = actorWithTimeout(Props.defaultTimeout) + val echo = system.actorOf(Props.empty) try { val f = (echo ? 
"hallo").mapTo[String] intercept[AskTimeoutException] { Await.result(f, testTimeout + testTimeout) } @@ -49,7 +43,7 @@ class ActorTimeoutSpec extends AkkaSpec with BeforeAndAfterAll with DefaultTimeo "use explicitly supplied timeout" in { within(testTimeout - 100.millis, testTimeout + 300.millis) { - val echo = actorWithTimeout(Props.defaultTimeout) + val echo = system.actorOf(Props.empty) val f = echo.?("hallo", testTimeout) try { intercept[AskTimeoutException] { Await.result(f, testTimeout + 300.millis) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala index e68e6f3906..9fe8ffb63a 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala @@ -72,7 +72,7 @@ class SupervisorSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitSende // Creating actors and supervisors // ===================================================== - private def child(supervisor: ActorRef, props: Props): ActorRef = Await.result((supervisor ? props).mapTo[ActorRef], props.timeout.duration) + private def child(supervisor: ActorRef, props: Props): ActorRef = Await.result((supervisor ? props).mapTo[ActorRef], timeout.duration) def temporaryActorAllForOne = { val supervisor = system.actorOf(Props[Supervisor].withFaultHandler(AllForOneStrategy(List(classOf[Exception]), Some(0)))) diff --git a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala index 6a6500b131..2d99a4925f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala @@ -179,13 +179,14 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) def newFooBar: Foo = newFooBar(Duration(2, "s")) def newFooBar(d: Duration): Foo = - newFooBar(Props().withTimeout(Timeout(d))) + TypedActor(system).typedActorOf(TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(Timeout(d))) - def newFooBar(props: Props): Foo = - TypedActor(system).typedActorOf(classOf[Foo], classOf[Bar], props) + def newFooBar(dispatcher: String, d: Duration): Foo = + TypedActor(system).typedActorOf(TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(Timeout(d)).withDispatcher(dispatcher)) - def newStacked(props: Props = Props().withTimeout(Timeout(2000))): Stacked = - TypedActor(system).typedActorOf(classOf[Stacked], classOf[StackedImpl], props) + def newStacked(): Stacked = + TypedActor(system).typedActorOf( + TypedProps[StackedImpl](classOf[Stacked], classOf[StackedImpl]).withTimeout(Timeout(2000))) def mustStop(typedActor: AnyRef) = TypedActor(system).stop(typedActor) must be(true) @@ -298,11 +299,11 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) "be able to handle exceptions when calling methods" in { filterEvents(EventFilter[IllegalStateException]("expected")) { val boss = system.actorOf(Props(context ⇒ { - case p: Props ⇒ context.sender ! TypedActor(context).typedActorOf(classOf[Foo], classOf[Bar], p) + case p: TypedProps[_] ⇒ context.sender ! TypedActor(context).typedActorOf(p) }).withFaultHandler(OneForOneStrategy { case e: IllegalStateException if e.getMessage == "expected" ⇒ FaultHandlingStrategy.Resume })) - val t = Await.result((boss ? Props().withTimeout(2 seconds)).mapTo[Foo], timeout.duration) + val t = Await.result((boss ? 
TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(2 seconds)).mapTo[Foo], timeout.duration) t.incr() t.failingPigdog() @@ -330,7 +331,7 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) } "be able to support implementation only typed actors" in { - val t = TypedActor(system).typedActorOf[Foo, Bar](Props()) + val t: Foo = TypedActor(system).typedActorOf(TypedProps[Bar]()) val f = t.futurePigdog(200) val f2 = t.futurePigdog(0) f2.isCompleted must be(false) @@ -340,16 +341,14 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) } "be able to support implementation only typed actors with complex interfaces" in { - val t = TypedActor(system).typedActorOf[Stackable1 with Stackable2, StackedImpl]() + val t: Stackable1 with Stackable2 = TypedActor(system).typedActorOf(TypedProps[StackedImpl]()) t.stackable1 must be("foo") t.stackable2 must be("bar") mustStop(t) } "be able to use balancing dispatcher" in { - val props = Props(timeout = Timeout(6600), dispatcher = "pooled-dispatcher") - - val thais = for (i ← 1 to 60) yield newFooBar(props) + val thais = for (i ← 1 to 60) yield newFooBar("pooled-dispatcher", 6 seconds) val iterator = new CyclicIterator(thais) val results = for (i ← 1 to 120) yield (i, iterator.next.futurePigdog(200L, i)) @@ -405,7 +404,7 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) "be able to override lifecycle callbacks" in { val latch = new CountDownLatch(16) val ta = TypedActor(system) - val t: LifeCycles = ta.typedActorOf(classOf[LifeCycles], new Creator[LifeCyclesImpl] { def create = new LifeCyclesImpl(latch) }, Props()) + val t: LifeCycles = ta.typedActorOf(TypedProps[LifeCyclesImpl](classOf[LifeCycles], new LifeCyclesImpl(latch))) EventFilter[IllegalStateException]("Crash!", occurrences = 1) intercept { t.crash() } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index d940aa2c20..a4b3db0686 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -580,9 +580,9 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, private def createContinuousTask(delay: Duration, receiver: ActorRef, message: Any): TimerTask = { new TimerTask { def run(timeout: org.jboss.netty.akka.util.Timeout) { - // Check if the receiver is still alive and kicking before sending it a message and reschedule the task + receiver ! message + // Check if the receiver is still alive and kicking before rescheduling the task if (!receiver.isTerminated) { - receiver ! 
message try timeout.getTimer.newTimeout(this, delay) catch { case _: IllegalStateException ⇒ // stop recurring if timer is stopped } @@ -593,16 +593,8 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, } } - private def createContinuousTask(delay: Duration, f: ⇒ Unit): TimerTask = { - new TimerTask { - def run(timeout: org.jboss.netty.akka.util.Timeout) { - dispatcher.execute(new Runnable { def run = f }) - try timeout.getTimer.newTimeout(this, delay) catch { - case _: IllegalStateException ⇒ // stop recurring if timer is stopped - } - } - } - } + private def createContinuousTask(delay: Duration, f: ⇒ Unit): TimerTask = + createContinuousTask(delay, new Runnable { def run = f }) private def createContinuousTask(delay: Duration, runnable: Runnable): TimerTask = { new TimerTask { diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index 0a032408a2..dccfd0bd3c 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -21,7 +21,6 @@ object Props { import FaultHandlingStrategy._ final val defaultCreator: () ⇒ Actor = () ⇒ throw new UnsupportedOperationException("No actor creator specified!") - final val defaultTimeout: Timeout = Timeout(Duration.MinusInf) final val defaultDecider: Decider = { case _: ActorInitializationException ⇒ Stop case _: ActorKilledException ⇒ Stop @@ -95,12 +94,10 @@ object Props { * val props = Props( * creator = .., * dispatcher = .., - * timeout = .., * faultHandler = .., * routerConfig = .. * ) * val props = Props().withCreator(new MyActor) - * val props = Props[MyActor].withTimeout(timeout) * val props = Props[MyActor].withRouter(RoundRobinRouter(..)) * val props = Props[MyActor].withFaultHandler(OneForOneStrategy { * case e: IllegalStateException ⇒ Resume @@ -117,7 +114,6 @@ object Props { * } * }); * Props props = new Props().withCreator(new UntypedActorFactory() { ... }); - * Props props = new Props(MyActor.class).withTimeout(timeout); * Props props = new Props(MyActor.class).withFaultHandler(new OneForOneStrategy(...)); * Props props = new Props(MyActor.class).withRouter(new RoundRobinRouter(..)); * }}} @@ -125,7 +121,6 @@ object Props { case class Props( creator: () ⇒ Actor = Props.defaultCreator, dispatcher: String = Dispatchers.DefaultDispatcherId, - timeout: Timeout = Props.defaultTimeout, faultHandler: FaultHandlingStrategy = Props.defaultFaultHandler, routerConfig: RouterConfig = Props.defaultRoutedProps) { @@ -135,7 +130,6 @@ case class Props( def this() = this( creator = Props.defaultCreator, dispatcher = Dispatchers.DefaultDispatcherId, - timeout = Props.defaultTimeout, faultHandler = Props.defaultFaultHandler) /** @@ -144,7 +138,6 @@ case class Props( def this(factory: UntypedActorFactory) = this( creator = () ⇒ factory.create(), dispatcher = Dispatchers.DefaultDispatcherId, - timeout = Props.defaultTimeout, faultHandler = Props.defaultFaultHandler) /** @@ -153,7 +146,6 @@ case class Props( def this(actorClass: Class[_ <: Actor]) = this( creator = () ⇒ actorClass.newInstance, dispatcher = Dispatchers.DefaultDispatcherId, - timeout = Props.defaultTimeout, faultHandler = Props.defaultFaultHandler, routerConfig = Props.defaultRoutedProps) @@ -183,11 +175,6 @@ case class Props( */ def withDispatcher(d: String) = copy(dispatcher = d) - /** - * Returns a new Props with the specified timeout set. 
- */ - def withTimeout(t: Timeout) = copy(timeout = t) - /** * Returns a new Props with the specified faulthandler set. */ diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index c1cefd8153..f143db0f8a 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -12,6 +12,7 @@ import akka.serialization.{ Serializer, Serialization } import akka.dispatch._ import akka.serialization.SerializationExtension import java.util.concurrent.TimeoutException +import java.lang.IllegalStateException trait TypedActorFactory { @@ -48,100 +49,31 @@ trait TypedActorFactory { def getActorRefFor(proxy: AnyRef): ActorRef /** - * Creates a new TypedActor proxy using the supplied Props, - * the interfaces usable by the returned proxy is the suppli ed interface class (if the class represents an interface) or - * all interfaces (Class.getInterfaces) if it's not an interface class - * - * Java API + * Creates a new TypedActor with the specified properies */ - def typedActorOf[R <: AnyRef, T <: R](interface: Class[R], impl: Class[T], props: Props): R = - typedActor.createProxyAndTypedActor(actorFactory, interface, impl.newInstance, props, None, interface.getClassLoader) - - /** - * Creates a new TypedActor proxy using the supplied Props, - * the interfaces usable by the returned proxy is the supplied interface class (if the class represents an interface) or - * all interfaces (Class.getInterfaces) if it's not an interface class - * - * Java API - */ - def typedActorOf[R <: AnyRef, T <: R](interface: Class[R], impl: Class[T], props: Props, name: String): R = - typedActor.createProxyAndTypedActor(actorFactory, interface, impl.newInstance, props, Some(name), interface.getClassLoader) - - /** - * Creates a new TypedActor proxy using the supplied Props, - * the interfaces usable by the returned proxy is the supplied interface class (if the class represents an interface) or - * all interfaces (Class.getInterfaces) if it's not an interface class - * - * Java API - */ - def typedActorOf[R <: AnyRef, T <: R](interface: Class[R], impl: Creator[T], props: Props): R = - typedActor.createProxyAndTypedActor(actorFactory, interface, impl.create, props, None, interface.getClassLoader) - - /** - * Creates a new TypedActor proxy using the supplied Props, - * the interfaces usable by the returned proxy is the supplied interface class (if the class represents an interface) or - * all interfaces (Class.getInterfaces) if it's not an interface class - * - * Java API - */ - def typedActorOf[R <: AnyRef, T <: R](interface: Class[R], impl: Creator[T], props: Props, name: String): R = - typedActor.createProxyAndTypedActor(actorFactory, interface, impl.create, props, Some(name), interface.getClassLoader) - - /** - * Creates a new TypedActor proxy using the supplied Props, - * the interfaces usable by the returned proxy is the supplied interface class (if the class represents an interface) or - * all interfaces (Class.getInterfaces) if it's not an interface class - * - * Scala API - */ - def typedActorOf[R <: AnyRef, T <: R](interface: Class[R], impl: ⇒ T, props: Props, name: String): R = - typedActor.createProxyAndTypedActor(actorFactory, interface, impl, props, Some(name), interface.getClassLoader) - - /** - * Creates a new TypedActor proxy using the supplied Props, - * the interfaces usable by the returned proxy is the supplied implementation class' interfaces (Class.getInterfaces) - * - * Scala API - 
*/ - def typedActorOf[R <: AnyRef, T <: R: ClassManifest](props: Props = Props(), name: String = null): R = { - val clazz = implicitly[ClassManifest[T]].erasure.asInstanceOf[Class[T]] - typedActor.createProxyAndTypedActor(actorFactory, clazz, clazz.newInstance, props, Option(name), clazz.getClassLoader) + def typedActorOf[R <: AnyRef, T <: R](props: TypedProps[T]): R = { + val proxyVar = new AtomVar[R] //Chicken'n'egg-resolver + val c = props.creator //Cache this to avoid closing over the Props + val ap = props.actorProps.withCreator(new akka.actor.TypedActor.TypedActor[R, T](proxyVar, c())) + typedActor.createActorRefProxy(props, proxyVar, actorFactory.actorOf(ap)) } /** - * Creates a proxy given the supplied Props, this is not a TypedActor, so you'll need to implement the MethodCall handling yourself, - * to create TypedActor proxies, use typedActorOf + * Creates a new TypedActor with the specified properies */ - def createProxy[R <: AnyRef](constructor: ⇒ Actor, props: Props = Props(), name: String = null, loader: ClassLoader = null)(implicit m: Manifest[R]): R = - typedActor.createProxy[R](actorFactory, typedActor.extractInterfaces(m.erasure), (ref: AtomVar[R]) ⇒ constructor, props, Option(name), if (loader eq null) m.erasure.getClassLoader else loader) + def typedActorOf[R <: AnyRef, T <: R](props: TypedProps[T], name: String): R = { + val proxyVar = new AtomVar[R] //Chicken'n'egg-resolver + val c = props.creator //Cache this to avoid closing over the Props + val ap = props.actorProps.withCreator(new akka.actor.TypedActor.TypedActor[R, T](proxyVar, c())) + typedActor.createActorRefProxy(props, proxyVar, actorFactory.actorOf(ap, name)) + } /** - * Creates a proxy given the supplied Props, this is not a TypedActor, so you'll need to implement the MethodCall handling yourself, - * to create TypedActor proxies, use typedActorOf + * Creates a TypedActor that intercepts the calls and forwards them as [[akka.actor.TypedActor.MethodCall]] + * to the provided ActorRef. 
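+ *
+ * For illustration only (hypothetical, not from the original commit): wrapping an
+ * existing ActorRef behind a typed interface, assuming an ActorSystem `system`, a
+ * `Squarer` interface as in the docs below, and a `ref: ActorRef` that understands
+ * MethodCall messages; the TypedProps creator is not invoked in this case.
+ * {{{
+ * val squarer: Squarer = TypedActor(system).typedActorOf(TypedProps[Squarer](), ref)
+ * }}}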
*/ - def createProxy[R <: AnyRef](interfaces: Array[Class[_]], constructor: Creator[Actor], props: Props, loader: ClassLoader): R = - typedActor.createProxy(actorFactory, interfaces, (ref: AtomVar[R]) ⇒ constructor.create, props, None, loader) - - /** - * Creates a proxy given the supplied Props, this is not a TypedActor, so you'll need to implement the MethodCall handling yourself, - * to create TypedActor proxies, use typedActorOf - */ - def createProxy[R <: AnyRef](interfaces: Array[Class[_]], constructor: Creator[Actor], props: Props, name: String, loader: ClassLoader): R = - typedActor.createProxy(actorFactory, interfaces, (ref: AtomVar[R]) ⇒ constructor.create, props, Some(name), loader) - - /** - * Creates a proxy given the supplied Props, this is not a TypedActor, so you'll need to implement the MethodCall handling yourself, - * to create TypedActor proxies, use typedActorOf - */ - def createProxy[R <: AnyRef](interfaces: Array[Class[_]], constructor: ⇒ Actor, props: Props, loader: ClassLoader): R = - typedActor.createProxy[R](actorFactory, interfaces, (ref: AtomVar[R]) ⇒ constructor, props, None, loader) - - /** - * Creates a proxy given the supplied Props, this is not a TypedActor, so you'll need to implement the MethodCall handling yourself, - * to create TypedActor proxies, use typedActorOf - */ - def createProxy[R <: AnyRef](interfaces: Array[Class[_]], constructor: ⇒ Actor, props: Props, name: String, loader: ClassLoader): R = - typedActor.createProxy[R](actorFactory, interfaces, (ref: AtomVar[R]) ⇒ constructor, props, Some(name), loader) + def typedActorOf[R <: AnyRef, T <: R](props: TypedProps[T], actorRef: ActorRef): R = + typedActor.createActorRefProxy(props, null: AtomVar[R], actorRef) } @@ -412,6 +344,173 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi } } +/** + * TypedProps is a TypedActor configuration object, that is thread safe and fully sharable. + * It's used in TypedActorFactory.typedActorOf to configure a TypedActor instance. + */ +object TypedProps { + + val defaultDispatcherId: String = Dispatchers.DefaultDispatcherId + val defaultFaultHandler: FaultHandlingStrategy = akka.actor.Props.defaultFaultHandler + val defaultTimeout: Option[Timeout] = None + val defaultLoader: Option[ClassLoader] = None + + /** + * @returns a sequence of interfaces that the speicified class implements, + * or a sequence containing only itself, if itself is an interface. + */ + def extractInterfaces(clazz: Class[_]): Seq[Class[_]] = + if (clazz.isInterface) Seq[Class[_]](clazz) else clazz.getInterfaces.toList + + /** + * Uses the supplied class as the factory for the TypedActor implementation, + * proxying all the interfaces it implements. + * + * Scala API + */ + def apply[T <: AnyRef](implementation: Class[T]): TypedProps[T] = + new TypedProps[T](implementation) + + /** + * Uses the supplied class as the factory for the TypedActor implementation, + * and that has the specified interface, + * or if the interface class is not an interface, all the interfaces it implements, + * appended in the sequence of interfaces. 
+ * + * Scala API + */ + def apply[T <: AnyRef](interface: Class[_ >: T], implementation: Class[T]): TypedProps[T] = + new TypedProps[T](extractInterfaces(interface), () ⇒ implementation.newInstance()) + + /** + * Uses the supplied thunk as the factory for the TypedActor implementation, + * and that has the specified interface, + * or if the interface class is not an interface, all the interfaces it implements, + * appended in the sequence of interfaces. + * + * Scala API + */ + def apply[T <: AnyRef](interface: Class[_ >: T], creator: ⇒ T): TypedProps[T] = + new TypedProps[T](extractInterfaces(interface), () ⇒ creator) + + /** + * Uses the supplied class as the factory for the TypedActor implementation, + * proxying all the interfaces it implements. + * + * Scala API + */ + def apply[T <: AnyRef: ClassManifest](): TypedProps[T] = + new TypedProps[T](implicitly[ClassManifest[T]].erasure.asInstanceOf[Class[T]]) +} + +/** + * TypedProps is a TypedActor configuration object, that is thread safe and fully sharable. + * It's used in TypedActorFactory.typedActorOf to configure a TypedActor instance. + */ +case class TypedProps[T <: AnyRef] protected[akka] (interfaces: Seq[Class[_]], + creator: () ⇒ T, + dispatcher: String = TypedProps.defaultDispatcherId, + faultHandler: FaultHandlingStrategy = TypedProps.defaultFaultHandler, + timeout: Option[Timeout] = TypedProps.defaultTimeout, + loader: Option[ClassLoader] = TypedProps.defaultLoader) { + + /** + * Uses the supplied class as the factory for the TypedActor implementation, + * and that has the specified interface, + * or if the interface class is not an interface, all the interfaces it implements, + * appended in the sequence of interfaces. + */ + def this(implementation: Class[T]) = + this(interfaces = TypedProps.extractInterfaces(implementation), + creator = () ⇒ implementation.newInstance()) + + /** + * Uses the supplied Creator as the factory for the TypedActor implementation, + * and that has the specified interface, + * or if the interface class is not an interface, all the interfaces it implements, + * appended in the sequence of interfaces. + * + * Java API. + */ + def this(interface: Class[_ >: T], implementation: Creator[T]) = + this(interfaces = TypedProps.extractInterfaces(interface), + creator = () ⇒ implementation.create()) + + /** + * Uses the supplied class as the factory for the TypedActor implementation, + * and that has the specified interface, + * or if the interface class is not an interface, all the interfaces it implements, + * appended in the sequence of interfaces. + * + * Java API. + */ + def this(interface: Class[_ >: T], implementation: Class[T]) = + this(interfaces = TypedProps.extractInterfaces(interface), + creator = () ⇒ implementation.newInstance()) + + /** + * Returns a new Props with the specified dispatcher set. + */ + def withDispatcher(d: String) = copy(dispatcher = d) + + /** + * Returns a new Props with the specified faulthandler set. + */ + def withFaultHandler(f: FaultHandlingStrategy) = copy(faultHandler = f) + + /** + * @returns a new Props that will use the specified ClassLoader to create its proxy class in + * If loader is null, it will use the bootstrap classloader. + * + * Java API + */ + def withLoader(loader: ClassLoader): TypedProps[T] = withLoader(Option(loader)) + + /** + * @returns a new Props that will use the specified ClassLoader to create its proxy class in + * If loader is null, it will use the bootstrap classloader. 
+ * + * Scala API + */ + def withLoader(loader: Option[ClassLoader]): TypedProps[T] = this.copy(loader = loader) + + /** + * @returns a new Props that will use the specified Timeout for its non-void-returning methods, + * if null is specified, it will use the default ActorTimeout as specified in the configuration. + * + * Java API + */ + def withTimeout(timeout: Timeout): TypedProps[T] = this.copy(timeout = Option(timeout)) + + /** + * @returns a new Props that will use the specified Timeout for its non-void-returning methods, + * if None is specified, it will use the default ActorTimeout as specified in the configuration. + * + * Scala API + */ + def withTimeout(timeout: Option[Timeout]): TypedProps[T] = this.copy(timeout = timeout) + + /** + * Returns a new Props that has the specified interface, + * or if the interface class is not an interface, all the interfaces it implements, + * appended in the sequence of interfaces. + */ + def withInterface(interface: Class[_ >: T]): TypedProps[T] = + this.copy(interfaces = interfaces ++ TypedProps.extractInterfaces(interface)) + + /** + * Returns a new Props without the specified interface, + * or if the interface class is not an interface, all the interfaces it implements. + */ + def withoutInterface(interface: Class[_ >: T]): TypedProps[T] = + this.copy(interfaces = interfaces diff TypedProps.extractInterfaces(interface)) + + import akka.actor.{ Props ⇒ ActorProps } + def actorProps(): ActorProps = + if (dispatcher == ActorProps().dispatcher && faultHandler == ActorProps().faultHandler) ActorProps() + else ActorProps(dispatcher = dispatcher, faultHandler = faultHandler) +} + case class ContextualTypedActorFactory(typedActor: TypedActorExtension, actorFactory: ActorContext) extends TypedActorFactory { override def getActorRefFor(proxy: AnyRef): ActorRef = typedActor.getActorRefFor(proxy) override def isTypedActor(proxyOrNot: AnyRef): Boolean = typedActor.isTypedActor(proxyOrNot) @@ -440,21 +539,16 @@ class TypedActorExtension(system: ActorSystemImpl) extends TypedActorFactory wit // Private API - private[akka] def createProxy[R <: AnyRef](supervisor: ActorRefFactory, interfaces: Array[Class[_]], constructor: (AtomVar[R]) ⇒ Actor, props: Props, name: Option[String], loader: ClassLoader): R = { - val proxyVar = new AtomVar[R] - configureAndProxyLocalActorRef[R](supervisor, interfaces, proxyVar, props.withCreator(constructor(proxyVar)), name, loader) - } - - private[akka] def createProxyAndTypedActor[R <: AnyRef, T <: R](supervisor: ActorRefFactory, interface: Class[_], constructor: ⇒ T, props: Props, name: Option[String], loader: ClassLoader): R = - createProxy[R](supervisor, extractInterfaces(interface), (ref: AtomVar[R]) ⇒ new TypedActor[R, T](ref, constructor), props, name, loader) - private[akka] def configureAndProxyLocalActorRef[T <: AnyRef](supervisor: ActorRefFactory, interfaces: Array[Class[_]], proxyVar: AtomVar[T], props: Props, name: Option[String], loader: ClassLoader): T = { //Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling val actorVar = new AtomVar[ActorRef](null) - val timeout = props.timeout match { + + //FIXME + val timeout = settings.ActorTimeout + /*val timeout = props.timeout match { case Props.`defaultTimeout` ⇒ settings.ActorTimeout case x ⇒ x - } + }*/ val proxy: T = Proxy.newProxyInstance(loader, interfaces, new TypedActorInvocationHandler(this, actorVar, timeout)).asInstanceOf[T] proxyVar.set(proxy) // Chicken and egg situation we needed to solve, set the proxy so that 
we can set the self-reference inside each receive val ref = if (name.isDefined) supervisor.actorOf(props, name.get) else supervisor.actorOf(props) @@ -462,7 +556,25 @@ class TypedActorExtension(system: ActorSystemImpl) extends TypedActorFactory wit proxyVar.get } - private[akka] def extractInterfaces(clazz: Class[_]): Array[Class[_]] = if (clazz.isInterface) Array[Class[_]](clazz) else clazz.getInterfaces + private[akka] def createActorRefProxy[R <: AnyRef, T <: R](props: TypedProps[T], proxyVar: AtomVar[R], actorRef: ⇒ ActorRef): R = { + //Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling + val actorVar = new AtomVar[ActorRef](null) + val classLoader: ClassLoader = if (props.loader.nonEmpty) props.loader.get else props.interfaces.headOption.map(_.getClassLoader).orNull + val proxy = Proxy.newProxyInstance( + classLoader, + props.interfaces.toArray, + new TypedActorInvocationHandler(this, actorVar, props.timeout.getOrElse(this.settings.ActorTimeout))).asInstanceOf[R] + + proxyVar match { + case null ⇒ + actorVar.set(actorRef) + proxy + case _ ⇒ + proxyVar.set(proxy) // Chicken and egg situation we needed to solve, set the proxy so that we can set the self-reference inside each receive + actorVar.set(actorRef) //Make sure the InvocationHandler gets ahold of the actor reference, this is not a problem since the proxy hasn't escaped this method yet + proxyVar.get + } + } private[akka] def invocationHandlerFor(typedActor_? : AnyRef): TypedActorInvocationHandler = if ((typedActor_? ne null) && Proxy.isProxyClass(typedActor_?.getClass)) typedActor_? match { diff --git a/akka-camel/src/test/scala/akka/camel/ConsumerScalaTest.scala b/akka-camel/src/test/scala/akka/camel/ConsumerScalaTest.scala index 47dbdbba54..4382f9a2a6 100644 --- a/akka-camel/src/test/scala/akka/camel/ConsumerScalaTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ConsumerScalaTest.scala @@ -100,7 +100,7 @@ class ConsumerScalaTest extends WordSpec with BeforeAndAfterAll with MustMatcher "receiving an in-out message exchange" must { "lead to a TimeoutException" in { service.awaitEndpointActivation(1) { - actorOf(Props(creator = () ⇒ new TestBlocker("direct:publish-test-5"), timeout = Timeout(1000))) + actorOf(Props(creator = () ⇒ new TestBlocker("direct:publish-test-5"))) } must be(true) try { diff --git a/akka-docs/java/code/akka/docs/actor/TypedActorDocTestBase.java b/akka-docs/java/code/akka/docs/actor/TypedActorDocTestBase.java index 922502f1c9..6726c3e6f0 100644 --- a/akka-docs/java/code/akka/docs/actor/TypedActorDocTestBase.java +++ b/akka-docs/java/code/akka/docs/actor/TypedActorDocTestBase.java @@ -4,6 +4,7 @@ package akka.docs.actor; //#imports + import akka.dispatch.*; import akka.actor.*; import akka.japi.*; @@ -103,15 +104,14 @@ public class TypedActorDocTestBase { try { //#typed-actor-create1 Squarer mySquarer = - TypedActor.get(system).typedActorOf(Squarer.class, SquarerImpl.class, new Props()); + TypedActor.get(system).typedActorOf(new TypedProps(Squarer.class, SquarerImpl.class)); //#typed-actor-create1 //#typed-actor-create2 Squarer otherSquarer = - TypedActor.get(system).typedActorOf(Squarer.class, + TypedActor.get(system).typedActorOf(new TypedProps(Squarer.class, new Creator() { public SquarerImpl create() { return new SquarerImpl("foo"); } - }, - new Props(), + }), "name"); //#typed-actor-create2 diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java index 
b1d84a5841..a6cbc21338 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java @@ -65,7 +65,6 @@ public class UntypedActorDocTestBase { return new MyUntypedActor(); } }); - Props props5 = props4.withTimeout(new Timeout(1000)); //#creating-props-config } diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala index 20ac33480b..21f3492370 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala @@ -194,11 +194,9 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { val props3 = Props(new MyActor) val props4 = Props( creator = { () ⇒ new MyActor }, - dispatcher = "my-dispatcher", - timeout = Timeout(100)) + dispatcher = "my-dispatcher") val props5 = props1.withCreator(new MyActor) val props6 = props5.withDispatcher("my-dispatcher") - val props7 = props6.withTimeout(Timeout(100)) //#creating-props-config } diff --git a/akka-docs/scala/code/akka/docs/actor/TypedActorDocSpec.scala b/akka-docs/scala/code/akka/docs/actor/TypedActorDocSpec.scala index a10ce60a36..0d6ad1e648 100644 --- a/akka-docs/scala/code/akka/docs/actor/TypedActorDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/actor/TypedActorDocSpec.scala @@ -6,7 +6,7 @@ package akka.docs.actor //#imports import akka.dispatch.{ Promise, Future, Await } import akka.util.duration._ -import akka.actor.{ ActorContext, TypedActor, Props } +import akka.actor.{ ActorContext, TypedActor, TypedProps } //#imports @@ -100,14 +100,11 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "create a typed actor" in { //#typed-actor-create1 val mySquarer: Squarer = - TypedActor(system).typedActorOf[Squarer, SquarerImpl]() + TypedActor(system).typedActorOf(TypedProps[SquarerImpl]()) //#typed-actor-create1 //#typed-actor-create2 val otherSquarer: Squarer = - TypedActor(system).typedActorOf(classOf[Squarer], - new SquarerImpl("foo"), - Props(), - "name") + TypedActor(system).typedActorOf(TypedProps(classOf[Squarer], new SquarerImpl("foo")), "name") //#typed-actor-create2 //#typed-actor-calls @@ -145,7 +142,7 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "supercharge" in { //#typed-actor-supercharge-usage - val awesomeFooBar = TypedActor(system).typedActorOf[Foo with Bar, FooBar]() + val awesomeFooBar: Foo with Bar = TypedActor(system).typedActorOf(TypedProps[FooBar]()) awesomeFooBar.doFoo(10) val f = awesomeFooBar.doBar("yes") From 9ec2bbe6763cd9112a308f2772984803c1bcda19 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 16 Jan 2012 14:31:41 +0100 Subject: [PATCH 50/84] Fixed invalid dependencies in akka-remote, issue with log4j 1.2.15. 
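Background annotation (not part of the original commit message, added for context): log4j 1.2.15 is commonly reported to declare hard dependencies on javax.jms and the Sun JMX artifacts (jmxri/jmxtools), which are not resolvable from Maven Central, so builds that pull it in tend to fail or require exclusions; downgrading to 1.2.14 and confining log4j and zookeeper to the test scope, as this patch does, sidesteps the problem. Had 1.2.15 been kept, the usual sbt workaround is exclusion rules roughly like the following sketch (whether excludeAll/ExclusionRule is available depends on the sbt version in use):

    val log4j = "log4j" % "log4j" % "1.2.15" % "test" excludeAll (
      ExclusionRule(organization = "javax.jms"),
      ExclusionRule(organization = "com.sun.jdmk"),
      ExclusionRule(organization = "com.sun.jmx"))
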
See #1648 --- .../actor/mailbox/MongoBasedMailboxSpec.scala | 2 - project/AkkaBuild.scala | 37 +++++++++++-------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/akka-durable-mailboxes/akka-mongo-mailbox/src/test/scala/akka/actor/mailbox/MongoBasedMailboxSpec.scala b/akka-durable-mailboxes/akka-mongo-mailbox/src/test/scala/akka/actor/mailbox/MongoBasedMailboxSpec.scala index 0167af12aa..16fcde321e 100644 --- a/akka-durable-mailboxes/akka-mongo-mailbox/src/test/scala/akka/actor/mailbox/MongoBasedMailboxSpec.scala +++ b/akka-durable-mailboxes/akka-mongo-mailbox/src/test/scala/akka/actor/mailbox/MongoBasedMailboxSpec.scala @@ -21,14 +21,12 @@ object MongoBasedMailboxSpec { @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class MongoBasedMailboxSpec extends DurableMailboxSpec("mongodb", MongoBasedMailboxSpec.config) { - import org.apache.log4j.{ Logger, Level } import com.mongodb.async._ val mongo = MongoConnection("localhost", 27017)("akka") mongo.dropDatabase() { success ⇒ } - Logger.getRootLogger.setLevel(Level.DEBUG) } /*object DurableMongoMailboxSpecActorFactory { diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index e6a64ca0aa..091346de34 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -72,7 +72,7 @@ object AkkaBuild extends Build { base = file("akka-remote"), dependencies = Seq(actor, actorTests % "test->test", testkit % "test->test"), settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq( - libraryDependencies ++= Dependencies.cluster, + libraryDependencies ++= Dependencies.remote, // disable parallel tests parallelExecution in Test := false, extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src => @@ -393,11 +393,16 @@ object Dependencies { Test.scalacheck, protobuf, jacksonMapper, sjson ) - val cluster = Seq( - bookkeeper, commonsCodec, commonsIo, guice, h2Lzf, jacksonCore, jacksonMapper, log4j, netty, - protobuf, sjson, zkClient, zookeeper, zookeeperLock, Test.junit, Test.scalatest + val remote = Seq( + netty, protobuf, sjson, h2Lzf, Test.junit, Test.scalatest, + Test.zookeeper, Test.log4j // needed for ZkBarrier in multi-jvm tests ) +// val cluster = Seq( +// bookkeeper, commonsCodec, commonsIo, guice, h2Lzf, jacksonCore, jacksonMapper, log4j, netty, +// protobuf, sjson, zkClient, zookeeper, zookeeperLock, Test.junit, Test.scalatest +// ) + val slf4j = Seq(slf4jApi) val agent = Seq(scalaStm, Test.scalatest, Test.junit) @@ -416,7 +421,7 @@ object Dependencies { val mongoMailbox = Seq(mongoAsync, twttrUtilCore, Test.junit) - val zookeeperMailbox = Seq(zookeeper, Test.junit) + val zookeeperMailbox = Seq(zkClient, zookeeper, Test.junit) val spring = Seq(springBeans, springContext, Test.junit, Test.scalatest) @@ -467,7 +472,7 @@ object Dependency { val jettyUtil = "org.eclipse.jetty" % "jetty-util" % V.Jetty // Eclipse license val jettyXml = "org.eclipse.jetty" % "jetty-xml" % V.Jetty // Eclipse license val jettyServlet = "org.eclipse.jetty" % "jetty-servlet" % V.Jetty // Eclipse license - val log4j = "log4j" % "log4j" % "1.2.15" // ApacheV2 + val log4j = "log4j" % "log4j" % "1.2.14" // ApacheV2 val mongoAsync = "com.mongodb.async" % "mongo-driver_2.9.0-1" % "0.2.9-1" // ApacheV2 val netty = "org.jboss.netty" % "netty" % V.Netty // ApacheV2 val osgi = "org.osgi" % "org.osgi.core" % "4.2.0" // ApacheV2 @@ -505,14 +510,16 @@ object Dependency { // Test object Test { - val commonsColl = "commons-collections" % "commons-collections" % "3.2.1" % "test" // ApacheV2 - val commonsMath = 
"org.apache.commons" % "commons-math" % "2.1" % "test" // ApacheV2 - val jetty = "org.eclipse.jetty" % "jetty-server" % V.Jetty % "test" // Eclipse license - val jettyWebapp = "org.eclipse.jetty" % "jetty-webapp" % V.Jetty % "test" // Eclipse license - val junit = "junit" % "junit" % "4.5" % "test" // Common Public License 1.0 - val logback = "ch.qos.logback" % "logback-classic" % V.Logback % "test" // EPL 1.0 / LGPL 2.1 - val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" // MIT - val scalatest = "org.scalatest" %% "scalatest" % V.Scalatest % "test" // ApacheV2 - val scalacheck = "org.scala-tools.testing" %% "scalacheck" % "1.9" % "test" // New BSD + val commonsColl = "commons-collections" % "commons-collections" % "3.2.1" % "test" // ApacheV2 + val commonsMath = "org.apache.commons" % "commons-math" % "2.1" % "test" // ApacheV2 + val jetty = "org.eclipse.jetty" % "jetty-server" % V.Jetty % "test" // Eclipse license + val jettyWebapp = "org.eclipse.jetty" % "jetty-webapp" % V.Jetty % "test" // Eclipse license + val junit = "junit" % "junit" % "4.5" % "test" // Common Public License 1.0 + val logback = "ch.qos.logback" % "logback-classic" % V.Logback % "test" // EPL 1.0 / LGPL 2.1 + val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" // MIT + val scalatest = "org.scalatest" %% "scalatest" % V.Scalatest % "test" // ApacheV2 + val scalacheck = "org.scala-tools.testing" %% "scalacheck" % "1.9" % "test" // New BSD + val zookeeper = "org.apache.hadoop.zookeeper" % "zookeeper" % V.Zookeeper % "test" // ApacheV2 + val log4j = "log4j" % "log4j" % "1.2.14" % "test" // ApacheV2 } } From 3acdf53f0b509574349c9b346300d38c2c257571 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 16 Jan 2012 15:07:53 +0100 Subject: [PATCH 51/84] After review --- .../src/main/scala/akka/actor/TypedActor.scala | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index f143db0f8a..33e419cf1f 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -54,7 +54,7 @@ trait TypedActorFactory { def typedActorOf[R <: AnyRef, T <: R](props: TypedProps[T]): R = { val proxyVar = new AtomVar[R] //Chicken'n'egg-resolver val c = props.creator //Cache this to avoid closing over the Props - val ap = props.actorProps.withCreator(new akka.actor.TypedActor.TypedActor[R, T](proxyVar, c())) + val ap = props.actorProps.withCreator(new TypedActor.TypedActor[R, T](proxyVar, c())) typedActor.createActorRefProxy(props, proxyVar, actorFactory.actorOf(ap)) } @@ -407,12 +407,13 @@ object TypedProps { * TypedProps is a TypedActor configuration object, that is thread safe and fully sharable. * It's used in TypedActorFactory.typedActorOf to configure a TypedActor instance. 
*/ -case class TypedProps[T <: AnyRef] protected[akka] (interfaces: Seq[Class[_]], - creator: () ⇒ T, - dispatcher: String = TypedProps.defaultDispatcherId, - faultHandler: FaultHandlingStrategy = TypedProps.defaultFaultHandler, - timeout: Option[Timeout] = TypedProps.defaultTimeout, - loader: Option[ClassLoader] = TypedProps.defaultLoader) { +case class TypedProps[T <: AnyRef] protected[TypedProps] ( + interfaces: Seq[Class[_]], + creator: () ⇒ T, + dispatcher: String = TypedProps.defaultDispatcherId, + faultHandler: FaultHandlingStrategy = TypedProps.defaultFaultHandler, + timeout: Option[Timeout] = TypedProps.defaultTimeout, + loader: Option[ClassLoader] = TypedProps.defaultLoader) { /** * Uses the supplied class as the factory for the TypedActor implementation, From e2e23e827a2006e154264cf29d8ed77fd8efbccf Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 16 Jan 2012 15:17:06 +0100 Subject: [PATCH 52/84] More review cleanup and extra comments --- akka-actor/src/main/scala/akka/actor/TypedActor.scala | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index 33e419cf1f..e397a38cc9 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -560,11 +560,14 @@ class TypedActorExtension(system: ActorSystemImpl) extends TypedActorFactory wit private[akka] def createActorRefProxy[R <: AnyRef, T <: R](props: TypedProps[T], proxyVar: AtomVar[R], actorRef: ⇒ ActorRef): R = { //Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling val actorVar = new AtomVar[ActorRef](null) - val classLoader: ClassLoader = if (props.loader.nonEmpty) props.loader.get else props.interfaces.headOption.map(_.getClassLoader).orNull + val classLoader: ClassLoader = if (props.loader.nonEmpty) props.loader.get else props.interfaces.headOption.map(_.getClassLoader).orNull //If we have no loader, we arbitrarily take the loader of the first interface val proxy = Proxy.newProxyInstance( classLoader, props.interfaces.toArray, - new TypedActorInvocationHandler(this, actorVar, props.timeout.getOrElse(this.settings.ActorTimeout))).asInstanceOf[R] + new TypedActorInvocationHandler( + this, + actorVar, + if (props.timeout.isDefined) props.timeout.get else this.settings.ActorTimeout)).asInstanceOf[R] proxyVar match { case null ⇒ From 84732225f6d6128e1319a011f886866a80fb2e15 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 16 Jan 2012 16:49:55 +0100 Subject: [PATCH 53/84] Implementing support for serialization manifests in TypedActor calls that are serialized --- .../main/scala/akka/actor/TypedActor.scala | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index e397a38cc9..8560822190 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -8,9 +8,8 @@ import akka.japi.{ Creator, Option ⇒ JOption } import java.lang.reflect.{ InvocationTargetException, Method, InvocationHandler, Proxy } import akka.util.{ Duration, Timeout } import java.util.concurrent.atomic.{ AtomicReference ⇒ AtomVar } -import akka.serialization.{ Serializer, Serialization } +import akka.serialization.{ Serializer, Serialization, SerializationExtension } import akka.dispatch._ -import 
akka.serialization.SerializationExtension import java.util.concurrent.TimeoutException import java.lang.IllegalStateException @@ -124,22 +123,25 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi } catch { case i: InvocationTargetException ⇒ throw i.getTargetException } private def writeReplace(): AnyRef = parameters match { - case null ⇒ SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, null, null) - case ps if ps.length == 0 ⇒ SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, Array[Int](), Array[Array[Byte]]()) + case null ⇒ SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, null) + case ps if ps.length == 0 ⇒ SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, Array()) case ps ⇒ - val serializers: Array[Serializer] = ps map SerializationExtension(Serialization.currentSystem.value).findSerializerFor - val serializedParameters: Array[Array[Byte]] = Array.ofDim[Array[Byte]](serializers.length) - for (i ← 0 until serializers.length) - serializedParameters(i) = serializers(i) toBinary parameters(i) //Mutable for the sake of sanity + val serializedParameters = Array.ofDim[(Int, Class[_], Array[Byte])](ps.length) + for (i ← 0 until ps.length) { + val p = ps(i) + val s = SerializationExtension(Serialization.currentSystem.value).findSerializerFor(p) + val m = if (s.includeManifest) p.getClass else null + serializedParameters(i) = (s.identifier, m, s toBinary parameters(i)) //Mutable for the sake of sanity + } - SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, serializers.map(_.identifier), serializedParameters) + SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, serializedParameters) } } /** * Represents the serialized form of a MethodCall, uses readResolve and writeReplace to marshall the call */ - case class SerializedMethodCall(ownerType: Class[_], methodName: String, parameterTypes: Array[Class[_]], serializerIdentifiers: Array[Int], serializedParameters: Array[Array[Byte]]) { + case class SerializedMethodCall(ownerType: Class[_], methodName: String, parameterTypes: Array[Class[_]], serializedParameters: Array[(Int, Class[_], Array[Byte])]) { //TODO implement writeObject and readObject to serialize //TODO Possible optimization is to special encode the parameter-types to conserve space @@ -154,8 +156,10 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi case a if a.length == 0 ⇒ Array[AnyRef]() case a ⇒ val deserializedParameters: Array[AnyRef] = Array.ofDim[AnyRef](a.length) //Mutable for the sake of sanity - for (i ← 0 until a.length) - deserializedParameters(i) = serialization.serializerByIdentity(serializerIdentifiers(i)).fromBinary(serializedParameters(i)) + for (i ← 0 until a.length) { + val (sId, manifest, bytes) = a(i) + deserializedParameters(i) = serialization.serializerByIdentity(sId).fromBinary(bytes, Option(manifest)) + } deserializedParameters }) From 13a95ce15bdb20888da1dda422ea296b54616bc2 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 16 Jan 2012 19:45:37 +0100 Subject: [PATCH 54/84] Dealt with a FIXME and removing Lispiness --- akka-actor/src/main/resources/reference.conf | 3 +-- akka-actor/src/main/scala/akka/actor/Actor.scala | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/resources/reference.conf 
b/akka-actor/src/main/resources/reference.conf index 02d1a49035..de4b46709b 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -30,7 +30,7 @@ akka { # List FQCN of extensions which shall be loaded at actor system startup. # Should be on the format: 'extensions = ["foo", "bar"]' etc. - # FIXME: clarify "extensions" here, "Akka Extensions ()" + # See the Akka Documentation for more info about Extensions extensions = [] actor { @@ -88,7 +88,6 @@ akka { # within is the timeout used for routers containing future calls within = 5 seconds - # FIXME document 'create-as', ticket 1511 create-as { # fully qualified class name of recipe implementation class = "" diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 4681e88cfa..d69696e0b5 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -248,7 +248,7 @@ trait Actor { * up of resources before Actor is terminated. */ def preRestart(reason: Throwable, message: Option[Any]) { - context.children foreach (context.stop(_)) + context.children foreach context.stop postStop() } @@ -279,7 +279,6 @@ trait Actor { // ========================================= private[akka] final def apply(msg: Any) = { - // FIXME this should all go into ActorCell val behaviorStack = context.asInstanceOf[ActorCell].hotswap msg match { case msg if behaviorStack.nonEmpty && behaviorStack.head.isDefinedAt(msg) ⇒ behaviorStack.head.apply(msg) From 1756b6aa54632379b81938cf260aff6d9c87feca Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 16 Jan 2012 20:18:08 +0100 Subject: [PATCH 55/84] FIXMEs, young grasshopper --- .../src/main/scala/akka/actor/ActorCell.scala | 6 ++++-- .../scala/akka/actor/ActorRefProvider.scala | 3 --- .../main/scala/akka/actor/TypedActor.scala | 17 ----------------- .../akka/dispatch/AbstractDispatcher.scala | 4 +--- .../src/main/scala/akka/dispatch/Future.scala | 8 ++++---- .../akka/serialization/Serialization.scala | 19 ------------------- .../akka/util/BoundedBlockingQueue.scala | 6 +++--- .../actor/mailbox/FiledBasedMailbox.scala | 2 +- .../scala/akka/remote/RemoteInterface.scala | 2 +- .../remote/netty/NettyRemoteSupport.scala | 5 ++--- .../scala/akka/routing/RemoteRouters.scala | 2 +- .../main/scala/akka/testkit/TestFSMRef.scala | 4 ++-- .../src/main/scala/akka/testkit/TestKit.scala | 2 +- 13 files changed, 20 insertions(+), 60 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 5454d54d23..a0238fe7c8 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -290,6 +290,8 @@ private[akka] class ActorCell( parent.sendSystemMessage(akka.dispatch.Supervise(self)) dispatcher.attach(this) + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + dispatcher.systemDispatch(this, Create()) } // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ @@ -360,7 +362,7 @@ private[akka] class ActorCell( checkReceiveTimeout if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(created), "started (" + created + ")")) } catch { - // FIXME catching all and continue isn't good for OOME, ticket #1418 + // TODO catching all and continue isn't good for OOME, ticket #1418 case e ⇒ try { system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "error while 
creating actor")) @@ -394,7 +396,7 @@ private[akka] class ActorCell( props.faultHandler.handleSupervisorRestarted(cause, self, children) } catch { - // FIXME catching all and continue isn't good for OOME, ticket #1418 + // TODO catching all and continue isn't good for OOME, ticket #1418 case e ⇒ try { system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "error while creating actor")) // prevent any further messages to be processed until the actor has been restarted diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index bdcbb3139f..f54df8c50b 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -41,10 +41,8 @@ trait ActorRefProvider { */ def deathWatch: DeathWatch - // FIXME: remove/replace??? def nodename: String - // FIXME: remove/replace??? def clustername: String /** @@ -291,7 +289,6 @@ class LocalActorRefProvider( new RootActorPath(LocalAddress(_systemName)), new Deployer(settings)) - // FIXME remove both val nodename: String = "local" val clustername: String = "local" diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index 8560822190..b3913b8138 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -544,23 +544,6 @@ class TypedActorExtension(system: ActorSystemImpl) extends TypedActorFactory wit // Private API - private[akka] def configureAndProxyLocalActorRef[T <: AnyRef](supervisor: ActorRefFactory, interfaces: Array[Class[_]], proxyVar: AtomVar[T], props: Props, name: Option[String], loader: ClassLoader): T = { - //Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling - val actorVar = new AtomVar[ActorRef](null) - - //FIXME - val timeout = settings.ActorTimeout - /*val timeout = props.timeout match { - case Props.`defaultTimeout` ⇒ settings.ActorTimeout - case x ⇒ x - }*/ - val proxy: T = Proxy.newProxyInstance(loader, interfaces, new TypedActorInvocationHandler(this, actorVar, timeout)).asInstanceOf[T] - proxyVar.set(proxy) // Chicken and egg situation we needed to solve, set the proxy so that we can set the self-reference inside each receive - val ref = if (name.isDefined) supervisor.actorOf(props, name.get) else supervisor.actorOf(props) - actorVar.set(ref) //Make sure the InvocationHandler gets ahold of the actor reference, this is not a problem since the proxy hasn't escaped this method yet - proxyVar.get - } - private[akka] def createActorRefProxy[R <: AnyRef, T <: R](props: TypedProps[T], proxyVar: AtomVar[R], actorRef: ⇒ ActorRef): R = { //Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling val actorVar = new AtomVar[ActorRef](null) diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index d9b45ea7c8..66014e25cc 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -79,7 +79,7 @@ final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cl try { runnable.run() } catch { - // FIXME catching all and continue isn't good for OOME, ticket #1418 + // TODO catching all and continue isn't good for OOME, ticket #1418 case e ⇒ 
eventStream.publish(Error(e, "TaskInvocation", this.getClass, e.getMessage)) } finally { cleanup() @@ -208,8 +208,6 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext */ protected[akka] def register(actor: ActorCell) { inhabitantsUpdater.incrementAndGet(this) - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - systemDispatch(actor, Create()) //FIXME should this be here or moved into ActorCell.start perhaps? } /** diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 0675f1c9f2..746e2f03d8 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -148,7 +148,7 @@ object Future { try { Right(body) } catch { - // FIXME catching all and continue isn't good for OOME, ticket #1418 + // TODO catching all and continue isn't good for OOME, ticket #1418 case e ⇒ Left(e) } } @@ -322,7 +322,7 @@ object Future { next.apply() } catch { case e ⇒ - // FIXME catching all and continue isn't good for OOME, ticket #1418 + // TODO catching all and continue isn't good for OOME, ticket #1418 executor match { case m: MessageDispatcher ⇒ m.prerequisites.eventStream.publish(Error(e, "Future.dispatchTask", this.getClass, e.getMessage)) @@ -423,8 +423,8 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { * Creates a Future that will be the result of the first completed Future of this and the Future that was passed into this. * This is semantically the same as: Future.firstCompletedOf(Seq(this, that)) */ - //FIXME implement as The result of any of the Futures, or if oth failed, the first failure - def orElse[A >: T](that: Future[A]): Future[A] = Future.firstCompletedOf(List(this, that)) //TODO Optimize + // TODO ticket #1650 + def orElse[A >: T](that: Future[A]): Future[A] = Future.firstCompletedOf(List(this, that)) /** * Creates a new Future that will handle any matching Throwable that this diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index 7fe3703150..077ff12e1a 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -116,25 +116,6 @@ class Serialization(val system: ActorSystemImpl) extends Extension { def serializerOf(serializerFQN: String): Either[Exception, Serializer] = ReflectiveAccess.createInstance(serializerFQN, ReflectiveAccess.noParams, ReflectiveAccess.noArgs) - /** - * FIXME implement support for this - */ - private def serializerForBestMatchClass(cl: Class[_]): Either[Exception, Serializer] = { - if (bindings.isEmpty) - Left(NoSerializerFoundException("No mapping serializer found for " + cl)) - else { - bindings find { - case (clazzName, _) ⇒ - ReflectiveAccess.getClassFor(clazzName) match { - case Right(clazz) ⇒ clazz.isAssignableFrom(cl) - case _ ⇒ false - } - } map { - case (_, ser) ⇒ serializerOf(ser) - } getOrElse Left(NoSerializerFoundException("No mapping serializer found for " + cl)) - } - } - /** * A Map of serializer from alias to implementation (class implementing akka.serialization.Serializer) * By default always contains the following mapping: "default" -> akka.serialization.JavaSerializer diff --git a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala index 3c0f386b84..fb93f4a639 100644 --- 
a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala +++ b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala @@ -108,7 +108,7 @@ class BoundedBlockingQueue[E <: AnyRef]( throw ie } false - // FIXME catching all and continue isn't good for OOME, ticket #1418 + // TODO catching all and continue isn't good for OOME, ticket #1418 case e ⇒ notFull.signal() result = e @@ -235,7 +235,7 @@ class BoundedBlockingQueue[E <: AnyRef]( if (backing.removeAll(c)) { val sz = backing.size() if (sz < maxCapacity) notFull.signal() - if (sz > 0) notEmpty.signal() //FIXME needed?? + if (sz > 0) notEmpty.signal() true } else false } finally { @@ -248,7 +248,7 @@ class BoundedBlockingQueue[E <: AnyRef]( try { if (backing.retainAll(c)) { val sz = backing.size() - if (sz < maxCapacity) notFull.signal() //FIXME needed?? + if (sz < maxCapacity) notFull.signal() if (sz > 0) notEmpty.signal() true } else false diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FiledBasedMailbox.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FiledBasedMailbox.scala index 2accd9fb20..88bfde0529 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FiledBasedMailbox.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FiledBasedMailbox.scala @@ -68,7 +68,7 @@ class FileBasedMailbox(val owner: ActorContext) extends DurableMailbox(owner) wi queue.remove true } catch { - // FIXME catching all and continue isn't good for OOME, ticket #1418 + // TODO catching all and continue isn't good for OOME, ticket #1418 case e ⇒ false } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteInterface.scala b/akka-remote/src/main/scala/akka/remote/RemoteInterface.scala index 9134ca4340..a5474ce427 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteInterface.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteInterface.scala @@ -39,7 +39,7 @@ case class RemoteNettyAddress(host: String, ip: Option[InetAddress], port: Int) object RemoteNettyAddress { def apply(host: String, port: Int): RemoteNettyAddress = { - // FIXME this may BLOCK for extended periods of time! + // TODO ticket #1639 val ip = try Some(InetAddress.getByName(host)) catch { case _: UnknownHostException ⇒ None } new RemoteNettyAddress(host, ip, port) } diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 719261a5b6..b9252bd9fc 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -350,8 +350,7 @@ class ActiveRemoteClientHandler( runOnceNow { client.remoteSupport.shutdownClientConnection(remoteAddress) // spawn in another thread } - case e: Exception ⇒ - event.getChannel.close() //FIXME Is this the correct behavior??? 
+ case e: Exception ⇒ event.getChannel.close() } } else client.notifyListeners(RemoteClientError(new Exception("Unknown cause"), client.remoteSupport, client.remoteAddress)) @@ -670,7 +669,7 @@ class RemoteServerHandler( val inbound = RemoteNettyAddress(origin.getHostname, origin.getPort) val client = new PassiveRemoteClient(event.getChannel, remoteSupport, inbound) remoteSupport.bindClient(inbound, client) - case CommandType.SHUTDOWN ⇒ //FIXME Dispose passive connection here, ticket #1410 + case CommandType.SHUTDOWN ⇒ //Will be unbound in channelClosed case _ ⇒ //Unknown command } case _ ⇒ //ignore diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala index 83a64d09a7..2b0a685f32 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala @@ -19,7 +19,7 @@ trait RemoteRouterConfig extends RouterConfig { case x ⇒ throw new ConfigurationException("unparseable remote node " + x) } val node = Stream.continually(nodes).flatten.iterator - val impl = context.system.asInstanceOf[ActorSystemImpl] //FIXME should we rely on this cast to work here? + val impl = context.system.asInstanceOf[ActorSystemImpl] //TODO ticket #1559 IndexedSeq.empty[ActorRef] ++ (for (i ← 1 to nrOfInstances) yield { val name = "c" + i val deploy = Deploy("", ConfigFactory.empty(), None, props.routerConfig, RemoteScope(node.next)) diff --git a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala index d86bab2ea6..7e53b5ea92 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala @@ -88,12 +88,12 @@ class TestFSMRef[S, D, T <: Actor]( object TestFSMRef { def apply[S, D, T <: Actor](factory: ⇒ T)(implicit ev: T <:< FSM[S, D], system: ActorSystem): TestFSMRef[S, D, T] = { - val impl = system.asInstanceOf[ActorSystemImpl] //FIXME should we rely on this cast to work here? + val impl = system.asInstanceOf[ActorSystemImpl] //TODO ticket #1559 new TestFSMRef(impl, system.dispatchers.prerequisites, Props(creator = () ⇒ factory), impl.guardian.asInstanceOf[InternalActorRef], TestActorRef.randomName) } def apply[S, D, T <: Actor](factory: ⇒ T, name: String)(implicit ev: T <:< FSM[S, D], system: ActorSystem): TestFSMRef[S, D, T] = { - val impl = system.asInstanceOf[ActorSystemImpl] //FIXME should we rely on this cast to work here? + val impl = system.asInstanceOf[ActorSystemImpl] //TODO ticket #1559 new TestFSMRef(impl, system.dispatchers.prerequisites, Props(creator = () ⇒ factory), impl.guardian.asInstanceOf[InternalActorRef], name) } } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index 69411f23ea..85f36a567c 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -102,7 +102,7 @@ class TestKit(_system: ActorSystem) { * registration as message target. */ lazy val testActor: ActorRef = { - val impl = system.asInstanceOf[ActorSystemImpl] //FIXME should we rely on this cast to work here? 
+ val impl = system.asInstanceOf[ActorSystemImpl] //TODO ticket #1559 impl.systemActorOf(Props(new TestActor(queue)) .withDispatcher(CallingThreadDispatcher.Id), "testActor" + TestKit.testActorId.incrementAndGet) From 1f4954755d17dadc1c0f2d42b3125044e8c13427 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 01:18:57 +0100 Subject: [PATCH 56/84] Fixing ticket #1650 --- .../test/scala/akka/dispatch/FutureSpec.scala | 2 +- .../src/main/scala/akka/dispatch/Future.scala | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index 4fbb67fbb4..ef9f3b01bc 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -50,7 +50,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa behave like emptyFuture(_(Promise())) "return supplied value on timeout" in { val timedOut = Promise.successful[String]("Timedout") - val promise = Promise[String]() orElse timedOut + val promise = Promise[String]() or timedOut Await.result(promise, timeout.duration) must be("Timedout") } } diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 746e2f03d8..91c7b2356a 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -420,11 +420,22 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { } /** - * Creates a Future that will be the result of the first completed Future of this and the Future that was passed into this. - * This is semantically the same as: Future.firstCompletedOf(Seq(this, that)) + * Returns a new Future that is either the successful result of this Future, the successful result of that Future, + * or the failure from either this or that. */ - // TODO ticket #1650 - def orElse[A >: T](that: Future[A]): Future[A] = Future.firstCompletedOf(List(this, that)) + def or[U >: T](that: Future[U]): Future[U] = { + val p = Promise[U]() + def register(to: Future[U]) = to onComplete { + case r @ Right(_) ⇒ p tryComplete r + case l @ Left(_) ⇒ that.value match { + case Some(Left(_)) ⇒ p tryComplete l //If he failed, race for setting failure + case _ ⇒ // Either "that" was successful, or he's not done yet, let him win + } + } + register(this) + register(that) + p + } /** * Creates a new Future that will handle any matching Throwable that this From 98c4febac8dcaad0383777ec023be768e3852182 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 01:29:24 +0100 Subject: [PATCH 57/84] Clarified Future.or semantics --- akka-actor/src/main/scala/akka/dispatch/Future.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 91c7b2356a..dcd0f46629 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -421,7 +421,8 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { /** * Returns a new Future that is either the successful result of this Future, the successful result of that Future, - * or the failure from either this or that. + * or the failure from either this or that. 
In case fails, and the other never completes, + * the returned Future will never be completed. */ def or[U >: T](that: Future[U]): Future[U] = { val p = Promise[U]() From 0b9c8bd248bb7a5de26d730c11d17495ddbdba45 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 01:41:28 +0100 Subject: [PATCH 58/84] Removing legacy configuration message resend shenanigans --- .../reflogic/ClusterActorRefCleanupMultiJvmNode1.conf | 3 +-- .../reflogic/ClusterActorRefCleanupMultiJvmNode2.conf | 1 - .../reflogic/ClusterActorRefCleanupMultiJvmNode3.conf | 1 - akka-remote/src/main/resources/reference.conf | 10 ---------- .../src/test/scala/akka/remote/RemoteConfigSpec.scala | 2 -- 5 files changed, 1 insertion(+), 16 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf index 22bb5fc331..f510c5253c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf @@ -3,5 +3,4 @@ akka.event-handlers = ["akka.testkit.TestEventListener"] akka.event-handler-level = "WARNING" akka.actor.deployment.service-test.router = "round-robin" akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"] -akka.actor.deployment.service-test.nr-of-instances = 2 -akka.remote.client.buffering.retry-message-send-on-failure = false +akka.actor.deployment.service-test.nr-of-instances = 2 \ No newline at end of file diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf index 20e6354a0d..b7c3e53e6f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf @@ -3,4 +3,3 @@ akka.event-handler-level = "WARNING" akka.actor.deployment.service-test.router = "round-robin" akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"] akka.actor.deployment.service-test.nr-of-instances = 2 -akka.remote.client.buffering.retry-message-send-on-failure = false diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf index 20e6354a0d..b7c3e53e6f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf @@ -3,4 +3,3 @@ akka.event-handler-level = "WARNING" akka.actor.deployment.service-test.router = "round-robin" akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"] akka.actor.deployment.service-test.nr-of-instances = 2 -akka.remote.client.buffering.retry-message-send-on-failure = false diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 3399f68639..b3d2027d91 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -128,16 +128,6 @@ akka { } client { - buffering { - # Should message buffering on remote client error be used (buffer 
flushed - # on successful reconnect) - retry-message-send-on-failure = off - - # If negative (or zero) then an unbounded mailbox is used (default) - # If positive then a bounded mailbox is used and the capacity is set using - # the property - capacity = -1 - } reconnect-delay = 5s read-timeout = 3600s message-frame-size = 1 MiB diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index cd8c3c8eb5..b1a9905b6e 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -34,8 +34,6 @@ class RemoteConfigSpec extends AkkaSpec("akka.cluster.nodename = node1") { getBytes("akka.remote.server.max-total-memory-size") must equal(0) //akka.remote.client - getBoolean("akka.remote.client.buffering.retry-message-send-on-failure") must equal(false) - getInt("akka.remote.client.buffering.capacity") must equal(-1) getMilliseconds("akka.remote.client.reconnect-delay") must equal(5 * 1000) getMilliseconds("akka.remote.client.read-timeout") must equal(3600 * 1000) getMilliseconds("akka.remote.client.reconnection-time-window") must equal(600 * 1000) From 0cf5c22eac47f3d07213c153c793445f617589e7 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 17 Jan 2012 08:44:46 +0100 Subject: [PATCH 59/84] Fixed target.nodes config property. See #1599 --- akka-docs/java/remoting.rst | 8 ++++---- akka-docs/scala/remoting.rst | 6 +++--- akka-remote/src/main/resources/reference.conf | 2 +- .../{RemoteRouters.scala => RemoteRouterConfig.scala} | 0 4 files changed, 8 insertions(+), 8 deletions(-) rename akka-remote/src/main/scala/akka/routing/{RemoteRouters.scala => RemoteRouterConfig.scala} (100%) diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index 7ec163726a..909315af46 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -69,7 +69,7 @@ The "app" in this case refers to the name of the ``ActorSystem``:: actor { deployment { /serviceA/retrieval { - remote = “akka://app@10.0.0.1:2552” + remote = "akka://app@10.0.0.1:2552” } } } @@ -106,10 +106,10 @@ This is also done via configuration:: actor { deployment { /serviceA/aggregation { - router = “round-robin” + router = "round-robin" nr-of-instances = 10 - routees { - nodes = [“akka://app@10.0.0.2:2552”, “akka://app@10.0.0.3:2552”] + target { + nodes = ["akka://app@10.0.0.2:2552", "akka://app@10.0.0.3:2552"] } } } diff --git a/akka-docs/scala/remoting.rst b/akka-docs/scala/remoting.rst index 5a8005fbf2..2d460aa060 100644 --- a/akka-docs/scala/remoting.rst +++ b/akka-docs/scala/remoting.rst @@ -124,10 +124,10 @@ This is also done via configuration:: actor { deployment { /serviceA/aggregation { - router = “round-robin” + router = "round-robin" nr-of-instances = 10 - routees { - nodes = [“akka://app@10.0.0.2:2552”, “akka://app@10.0.0.3:2552”] + target { + nodes = ["akka://app@10.0.0.2:2552", "akka://app@10.0.0.3:2552"] } } } diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index b3d2027d91..7456b4f3d9 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -17,7 +17,7 @@ akka { # at that node e.g. 
"akka://sys@host:port" remote = "" - routees { + target { # A list of hostnames and ports for instantiating the children of a # non-direct router diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala similarity index 100% rename from akka-remote/src/main/scala/akka/routing/RemoteRouters.scala rename to akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala From 7b609762275b9efac51ddb2b173c664e239b1aaa Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 08:59:57 +0100 Subject: [PATCH 60/84] Fixing glitch discovered by Derek --- akka-actor/src/main/scala/akka/dispatch/Future.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index dcd0f46629..75043065a0 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -426,15 +426,15 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { */ def or[U >: T](that: Future[U]): Future[U] = { val p = Promise[U]() - def register(to: Future[U]) = to onComplete { + def register(to: Future[U], fallback: Future[U]) = to onComplete { case r @ Right(_) ⇒ p tryComplete r - case l @ Left(_) ⇒ that.value match { + case l @ Left(_) ⇒ fallback.value match { case Some(Left(_)) ⇒ p tryComplete l //If he failed, race for setting failure case _ ⇒ // Either "that" was successful, or he's not done yet, let him win } } - register(this) - register(that) + register(this, that) + register(that, this) p } From e7a0247c0d481447facc1d35810c4a9b3125512b Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 17 Jan 2012 08:45:07 +0100 Subject: [PATCH 61/84] Handle remote routers transparently. See #1606 * RemoteRouterConfig wrapper with RemoteRouteeProvider instead if fixed remote routers. * Had to refactor and introduce RouteeProvider for different implementations of how to create routees. * Works with Resizer also. * Added some tests. 
--- .../test/scala/akka/routing/RoutingSpec.scala | 14 +- .../src/main/scala/akka/routing/Routing.scala | 169 ++++++++------- .../jrouting/CustomRouterDocTestBase.java | 8 +- .../scala/akka/remote/RemoteDeployer.scala | 9 +- .../akka/routing/RemoteRouterConfig.scala | 204 +++++------------- .../scala/akka/remote/RemoteRouterSpec.scala | 28 ++- 6 files changed, 189 insertions(+), 243 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index a9ec39ff6e..910985151d 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -45,9 +45,9 @@ object RoutingSpec { class MyRouter(config: Config) extends RouterConfig { val foo = config.getString("foo") - def createRoute(routeeProps: Props, actorContext: ActorContext): Route = { - val routees = IndexedSeq(actorContext.actorOf(Props[Echo])) - registerRoutees(actorContext, routees) + def createRoute(routeeProps: Props, routeeProvider: RouteeProvider): Route = { + val routees = IndexedSeq(routeeProvider.context.actorOf(Props[Echo])) + routeeProvider.registerRoutees(routees) { case (sender, message) ⇒ Nil @@ -542,13 +542,13 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with case class VoteCountRouter() extends RouterConfig { //#crRoute - def createRoute(routeeProps: Props, actorContext: ActorContext): Route = { - val democratActor = actorContext.actorOf(Props(new DemocratActor()), "d") - val republicanActor = actorContext.actorOf(Props(new RepublicanActor()), "r") + def createRoute(routeeProps: Props, routeeProvider: RouteeProvider): Route = { + val democratActor = routeeProvider.context.actorOf(Props(new DemocratActor()), "d") + val republicanActor = routeeProvider.context.actorOf(Props(new RepublicanActor()), "r") val routees = Vector[ActorRef](democratActor, republicanActor) //#crRegisterRoutees - registerRoutees(actorContext, routees) + routeeProvider.registerRoutees(routees) //#crRegisterRoutees //#crRoutingLogic diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 0473f99fd6..dc251145d7 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -44,7 +44,8 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup abandonedRoutees foreach underlying.unwatch } - val route = _props.routerConfig.createRoute(routeeProps, actorContext) + private val routeeProvider = _props.routerConfig.createRouteeProvider(this, actorContext) + val route = _props.routerConfig.createRoute(routeeProps, routeeProvider) // initial resize, before message send resize() @@ -91,7 +92,7 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup for (r ← _props.routerConfig.resizer) { if (r.isTimeForResize(resizeCounter.getAndIncrement()) && resizeProgress.compareAndSet(false, true)) { try { - r.resize(routeeProps, actorContext, routees, _props.routerConfig) + r.resize(routeeProps, routeeProvider) } finally { resizeProgress.set(false) } @@ -120,7 +121,10 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup */ trait RouterConfig { - def createRoute(routeeProps: Props, actorContext: ActorContext): Route + def createRoute(routeeProps: Props, routeeProvider: RouteeProvider): Route + + protected[akka] def createRouteeProvider(ref: RoutedActorRef, 
context: ActorContext) = + new RouteeProvider(ref, context, resizer) def createActor(): Router = new Router {} @@ -134,32 +138,6 @@ trait RouterConfig { protected def toAll(sender: ActorRef, routees: Iterable[ActorRef]): Iterable[Destination] = routees.map(Destination(sender, _)) - def createRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees) match { - case (0, Nil) ⇒ throw new IllegalArgumentException("Insufficient information - missing configuration.") - case (x, Nil) ⇒ (1 to x).map(_ ⇒ context.actorOf(props))(scala.collection.breakOut) - case (_, xs) ⇒ xs.map(context.actorFor(_))(scala.collection.breakOut) - } - - protected def createAndRegisterRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): Unit = { - if (resizer.isEmpty) { - registerRoutees(context, createRoutees(props, context, nrOfInstances, routees)) - } - } - - /** - * Adds new routees to the router. - */ - def registerRoutees(context: ActorContext, routees: IndexedSeq[ActorRef]): Unit = { - context.self.asInstanceOf[RoutedActorRef].addRoutees(routees) - } - - /** - * Removes routees from the router. This method doesn't stop the routees. - */ - def unregisterRoutees(context: ActorContext, routees: IndexedSeq[ActorRef]): Unit = { - context.self.asInstanceOf[RoutedActorRef].removeRoutees(routees) - } - /** * Routers with dynamically resizable number of routees return the [[akka.routing.Resizer]] * to use. @@ -168,26 +146,69 @@ trait RouterConfig { } +/** + * Factory and registry for routees of the router. + * Uses `context.actorOf` to create routees from nrOfInstances property + * and `context.actorFor` lookup routees from paths. + */ +class RouteeProvider(ref: RoutedActorRef, val context: ActorContext, val resizer: Option[Resizer]) { + /** + * Adds new routees to the router. + */ + def registerRoutees(routees: IndexedSeq[ActorRef]): Unit = { + context.self.asInstanceOf[RoutedActorRef].addRoutees(routees) + } + + /** + * Adds new routees to the router. + * Java API. + */ + protected def registerRoutees(routees: java.util.List[ActorRef]): Unit = { + import scala.collection.JavaConverters._ + registerRoutees(routees.asScala.toIndexedSeq) + } + + /** + * Removes routees from the router. This method doesn't stop the routees. + */ + def unregisterRoutees(routees: IndexedSeq[ActorRef]): Unit = { + context.self.asInstanceOf[RoutedActorRef].removeRoutees(routees) + } + + def createRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees) match { + case (0, Nil) ⇒ throw new IllegalArgumentException("Insufficient information - missing configuration.") + case (x, Nil) ⇒ (1 to x).map(_ ⇒ context.actorOf(props))(scala.collection.breakOut) + case (_, xs) ⇒ xs.map(context.actorFor(_))(scala.collection.breakOut) + } + + def createAndRegisterRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): Unit = { + if (resizer.isEmpty) { + registerRoutees(createRoutees(props, nrOfInstances, routees)) + } + } + + /** + * All routees of the router + */ + def routees: IndexedSeq[ActorRef] = ref.routees + +} + /** * Java API for a custom router factory. 
* @see akka.routing.RouterConfig */ abstract class CustomRouterConfig extends RouterConfig { - override def createRoute(props: Props, context: ActorContext): Route = { + override def createRoute(props: Props, routeeProvider: RouteeProvider): Route = { // as a bonus, this prevents closing of props and context in the returned Route PartialFunction - val customRoute = createCustomRoute(props, context) + val customRoute = createCustomRoute(props, routeeProvider) { case (sender, message) ⇒ customRoute.destinationsFor(sender, message) } } - def createCustomRoute(props: Props, context: ActorContext): CustomRoute - - protected def registerRoutees(context: ActorContext, routees: java.util.List[ActorRef]): Unit = { - import scala.collection.JavaConverters._ - registerRoutees(context, routees.asScala.toIndexedSeq) - } + def createCustomRoute(props: Props, routeeProvider: RouteeProvider): CustomRoute } @@ -254,23 +275,23 @@ case class Destination(sender: ActorRef, recipient: ActorRef) * Oxymoron style. */ case object NoRouter extends RouterConfig { - def createRoute(props: Props, actorContext: ActorContext): Route = null + def createRoute(props: Props, routeeProvider: RouteeProvider): Route = null } /** * Router configuration which has no default, i.e. external configuration is required. */ case object FromConfig extends RouterConfig { - def createRoute(props: Props, actorContext: ActorContext): Route = - throw new ConfigurationException("router " + actorContext.self + " needs external configuration from file (e.g. application.conf)") + def createRoute(props: Props, routeeProvider: RouteeProvider): Route = + throw new ConfigurationException("router " + routeeProvider.context.self + " needs external configuration from file (e.g. application.conf)") } /** * Java API: Router configuration which has no default, i.e. external configuration is required. */ case class FromConfig() extends RouterConfig { - def createRoute(props: Props, actorContext: ActorContext): Route = - throw new ConfigurationException("router " + actorContext.self + " needs external configuration from file (e.g. application.conf)") + def createRoute(props: Props, routeeProvider: RouteeProvider): Route = + throw new ConfigurationException("router " + routeeProvider.context.self + " needs external configuration from file (e.g. 
application.conf)") } object RoundRobinRouter { @@ -332,21 +353,20 @@ trait RoundRobinLike { this: RouterConfig ⇒ def routees: Iterable[String] - def createRoute(props: Props, context: ActorContext): Route = { - createAndRegisterRoutees(props, context, nrOfInstances, routees) + def createRoute(props: Props, routeeProvider: RouteeProvider): Route = { + routeeProvider.createAndRegisterRoutees(props, nrOfInstances, routees) - val ref = context.self.asInstanceOf[RoutedActorRef] val next = new AtomicLong(0) def getNext(): ActorRef = { - val _routees = ref.routees + val _routees = routeeProvider.routees _routees((next.getAndIncrement % _routees.size).asInstanceOf[Int]) } { case (sender, message) ⇒ message match { - case Broadcast(msg) ⇒ toAll(sender, ref.routees) + case Broadcast(msg) ⇒ toAll(sender, routeeProvider.routees) case msg ⇒ List(Destination(sender, getNext())) } } @@ -418,18 +438,18 @@ trait RandomLike { this: RouterConfig ⇒ override def initialValue = SecureRandom.getInstance("SHA1PRNG") } - def createRoute(props: Props, context: ActorContext): Route = { - val ref = context.self.asInstanceOf[RoutedActorRef] - createAndRegisterRoutees(props, context, nrOfInstances, routees) + def createRoute(props: Props, routeeProvider: RouteeProvider): Route = { + routeeProvider.createAndRegisterRoutees(props, nrOfInstances, routees) def getNext(): ActorRef = { - ref.routees(random.get.nextInt(ref.routees.size)) + val _routees = routeeProvider.routees + _routees(random.get.nextInt(_routees.size)) } { case (sender, message) ⇒ message match { - case Broadcast(msg) ⇒ toAll(sender, ref.routees) + case Broadcast(msg) ⇒ toAll(sender, routeeProvider.routees) case msg ⇒ List(Destination(sender, getNext())) } } @@ -559,13 +579,12 @@ trait SmallestMailboxLike { this: RouterConfig ⇒ case _ ⇒ 0 } - def createRoute(props: Props, context: ActorContext): Route = { - val ref = context.self.asInstanceOf[RoutedActorRef] - createAndRegisterRoutees(props, context, nrOfInstances, routees) + def createRoute(props: Props, routeeProvider: RouteeProvider): Route = { + routeeProvider.createAndRegisterRoutees(props, nrOfInstances, routees) def getNext(): ActorRef = { // non-local actors mailbox size is unknown, so consider them lowest priority - val activeLocal = ref.routees collect { case l: LocalActorRef if !isSuspended(l) ⇒ l } + val activeLocal = routeeProvider.routees collect { case l: LocalActorRef if !isSuspended(l) ⇒ l } // 1. anyone not processing message and with empty mailbox activeLocal.find(a ⇒ !isProcessingMessage(a) && !hasMessages(a)) getOrElse { // 2. anyone with empty mailbox @@ -573,7 +592,8 @@ trait SmallestMailboxLike { this: RouterConfig ⇒ // 3. sort on mailbox size activeLocal.sortBy(a ⇒ numberOfMessages(a)).headOption getOrElse { // 4. 
no locals, just pick one, random - ref.routees(random.get.nextInt(ref.routees.size)) + val _routees = routeeProvider.routees + _routees(random.get.nextInt(_routees.size)) } } } @@ -582,7 +602,7 @@ trait SmallestMailboxLike { this: RouterConfig ⇒ { case (sender, message) ⇒ message match { - case Broadcast(msg) ⇒ toAll(sender, ref.routees) + case Broadcast(msg) ⇒ toAll(sender, routeeProvider.routees) case msg ⇒ List(Destination(sender, getNext())) } } @@ -649,14 +669,13 @@ trait BroadcastLike { this: RouterConfig ⇒ def routees: Iterable[String] - def createRoute(props: Props, context: ActorContext): Route = { - val ref = context.self.asInstanceOf[RoutedActorRef] - createAndRegisterRoutees(props, context, nrOfInstances, routees) + def createRoute(props: Props, routeeProvider: RouteeProvider): Route = { + routeeProvider.createAndRegisterRoutees(props, nrOfInstances, routees) { case (sender, message) ⇒ message match { - case _ ⇒ toAll(sender, ref.routees) + case _ ⇒ toAll(sender, routeeProvider.routees) } } } @@ -724,16 +743,15 @@ trait ScatterGatherFirstCompletedLike { this: RouterConfig ⇒ def within: Duration - def createRoute(props: Props, context: ActorContext): Route = { - val ref = context.self.asInstanceOf[RoutedActorRef] - createAndRegisterRoutees(props, context, nrOfInstances, routees) + def createRoute(props: Props, routeeProvider: RouteeProvider): Route = { + routeeProvider.createAndRegisterRoutees(props, nrOfInstances, routees) { case (sender, message) ⇒ - val asker = context.asInstanceOf[ActorCell].systemImpl.provider.ask(Timeout(within)).get + val asker = routeeProvider.context.asInstanceOf[ActorCell].systemImpl.provider.ask(Timeout(within)).get asker.result.pipeTo(sender) message match { - case _ ⇒ toAll(asker, ref.routees) + case _ ⇒ toAll(asker, routeeProvider.routees) } } } @@ -755,11 +773,11 @@ trait Resizer { /** * Decide if the capacity of the router need to be changed. Will be invoked when `isTimeForResize` * returns true and no other resize is in progress. - * Create and register more routees with `routerConfig.registerRoutees(actorContext, newRoutees) - * or remove routees with `routerConfig.unregisterRoutees(actorContext, abandonedRoutees)` and + * Create and register more routees with `routeeProvider.registerRoutees(newRoutees) + * or remove routees with `routeeProvider.unregisterRoutees(abandonedRoutees)` and * sending [[akka.actor.PoisonPill]] to them. 
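 * A resizer implementation against this API might look like the following sketch
 * (`GrowOnlyResizer` and its `upperBound` are made-up illustration names, not part of this change):
 * {{{
 * class GrowOnlyResizer(upperBound: Int) extends Resizer {
 *   def isTimeForResize(messageCounter: Long): Boolean = messageCounter % 10 == 0
 *   def resize(props: Props, routeeProvider: RouteeProvider) {
 *     // grow by one routee at a time until the assumed upper bound is reached
 *     if (routeeProvider.routees.size < upperBound)
 *       routeeProvider.registerRoutees(routeeProvider.createRoutees(props, 1, Nil))
 *   }
 * }
 * }}}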
*/ - def resize(props: Props, actorContext: ActorContext, currentRoutees: IndexedSeq[ActorRef], routerConfig: RouterConfig) + def resize(props: Props, routeeProvider: RouteeProvider) } case object DefaultResizer { @@ -849,16 +867,17 @@ case class DefaultResizer( def isTimeForResize(messageCounter: Long): Boolean = (messageCounter % messagesPerResize == 0) - def resize(props: Props, actorContext: ActorContext, currentRoutees: IndexedSeq[ActorRef], routerConfig: RouterConfig) { + def resize(props: Props, routeeProvider: RouteeProvider) { + val currentRoutees = routeeProvider.routees val requestedCapacity = capacity(currentRoutees) if (requestedCapacity > 0) { - val newRoutees = routerConfig.createRoutees(props, actorContext, requestedCapacity, Nil) - routerConfig.registerRoutees(actorContext, newRoutees) + val newRoutees = routeeProvider.createRoutees(props, requestedCapacity, Nil) + routeeProvider.registerRoutees(newRoutees) } else if (requestedCapacity < 0) { val (keep, abandon) = currentRoutees.splitAt(currentRoutees.length + requestedCapacity) - routerConfig.unregisterRoutees(actorContext, abandon) - delayedStop(actorContext.system.scheduler, abandon) + routeeProvider.unregisterRoutees(abandon) + delayedStop(routeeProvider.context.system.scheduler, abandon) } } diff --git a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java b/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java index c89401e5cc..3668bc1030 100644 --- a/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java +++ b/akka-docs/java/code/akka/docs/jrouting/CustomRouterDocTestBase.java @@ -107,13 +107,13 @@ public class CustomRouterDocTestBase { //#crRoute @Override - public CustomRoute createCustomRoute(Props props, ActorContext context) { - final ActorRef democratActor = context.actorOf(new Props(DemocratActor.class), "d"); - final ActorRef republicanActor = context.actorOf(new Props(RepublicanActor.class), "r"); + public CustomRoute createCustomRoute(Props props, RouteeProvider routeeProvider) { + final ActorRef democratActor = routeeProvider.context().actorOf(new Props(DemocratActor.class), "d"); + final ActorRef republicanActor = routeeProvider.context().actorOf(new Props(RepublicanActor.class), "r"); List routees = Arrays.asList(new ActorRef[] { democratActor, republicanActor }); //#crRegisterRoutees - registerRoutees(context, routees); + routeeProvider.registerRoutees(routees); //#crRegisterRoutees //#crRoutingLogic diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index fe6844b8dc..120a2b87bb 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -14,7 +14,6 @@ class RemoteDeployer(_settings: ActorSystem.Settings) extends Deployer(_settings override protected def parseConfig(path: String, config: Config): Option[Deploy] = { import scala.collection.JavaConverters._ - import akka.util.ReflectiveAccess._ super.parseConfig(path, config) match { case d @ Some(deploy) ⇒ @@ -25,13 +24,7 @@ class RemoteDeployer(_settings: ActorSystem.Settings) extends Deployer(_settings val nodes = deploy.config.getStringList("target.nodes").asScala if (nodes.isEmpty || deploy.routing == NoRouter) d else { - val r = deploy.routing match { - case RoundRobinRouter(x, _, resizer) ⇒ RemoteRoundRobinRouter(x, nodes, resizer) - case RandomRouter(x, _, resizer) ⇒ RemoteRandomRouter(x, nodes, resizer) - case SmallestMailboxRouter(x, _, 
resizer) ⇒ RemoteSmallestMailboxRouter(x, nodes, resizer) - case BroadcastRouter(x, _, resizer) ⇒ RemoteBroadcastRouter(x, nodes, resizer) - case ScatterGatherFirstCompletedRouter(x, _, w, resizer) ⇒ RemoteScatterGatherFirstCompletedRouter(x, nodes, w, resizer) - } + val r = new RemoteRouterConfig(deploy.routing, nodes) Some(deploy.copy(routing = r)) } } diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala index 2b0a685f32..7cc47caa77 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala @@ -3,163 +3,71 @@ */ package akka.routing -import akka.actor._ -import akka.remote._ -import scala.collection.JavaConverters._ import com.typesafe.config.ConfigFactory +import akka.actor.ActorContext +import akka.actor.ActorRef +import akka.actor.ActorSystemImpl +import akka.actor.Deploy +import akka.actor.InternalActorRef +import akka.actor.Props import akka.config.ConfigurationException -import akka.util.Duration +import akka.remote.RemoteScope +import akka.remote.RemoteAddressExtractor -trait RemoteRouterConfig extends RouterConfig { - override def createRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees) match { - case (_, Nil) ⇒ throw new ConfigurationException("must specify list of remote nodes") - case (n, xs) ⇒ - val nodes = routees map { - case RemoteAddressExtractor(a) ⇒ a - case x ⇒ throw new ConfigurationException("unparseable remote node " + x) - } - val node = Stream.continually(nodes).flatten.iterator - val impl = context.system.asInstanceOf[ActorSystemImpl] //TODO ticket #1559 - IndexedSeq.empty[ActorRef] ++ (for (i ← 1 to nrOfInstances) yield { - val name = "c" + i - val deploy = Deploy("", ConfigFactory.empty(), None, props.routerConfig, RemoteScope(node.next)) - impl.provider.actorOf(impl, props, context.self.asInstanceOf[InternalActorRef], context.self.path / name, false, Some(deploy)) - }) +/** + * [[akka.routing.RouterConfig]] implementation for remote deployment on defined + * target nodes. Delegates other duties to the local [[akka.routing.RouterConfig]], + * which makes it possible to mix this with the built-in routers such as + * [[akka.routing.RoundRobinRouter]] or custom routers. + */ +class RemoteRouterConfig(local: RouterConfig, nodes: Iterable[String]) extends RouterConfig { + + override protected[akka] def createRouteeProvider(ref: RoutedActorRef, context: ActorContext) = + new RemoteRouteeProvider(nodes, ref, context, resizer) + + override def createRoute(routeeProps: Props, routeeProvider: RouteeProvider): Route = { + local.createRoute(routeeProps, routeeProvider) } + + override def createActor(): Router = local.createActor() + + override def resizer: Option[Resizer] = local.resizer + } /** - * A Router that uses round-robin to select a connection. For concurrent calls, round robin is just a best effort. - *
    - * Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means - * that the round robin should both create new actors and use the 'routees' actor(s). - * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. - *
    - * The configuration parameter trumps the constructor arguments. This means that - * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will - * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. + * Factory and registry for routees of the router. + * Deploys new routees on the specified `nodes`, round-robin. + * + * Routee paths may not be combined with remote target nodes. */ -case class RemoteRoundRobinRouter(nrOfInstances: Int, routees: Iterable[String], override val resizer: Option[Resizer] = None) - extends RemoteRouterConfig with RoundRobinLike { +class RemoteRouteeProvider(nodes: Iterable[String], _ref: RoutedActorRef, _context: ActorContext, _resizer: Option[Resizer]) + extends RouteeProvider(_ref, _context, _resizer) { - /** - * Constructor that sets the routees to be used. - * Java API - */ - def this(n: Int, t: java.lang.Iterable[String]) = this(n, t.asScala) + // need this iterator as instance variable since Resizer may call createRoutees several times + private val nodeAddressIter = { + val nodeAddresses = nodes map { + case RemoteAddressExtractor(a) ⇒ a + case x ⇒ throw new ConfigurationException("unparseable remote node " + x) + } + Stream.continually(nodeAddresses).flatten.iterator + } - /** - * Constructor that sets the resizer to be used. - * Java API - */ - def this(resizer: Resizer) = this(0, Nil, Some(resizer)) + override def createRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = + (nrOfInstances, routees, nodes) match { + case (_, _, Nil) ⇒ throw new ConfigurationException("Must specify list of remote target.nodes for [%s]" + format context.self.path.toString) + + case (n, Nil, ys) ⇒ + val impl = context.system.asInstanceOf[ActorSystemImpl] //TODO ticket #1559 + IndexedSeq.empty[ActorRef] ++ (for (i ← 1 to nrOfInstances) yield { + val name = "c" + i + val deploy = Deploy("", ConfigFactory.empty(), None, props.routerConfig, RemoteScope(nodeAddressIter.next)) + impl.provider.actorOf(impl, props, context.self.asInstanceOf[InternalActorRef], context.self.path / name, false, Some(deploy)) + }) + + case (_, xs, _) ⇒ throw new ConfigurationException("Remote target.nodes can not be combined with routees for [%s]" + format context.self.path.toString) + } } -/** - * A Router that randomly selects one of the target connections to send a message to. - *
    - * Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means - * that the random router should both create new actors and use the 'routees' actor(s). - * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. - *
    - * The configuration parameter trumps the constructor arguments. This means that - * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will - * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. - */ -case class RemoteRandomRouter(nrOfInstances: Int, routees: Iterable[String], override val resizer: Option[Resizer] = None) - extends RemoteRouterConfig with RandomLike { - - /** - * Constructor that sets the routees to be used. - * Java API - */ - def this(n: Int, t: java.lang.Iterable[String]) = this(n, t.asScala) - - /** - * Constructor that sets the resizer to be used. - * Java API - */ - def this(resizer: Resizer) = this(0, Nil, Some(resizer)) -} - -/** - * A Router that tries to send to routee with fewest messages in mailbox. - *
    - * Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means - * that the random router should both create new actors and use the 'routees' actor(s). - * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. - *
    - * The configuration parameter trumps the constructor arguments. This means that - * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will - * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. - */ -case class RemoteSmallestMailboxRouter(nrOfInstances: Int, routees: Iterable[String], override val resizer: Option[Resizer] = None) - extends RemoteRouterConfig with SmallestMailboxLike { - - /** - * Constructor that sets the routees to be used. - * Java API - */ - def this(n: Int, t: java.lang.Iterable[String]) = this(n, t.asScala) - - /** - * Constructor that sets the resizer to be used. - * Java API - */ - def this(resizer: Resizer) = this(0, Nil, Some(resizer)) -} - -/** - * A Router that uses broadcasts a message to all its connections. - *
    - * Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means - * that the random router should both create new actors and use the 'routees' actor(s). - * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. - *
    - * The configuration parameter trumps the constructor arguments. This means that - * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will - * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. - */ -case class RemoteBroadcastRouter(nrOfInstances: Int, routees: Iterable[String], override val resizer: Option[Resizer] = None) - extends RemoteRouterConfig with BroadcastLike { - - /** - * Constructor that sets the routees to be used. - * Java API - */ - def this(n: Int, t: java.lang.Iterable[String]) = this(n, t.asScala) - - /** - * Constructor that sets the resizer to be used. - * Java API - */ - def this(resizer: Resizer) = this(0, Nil, Some(resizer)) -} - -/** - * Simple router that broadcasts the message to all routees, and replies with the first response. - *
    - * Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means - * that the random router should both create new actors and use the 'routees' actor(s). - * In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. - *
    - * The configuration parameter trumps the constructor arguments. This means that - * if you provide either 'nrOfInstances' or 'routees' to during instantiation they will - * be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used. - */ -case class RemoteScatterGatherFirstCompletedRouter(nrOfInstances: Int, routees: Iterable[String], within: Duration, - override val resizer: Option[Resizer] = None) - extends RemoteRouterConfig with ScatterGatherFirstCompletedLike { - - /** - * Constructor that sets the routees to be used. - * Java API - */ - def this(n: Int, t: java.lang.Iterable[String], w: Duration) = this(n, t.asScala, w) - - /** - * Constructor that sets the resizer to be used. - * Java API - */ - def this(resizer: Resizer, w: Duration) = this(0, Nil, w, Some(resizer)) -} diff --git a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala index f183a940a7..203d077d52 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala @@ -16,6 +16,7 @@ object RemoteRouterSpec { } } +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class RemoteRouterSpec extends AkkaSpec(""" akka { actor.provider = "akka.remote.RemoteActorRefProvider" @@ -26,10 +27,18 @@ akka { } actor.deployment { /blub { - router = "round-robin" + router = round-robin nr-of-instances = 2 target.nodes = ["akka://remote_sys@localhost:12346"] } + /elastic-blub { + router = round-robin + resizer { + lower-bound = 2 + upper-bound = 3 + } + target.nodes = ["akka://remote_sys@localhost:12346"] + } } } """) with ImplicitSender { @@ -53,6 +62,23 @@ akka { expectMsgType[ActorPath].toString must be === "akka://remote_sys@localhost:12346/remote/RemoteRouterSpec@localhost:12345/user/blub/c2" } + "deploy its children on remote host driven by programatic definition" in { + val router = system.actorOf(Props[Echo].withRouter(new RemoteRouterConfig(RoundRobinRouter(2), + Seq("akka://remote_sys@localhost:12346"))), "blub2") + router ! "" + expectMsgType[ActorPath].toString must be === "akka://remote_sys@localhost:12346/remote/RemoteRouterSpec@localhost:12345/user/blub2/c1" + router ! "" + expectMsgType[ActorPath].toString must be === "akka://remote_sys@localhost:12346/remote/RemoteRouterSpec@localhost:12345/user/blub2/c2" + } + + "deploy dynamic resizable number of children on remote host driven by configuration" in { + val router = system.actorOf(Props[Echo].withRouter(FromConfig), "elastic-blub") + router ! "" + expectMsgType[ActorPath].toString must be === "akka://remote_sys@localhost:12346/remote/RemoteRouterSpec@localhost:12345/user/elastic-blub/c1" + router ! 
"" + expectMsgType[ActorPath].toString must be === "akka://remote_sys@localhost:12346/remote/RemoteRouterSpec@localhost:12345/user/elastic-blub/c2" + } + } } From ac886a8660761ea3f8ca093b7edd9b756c0a5915 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 17 Jan 2012 09:34:34 +0100 Subject: [PATCH 62/84] Moved DefaultScheduler to Scheduler.scala file --- .../scala/akka/actor/ActorRefProvider.scala | 149 ----------------- .../src/main/scala/akka/actor/Scheduler.scala | 151 ++++++++++++++++++ 2 files changed, 151 insertions(+), 149 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index f54df8c50b..21a3f26a58 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -5,14 +5,12 @@ package akka.actor import java.util.concurrent.atomic.AtomicLong -import org.jboss.netty.akka.util.{ Timer, TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout } import akka.config.ConfigurationException import akka.dispatch._ import akka.routing._ import akka.AkkaException import akka.util.{ Duration, Switch, Helpers, Timeout } import akka.event._ -import java.io.Closeable /** * Interface for all ActorRef providers to implement. @@ -523,150 +521,3 @@ class LocalDeathWatch(val mapSize: Int) extends DeathWatch with ActorClassificat } } -/** - * Scheduled tasks (Runnable and functions) are executed with the supplied dispatcher. - * Note that dispatcher is by-name parameter, because dispatcher might not be initialized - * when the scheduler is created. - * - * The HashedWheelTimer used by this class MUST throw an IllegalStateException - * if it does not enqueue a task. Once a task is queued, it MUST be executed or - * returned from stop(). - */ -class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, dispatcher: ⇒ MessageDispatcher) extends Scheduler with Closeable { - - def schedule(initialDelay: Duration, delay: Duration, receiver: ActorRef, message: Any): Cancellable = { - val continuousCancellable = new ContinuousCancellable - val task = new TimerTask with ContinuousScheduling { - def run(timeout: HWTimeout) { - receiver ! 
message - // Check if the receiver is still alive and kicking before reschedule the task - if (receiver.isTerminated) { - log.warning("Could not reschedule message to be sent because receiving actor has been terminated.") - } else { - scheduleNext(timeout, delay, continuousCancellable) - } - } - } - continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay)) - continuousCancellable - } - - def schedule(initialDelay: Duration, delay: Duration)(f: ⇒ Unit): Cancellable = { - - val continuousCancellable = new ContinuousCancellable - val task = new TimerTask with ContinuousScheduling with Runnable { - def run = f - def run(timeout: HWTimeout) { - dispatcher execute this - scheduleNext(timeout, delay, continuousCancellable) - } - } - continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay)) - continuousCancellable - } - - def schedule(initialDelay: Duration, delay: Duration, runnable: Runnable): Cancellable = { - val continuousCancellable = new ContinuousCancellable - val task = new TimerTask with ContinuousScheduling { - def run(timeout: HWTimeout) { - dispatcher.execute(runnable) - scheduleNext(timeout, delay, continuousCancellable) - } - } - continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay)) - continuousCancellable - } - - def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable = { - val task = new TimerTask() { - def run(timeout: HWTimeout) { dispatcher.execute(runnable) } - } - new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay)) - } - - def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable = { - val task = new TimerTask { - def run(timeout: HWTimeout) { - receiver ! message - } - } - new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay)) - } - - def scheduleOnce(delay: Duration)(f: ⇒ Unit): Cancellable = { - val task = new TimerTask { - def run(timeout: HWTimeout) { - dispatcher.execute(new Runnable { def run = f }) - } - } - new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay)) - } - - private trait ContinuousScheduling { this: TimerTask ⇒ - def scheduleNext(timeout: HWTimeout, delay: Duration, delegator: ContinuousCancellable) { - try { - delegator.swap(timeout.getTimer.newTimeout(this, delay)) - } catch { - case _: IllegalStateException ⇒ // stop recurring if timer is stopped - } - } - } - - private def execDirectly(t: HWTimeout): Unit = { - try t.getTask.run(t) catch { - case e: InterruptedException ⇒ throw e - case e: Exception ⇒ log.error(e, "exception while executing timer task") - } - } - - def close() = { - import scala.collection.JavaConverters._ - hashedWheelTimer.stop().asScala foreach execDirectly - } -} - -/** - * Wrapper of a [[org.jboss.netty.akka.util.Timeout]] that delegates all - * methods. Needed to be able to cancel continuous tasks, - * since they create new Timeout for each tick. 
- */ -private[akka] class ContinuousCancellable extends Cancellable { - @volatile - private var delegate: HWTimeout = _ - @volatile - private var cancelled = false - - private[akka] def init(initialTimeout: HWTimeout): Unit = { - delegate = initialTimeout - } - - private[akka] def swap(newTimeout: HWTimeout): Unit = { - val wasCancelled = isCancelled - delegate = newTimeout - if (wasCancelled || isCancelled) cancel() - } - - def isCancelled(): Boolean = { - // delegate is initially null, but this object will not be exposed to the world until after init - cancelled || delegate.isCancelled() - } - - def cancel(): Unit = { - // the underlying Timeout will not become cancelled once the task has been started to run, - // therefore we keep a flag here to make sure that rescheduling doesn't occur when cancelled - cancelled = true - // delegate is initially null, but this object will not be exposed to the world until after init - delegate.cancel() - } -} - -class DefaultCancellable(val timeout: HWTimeout) extends Cancellable { - def cancel() { - timeout.cancel() - } - - def isCancelled: Boolean = { - timeout.isCancelled - } -} - diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index a7d4376114..4fc9bf681b 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -13,6 +13,11 @@ package akka.actor import akka.util.Duration +import org.jboss.netty.akka.util.{ Timer, TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout } +import akka.event.LoggingAdapter +import akka.dispatch.MessageDispatcher +import java.io.Closeable + //#scheduler /** * An Akka scheduler service. This one needs one special behavior: if @@ -108,3 +113,149 @@ trait Cancellable { def isCancelled: Boolean } //#cancellable + +/** + * Scheduled tasks (Runnable and functions) are executed with the supplied dispatcher. + * Note that dispatcher is by-name parameter, because dispatcher might not be initialized + * when the scheduler is created. + * + * The HashedWheelTimer used by this class MUST throw an IllegalStateException + * if it does not enqueue a task. Once a task is queued, it MUST be executed or + * returned from stop(). + */ +class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, dispatcher: ⇒ MessageDispatcher) extends Scheduler with Closeable { + + def schedule(initialDelay: Duration, delay: Duration, receiver: ActorRef, message: Any): Cancellable = { + val continuousCancellable = new ContinuousCancellable + val task = new TimerTask with ContinuousScheduling { + def run(timeout: HWTimeout) { + receiver ! 
message + // Check if the receiver is still alive and kicking before reschedule the task + if (receiver.isTerminated) { + log.warning("Could not reschedule message to be sent because receiving actor has been terminated.") + } else { + scheduleNext(timeout, delay, continuousCancellable) + } + } + } + continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay)) + continuousCancellable + } + + def schedule(initialDelay: Duration, delay: Duration)(f: ⇒ Unit): Cancellable = { + val continuousCancellable = new ContinuousCancellable + val task = new TimerTask with ContinuousScheduling with Runnable { + def run = f + def run(timeout: HWTimeout) { + dispatcher execute this + scheduleNext(timeout, delay, continuousCancellable) + } + } + continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay)) + continuousCancellable + } + + def schedule(initialDelay: Duration, delay: Duration, runnable: Runnable): Cancellable = { + val continuousCancellable = new ContinuousCancellable + val task = new TimerTask with ContinuousScheduling { + def run(timeout: HWTimeout) { + dispatcher.execute(runnable) + scheduleNext(timeout, delay, continuousCancellable) + } + } + continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay)) + continuousCancellable + } + + def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable = { + val task = new TimerTask() { + def run(timeout: HWTimeout) { dispatcher.execute(runnable) } + } + new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay)) + } + + def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable = { + val task = new TimerTask { + def run(timeout: HWTimeout) { + receiver ! message + } + } + new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay)) + } + + def scheduleOnce(delay: Duration)(f: ⇒ Unit): Cancellable = { + val task = new TimerTask { + def run(timeout: HWTimeout) { + dispatcher.execute(new Runnable { def run = f }) + } + } + new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay)) + } + + private trait ContinuousScheduling { this: TimerTask ⇒ + def scheduleNext(timeout: HWTimeout, delay: Duration, delegator: ContinuousCancellable) { + try { + delegator.swap(timeout.getTimer.newTimeout(this, delay)) + } catch { + case _: IllegalStateException ⇒ // stop recurring if timer is stopped + } + } + } + + private def execDirectly(t: HWTimeout): Unit = { + try t.getTask.run(t) catch { + case e: InterruptedException ⇒ throw e + case e: Exception ⇒ log.error(e, "exception while executing timer task") + } + } + + def close() = { + import scala.collection.JavaConverters._ + hashedWheelTimer.stop().asScala foreach execDirectly + } +} + +/** + * Wrapper of a [[org.jboss.netty.akka.util.Timeout]] that delegates all + * methods. Needed to be able to cancel continuous tasks, + * since they create new Timeout for each tick. 
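+ * For example (sketch only, assuming the usual `akka.util.duration._` imports and an
+ * ActorRef named `receiver` in scope), a recurring task obtained via
+ * {{{
+ * val tick: Cancellable = system.scheduler.schedule(1 second, 1 second, receiver, "Tick")
+ * // ... later
+ * tick.cancel()
+ * }}}
+ * is backed by a single ContinuousCancellable, even though each tick creates a fresh Timeout.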
+ */ +private[akka] class ContinuousCancellable extends Cancellable { + @volatile + private var delegate: HWTimeout = _ + @volatile + private var cancelled = false + + private[akka] def init(initialTimeout: HWTimeout): Unit = { + delegate = initialTimeout + } + + private[akka] def swap(newTimeout: HWTimeout): Unit = { + val wasCancelled = isCancelled + delegate = newTimeout + if (wasCancelled || isCancelled) cancel() + } + + def isCancelled(): Boolean = { + // delegate is initially null, but this object will not be exposed to the world until after init + cancelled || delegate.isCancelled() + } + + def cancel(): Unit = { + // the underlying Timeout will not become cancelled once the task has been started to run, + // therefore we keep a flag here to make sure that rescheduling doesn't occur when cancelled + cancelled = true + // delegate is initially null, but this object will not be exposed to the world until after init + delegate.cancel() + } +} + +class DefaultCancellable(val timeout: HWTimeout) extends Cancellable { + def cancel() { + timeout.cancel() + } + + def isCancelled: Boolean = { + timeout.isCancelled + } +} \ No newline at end of file From dadb25d32b805b07ca775621e0b419a7bcfb0efe Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 09:35:54 +0100 Subject: [PATCH 63/84] Added an extra test --- akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index ef9f3b01bc..c36e52b21c 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -51,7 +51,9 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa "return supplied value on timeout" in { val timedOut = Promise.successful[String]("Timedout") val promise = Promise[String]() or timedOut + val promise2 = timedOut or Promise[String]() Await.result(promise, timeout.duration) must be("Timedout") + Await.result(promise2, timeout.duration) must be("Timedout") } } "completed with a result" must { From 5106fd6d4d2d14037827017c7357fe996160bca0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 11:40:26 +0100 Subject: [PATCH 64/84] Removing ClusterName --- akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala | 2 -- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 6 ------ akka-remote/src/main/resources/reference.conf | 1 - .../src/main/scala/akka/remote/RemoteActorRefProvider.scala | 1 - akka-remote/src/main/scala/akka/remote/RemoteSettings.scala | 1 - .../src/test/scala/akka/remote/RemoteConfigSpec.scala | 1 - 6 files changed, 12 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 21a3f26a58..de907827a5 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -41,8 +41,6 @@ trait ActorRefProvider { def nodename: String - def clustername: String - /** * The root path for all actors within this actor system, including remote * address if enabled. 
diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index e24a3a29f2..00df791166 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -158,11 +158,6 @@ abstract class ActorSystem extends ActorRefFactory { */ def nodename: String - /** - * The logical name of the cluster this actor system belongs to. - */ - def clustername: String - /** * Construct a path below the application guardian to be used with [[ActorSystem.actorFor]]. */ @@ -379,7 +374,6 @@ class ActorSystemImpl(val name: String, applicationConfig: Config) extends Actor def systemGuardian: InternalActorRef = provider.systemGuardian def deathWatch: DeathWatch = provider.deathWatch def nodename: String = provider.nodename - def clustername: String = provider.clustername def /(actorName: String): ActorPath = guardian.path / actorName def /(path: Iterable[String]): ActorPath = guardian.path / path diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index b3d2027d91..960d663636 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -137,7 +137,6 @@ akka { } cluster { - name = "default-cluster" nodename = "default" seed-nodes = [] } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 037f9d594a..3f95c03d1a 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -33,7 +33,6 @@ class RemoteActorRefProvider( def guardian = local.guardian def systemGuardian = local.systemGuardian def nodename = remoteSettings.NodeName - def clustername = remoteSettings.ClusterName def terminationFuture = local.terminationFuture def dispatcher = local.dispatcher diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index 69c921ff25..bc765bae26 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -25,7 +25,6 @@ class RemoteSettings(val config: Config, val systemName: String) { val BackoffTimeout = Duration(config.getMilliseconds("akka.remote.backoff-timeout"), MILLISECONDS) // TODO cluster config will go into akka-cluster/reference.conf when we enable that module - val ClusterName = getString("akka.cluster.name") val SeedNodes = Set.empty[RemoteNettyAddress] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { case RemoteAddressExtractor(addr) ⇒ addr.transport } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index b1a9905b6e..5d053f377b 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -40,7 +40,6 @@ class RemoteConfigSpec extends AkkaSpec("akka.cluster.nodename = node1") { // TODO cluster config will go into akka-cluster/reference.conf when we enable that module //akka.cluster - getString("akka.cluster.name") must equal("default-cluster") getString("akka.cluster.nodename") must equal("node1") getStringList("akka.cluster.seed-nodes") must equal(new java.util.ArrayList[String]) From e28f284c3c0ce6d43415d16b709671227a15ddb9 Mon Sep 17 
00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 11:43:40 +0100 Subject: [PATCH 65/84] Removing pointledd use of CORBA from the UUID lib --- .../src/main/java/com/eaio/uuid/UUID.java | 3 +- akka-actor/src/main/java/resources/uuid.idl | 55 ------------------- 2 files changed, 1 insertion(+), 57 deletions(-) delete mode 100644 akka-actor/src/main/java/resources/uuid.idl diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUID.java b/akka-actor/src/main/java/com/eaio/uuid/UUID.java index 6c49bcd1c8..46bc867cc0 100644 --- a/akka-actor/src/main/java/com/eaio/uuid/UUID.java +++ b/akka-actor/src/main/java/com/eaio/uuid/UUID.java @@ -54,8 +54,7 @@ import com.eaio.util.lang.Hex; * @author Johann Burkard * @version $Id: UUID.java 1888 2009-03-15 12:43:24Z johann $ */ -public class UUID implements Comparable, Serializable, Cloneable, - IDLEntity { +public class UUID implements Comparable, Serializable, Cloneable { /** * Hasn't ever changed between versions. diff --git a/akka-actor/src/main/java/resources/uuid.idl b/akka-actor/src/main/java/resources/uuid.idl deleted file mode 100644 index 6fe1575add..0000000000 --- a/akka-actor/src/main/java/resources/uuid.idl +++ /dev/null @@ -1,55 +0,0 @@ -/* - * uuid.idl - * - * Created 19:49 16.12.2003 - * - * eaio: UUID - an implementation of the UUID specification - * Copyright (c) 2003-2009 Johann Burkard (jb@eaio.com) http://eaio.com. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN - * NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -module com { - module eaio { - module uuid { - -/** - * The UUID struct. - */ - - struct UUID { - -/** - * The time field of the UUID. - */ - - long long time; - -/** - * The clock sequence and node field of the UUID. 
- */ - - long long clockSeqAndNode; - - }; - - }; - }; -}; \ No newline at end of file From e81791cbb6879b4b2fa2a46da6ce74045293cda5 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 17 Jan 2012 14:07:20 +0100 Subject: [PATCH 66/84] =?UTF-8?q?fix=20dormant=20bug=20in=20Mailbox?= =?UTF-8?q?=E2=80=99s=20error=20logging?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - actor.actor could potentially be null after a failed restart attempt - fix one case of baroqueness in FaultHandling.scala --- akka-actor/src/main/scala/akka/actor/FaultHandling.scala | 8 ++++---- akka-actor/src/main/scala/akka/dispatch/Mailbox.scala | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index afdd683419..2b8e23c6fb 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -13,10 +13,10 @@ case class ChildRestartStats(val child: ActorRef, var maxNrOfRetriesCount: Int = def requestRestartPermission(retriesWindow: (Option[Int], Option[Int])): Boolean = retriesWindow match { - case (Some(retries), _) if retries < 1 ⇒ false - case (Some(retries), None) ⇒ maxNrOfRetriesCount += 1; maxNrOfRetriesCount <= retries - case (x @ (Some(_) | None), Some(window)) ⇒ retriesInWindowOkay(if (x.isDefined) x.get else 1, window) - case (None, _) ⇒ true + case (Some(retries), _) if retries < 1 ⇒ false + case (Some(retries), None) ⇒ maxNrOfRetriesCount += 1; maxNrOfRetriesCount <= retries + case (x, Some(window)) ⇒ retriesInWindowOkay(if (x.isDefined) x.get else 1, window) + case (None, _) ⇒ true } private def retriesInWindowOkay(retries: Int, window: Int): Boolean = { diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 0da0bf13af..d88887d3db 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -214,7 +214,7 @@ private[akka] abstract class Mailbox(val actor: ActorCell) extends MessageQueue } } catch { case e ⇒ - actor.system.eventStream.publish(Error(e, actor.self.path.toString, actor.actor.getClass, "exception during processing system messages, dropping " + SystemMessage.size(nextMessage) + " messages!")) + actor.system.eventStream.publish(Error(e, actor.self.path.toString, this.getClass, "exception during processing system messages, dropping " + SystemMessage.size(nextMessage) + " messages!")) throw e } } From fc0eb1de27033436e8b8c861601c3afd99f17df0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 14:23:02 +0100 Subject: [PATCH 67/84] Changing the semantics of Future.or --- .../test/scala/akka/dispatch/FutureSpec.scala | 2 +- .../src/main/scala/akka/dispatch/Future.scala | 19 ++++++++----------- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index c36e52b21c..77dbbd5fa7 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -50,7 +50,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa behave like emptyFuture(_(Promise())) "return supplied value on timeout" in { val timedOut = Promise.successful[String]("Timedout") - val promise = Promise[String]() or 
timedOut + val promise = Promise.failed[String](new RuntimeException("br0ken")) or timedOut val promise2 = timedOut or Promise[String]() Await.result(promise, timeout.duration) must be("Timedout") Await.result(promise2, timeout.duration) must be("Timedout") diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 75043065a0..a2deacf8a8 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -420,21 +420,18 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { } /** - * Returns a new Future that is either the successful result of this Future, the successful result of that Future, - * or the failure from either this or that. In case fails, and the other never completes, - * the returned Future will never be completed. + * Returns a new Future that will either hold the successful value of this Future, + * or, it this Future fails, it will hold the successful result of "that" Future. + * + * This means that if this Future never completes at all, then the returned Future + * won't be completed either. */ def or[U >: T](that: Future[U]): Future[U] = { val p = Promise[U]() - def register(to: Future[U], fallback: Future[U]) = to onComplete { - case r @ Right(_) ⇒ p tryComplete r - case l @ Left(_) ⇒ fallback.value match { - case Some(Left(_)) ⇒ p tryComplete l //If he failed, race for setting failure - case _ ⇒ // Either "that" was successful, or he's not done yet, let him win - } + onComplete { + case r @ Right(_) ⇒ p complete r + case _ ⇒ that onSuccess { case r ⇒ p success r } } - register(this, that) - register(that, this) p } From f01b9486aab5c05dbc97a98bb73fa1d65ee497e9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 17 Jan 2012 14:27:23 +0100 Subject: [PATCH 68/84] Removed create-as and corresponding ActorRecipe. 
See #1511 --- .../test/scala/akka/actor/DeployerSpec.scala | 21 ------------------- .../routing/ConfiguredLocalRoutingSpec.scala | 2 +- akka-actor/src/main/resources/reference.conf | 8 +------ .../src/main/scala/akka/actor/Deployer.scala | 13 ++---------- .../src/main/scala/akka/routing/Routing.scala | 6 +++--- .../akka/remote/RemoteActorRefProvider.scala | 2 +- .../scala/akka/routing/RemoteRouters.scala | 2 +- .../akka/remote/RemoteDeployerSpec.scala | 1 - 8 files changed, 9 insertions(+), 46 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala index 404fcf5acb..89a153ad57 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala @@ -15,11 +15,6 @@ object DeployerSpec { akka.actor.deployment { /service1 { } - /service3 { - create-as { - class = "akka.actor.DeployerSpec$RecipeActor" - } - } /service-direct { router = from-code } @@ -68,7 +63,6 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { Deploy( service, deployment.get.config, - None, NoRouter, LocalScope))) } @@ -79,20 +73,6 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { deployment must be(None) } - "be able to parse 'akka.actor.deployment._' with recipe" in { - val service = "/service3" - val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service) - deployment must be('defined) - - deployment must be(Some( - Deploy( - service, - deployment.get.config, - Some(ActorRecipe(classOf[DeployerSpec.RecipeActor])), - NoRouter, - LocalScope))) - } - "detect invalid number-of-instances" in { intercept[com.typesafe.config.ConfigException.WrongType] { val invalidDeployerConf = ConfigFactory.parseString(""" @@ -137,7 +117,6 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service) deployment must be('defined) deployment.get.path must be(service) - deployment.get.recipe must be(None) deployment.get.routing.getClass must be(expected.getClass) deployment.get.routing.resizer must be(expected.resizer) deployment.get.scope must be(LocalScope) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala index dd4e45f5cb..032c4da921 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala @@ -15,7 +15,7 @@ class ConfiguredLocalRoutingSpec extends AkkaSpec with DefaultTimeout with Impli "RouterConfig" must { "be overridable in config" in { - deployer.deploy(Deploy("/config", null, None, RandomRouter(4), LocalScope)) + deployer.deploy(Deploy("/config", null, RandomRouter(4), LocalScope)) val actor = system.actorOf(Props(new Actor { def receive = { case "get" ⇒ sender ! context.props diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index de4b46709b..f6b7188192 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -74,8 +74,7 @@ akka { # that the values specified in the code shall be used. 
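  # For illustration only (not part of the reference defaults), a deployment
  # section for an actor at path /my-service could look like:
  #
  #   /my-service {
  #     router = round-robin
  #     nr-of-instances = 5
  #   }
  #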
# In case of routing, the actors to be routed to can be specified # in several ways: - # - nr-of-instances: will create that many children given the actor factory - # supplied in the source code (overridable using create-as below) + # - nr-of-instances: will create that many children # - routees.paths: will look the paths up using actorFor and route to # them, i.e. will not create children # - resizer: dynamically resizable number of routees as specified in resizer below @@ -88,11 +87,6 @@ akka { # within is the timeout used for routers containing future calls within = 5 seconds - create-as { - # fully qualified class name of recipe implementation - class = "" - } - routees { # Alternatively to giving nr-of-instances you can specify the full # paths of those actors which should be routed to. This setting takes diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 5ac4c13391..54ba5c1a57 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -15,7 +15,7 @@ import akka.routing._ import java.util.concurrent.{ TimeUnit, ConcurrentHashMap } import akka.util.ReflectiveAccess -case class Deploy(path: String, config: Config, recipe: Option[ActorRecipe] = None, routing: RouterConfig = NoRouter, scope: Scope = LocalScope) +case class Deploy(path: String, config: Config, routing: RouterConfig = NoRouter, scope: Scope = LocalScope) case class ActorRecipe(implementationClass: Class[_ <: Actor]) //TODO Add ActorConfiguration here @@ -82,16 +82,7 @@ class Deployer(val settings: ActorSystem.Settings) { } } - val recipe: Option[ActorRecipe] = - deployment.getString("create-as.class") match { - case "" ⇒ None - case impl ⇒ - val implementationClass = getClassFor[Actor](impl).fold(e ⇒ throw new ConfigurationException( - "Config option [akka.actor.deployment." + key + ".create-as.class] load failed", e), identity) - Some(ActorRecipe(implementationClass)) - } - - Some(Deploy(key, deployment, recipe, router, LocalScope)) + Some(Deploy(key, deployment, router, LocalScope)) } } diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 0473f99fd6..abc1c80713 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -126,9 +126,9 @@ trait RouterConfig { def adaptFromDeploy(deploy: Option[Deploy]): RouterConfig = { deploy match { - case Some(Deploy(_, _, _, NoRouter, _)) ⇒ this - case Some(Deploy(_, _, _, r, _)) ⇒ r - case _ ⇒ this + case Some(Deploy(_, _, NoRouter, _)) ⇒ this + case Some(Deploy(_, _, r, _)) ⇒ r + case _ ⇒ this } } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 3f95c03d1a..bcfb98e76d 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -103,7 +103,7 @@ class RemoteActorRefProvider( }) deployment match { - case Some(Deploy(_, _, _, _, RemoteScope(address))) ⇒ + case Some(Deploy(_, _, _, RemoteScope(address))) ⇒ // FIXME RK this should be done within the deployer, i.e. 
the whole parsing business address.parse(remote.transports) match { case Left(x) ⇒ diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala index 2b0a685f32..c8f61f471e 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouters.scala @@ -22,7 +22,7 @@ trait RemoteRouterConfig extends RouterConfig { val impl = context.system.asInstanceOf[ActorSystemImpl] //TODO ticket #1559 IndexedSeq.empty[ActorRef] ++ (for (i ← 1 to nrOfInstances) yield { val name = "c" + i - val deploy = Deploy("", ConfigFactory.empty(), None, props.routerConfig, RemoteScope(node.next)) + val deploy = Deploy("", ConfigFactory.empty(), props.routerConfig, RemoteScope(node.next)) impl.provider.actorOf(impl, props, context.self.asInstanceOf[InternalActorRef], context.self.path / name, false, Some(deploy)) }) } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala index bb219f3d55..1e78b3ebe3 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala @@ -41,7 +41,6 @@ class RemoteDeployerSpec extends AkkaSpec(RemoteDeployerSpec.deployerConf) { Deploy( service, deployment.get.config, - None, RoundRobinRouter(3), RemoteScope(UnparsedSystemAddress(Some("sys"), UnparsedTransportAddress("akka", "wallace", 2552)))))) } From e4b6cfbfbec50bc863a37daf9368374944d5267a Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 14:39:02 +0100 Subject: [PATCH 69/84] Revering moving of the Create() message, leave it there ;) --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 2 -- .../src/main/scala/akka/dispatch/AbstractDispatcher.scala | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index a0238fe7c8..8b0fe89ec2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -290,8 +290,6 @@ private[akka] class ActorCell( parent.sendSystemMessage(akka.dispatch.Supervise(self)) dispatcher.attach(this) - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - dispatcher.systemDispatch(this, Create()) } // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 66014e25cc..0a868eef7d 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -208,6 +208,8 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext */ protected[akka] def register(actor: ActorCell) { inhabitantsUpdater.incrementAndGet(this) + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + systemDispatch(actor, Create()) } /** From d6708184c6207425219cac6e1fa99fc0b241e586 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 17 Jan 2012 14:48:46 +0100 Subject: [PATCH 70/84] Minor improvements based on feedback. 
See #1606 --- .../src/main/scala/akka/routing/Routing.scala | 14 +++++++++----- akka-docs/java/remoting.rst | 2 +- .../main/scala/akka/remote/RemoteDeployer.scala | 5 +---- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index dc251145d7..7d711df200 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -175,11 +175,14 @@ class RouteeProvider(ref: RoutedActorRef, val context: ActorContext, val resizer context.self.asInstanceOf[RoutedActorRef].removeRoutees(routees) } - def createRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = (nrOfInstances, routees) match { - case (0, Nil) ⇒ throw new IllegalArgumentException("Insufficient information - missing configuration.") - case (x, Nil) ⇒ (1 to x).map(_ ⇒ context.actorOf(props))(scala.collection.breakOut) - case (_, xs) ⇒ xs.map(context.actorFor(_))(scala.collection.breakOut) - } + def createRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = + (nrOfInstances, routees) match { + case (x, Nil) if x <= 0 ⇒ + throw new IllegalArgumentException( + "Must specify nrOfInstances or routees for [%s]" format context.self.path.toString) + case (x, Nil) ⇒ (1 to x).map(_ ⇒ context.actorOf(props))(scala.collection.breakOut) + case (_, xs) ⇒ xs.map(context.actorFor(_))(scala.collection.breakOut) + } def createAndRegisterRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): Unit = { if (resizer.isEmpty) { @@ -748,6 +751,7 @@ trait ScatterGatherFirstCompletedLike { this: RouterConfig ⇒ { case (sender, message) ⇒ + // FIXME avoid this cast val asker = routeeProvider.context.asInstanceOf[ActorCell].systemImpl.provider.ask(Timeout(within)).get asker.result.pipeTo(sender) message match { diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index 909315af46..05101497e1 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -69,7 +69,7 @@ The "app" in this case refers to the name of the ``ActorSystem``:: actor { deployment { /serviceA/retrieval { - remote = "akka://app@10.0.0.1:2552” + remote = "akka://app@10.0.0.1:2552" } } } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index 120a2b87bb..de3e0825ff 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -23,10 +23,7 @@ class RemoteDeployer(_settings: ActorSystem.Settings) extends Deployer(_settings if (!str.isEmpty) throw new ConfigurationException("unparseable remote node name " + str) val nodes = deploy.config.getStringList("target.nodes").asScala if (nodes.isEmpty || deploy.routing == NoRouter) d - else { - val r = new RemoteRouterConfig(deploy.routing, nodes) - Some(deploy.copy(routing = r)) - } + else Some(deploy.copy(routing = new RemoteRouterConfig(deploy.routing, nodes))) } case None ⇒ None } From 5006f96348e52db734491f3e09d4e1f2060e829e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 14:51:21 +0100 Subject: [PATCH 71/84] Moving the creation fo the Create() message to ActorCell --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 3 +++ .../src/main/scala/akka/dispatch/AbstractDispatcher.scala | 2 -- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git 
a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 8b0fe89ec2..c804f5b56f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -286,6 +286,9 @@ private[akka] class ActorCell( final def start(): Unit = { mailbox = dispatcher.createMailbox(this) + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + dispatcher.systemDispatch(this, Create()) + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ parent.sendSystemMessage(akka.dispatch.Supervise(self)) diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 0a868eef7d..66014e25cc 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -208,8 +208,6 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext */ protected[akka] def register(actor: ActorCell) { inhabitantsUpdater.incrementAndGet(this) - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - systemDispatch(actor, Create()) } /** From 21ec737b4cdb2f0867d73d80fbdcba9158f2270c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 17 Jan 2012 14:58:31 +0100 Subject: [PATCH 72/84] Should have been in previous merge commit --- .../src/main/scala/akka/routing/RemoteRouterConfig.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala index 7cc47caa77..43e6c0d38f 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala @@ -62,7 +62,7 @@ class RemoteRouteeProvider(nodes: Iterable[String], _ref: RoutedActorRef, _conte val impl = context.system.asInstanceOf[ActorSystemImpl] //TODO ticket #1559 IndexedSeq.empty[ActorRef] ++ (for (i ← 1 to nrOfInstances) yield { val name = "c" + i - val deploy = Deploy("", ConfigFactory.empty(), None, props.routerConfig, RemoteScope(nodeAddressIter.next)) + val deploy = Deploy("", ConfigFactory.empty(), props.routerConfig, RemoteScope(nodeAddressIter.next)) impl.provider.actorOf(impl, props, context.self.asInstanceOf[InternalActorRef], context.self.path / name, false, Some(deploy)) }) From 387ffe1bce145559583224caccb55d73cdd05498 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 17 Jan 2012 15:23:13 +0100 Subject: [PATCH 73/84] Limit of default-dispatcher pool size, since we run tests in parallel --- akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index b98937b126..7fb0307fe8 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -29,6 +29,11 @@ object AkkaSpec { actor { default-dispatcher { core-pool-size-factor = 2 + core-pool-size-min = 4 + core-pool-size-max = 8 + max-pool-size-factor = 2 + max-pool-size-min = 4 + max-pool-size-max = 8 } } } From bfd11ad8c5deab4bc3dc5a84c090d9c28251afa6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 15:25:26 +0100 Subject: [PATCH 74/84] Removing nodename --- .../src/main/scala/akka/actor/ActorCell.scala | 4 ++-- 
.../scala/akka/actor/ActorRefProvider.scala | 5 ----- .../main/scala/akka/actor/ActorSystem.scala | 6 ------ .../RoundRobin1ReplicaMultiJvmSpec.scala | 3 +-- akka-docs/dev/multi-jvm-testing.rst | 19 ++++++++----------- akka-remote/src/main/resources/reference.conf | 1 - .../src/main/scala/akka/remote/Remote.scala | 4 ++-- .../akka/remote/RemoteActorRefProvider.scala | 1 - .../scala/akka/remote/RemoteSettings.scala | 5 ----- .../AbstractRemoteActorMultiJvmSpec.scala | 1 - .../DirectRoutedRemoteActorMultiJvmSpec.scala | 4 ++-- .../remote/NewRemoteActorMultiJvmSpec.scala | 4 ++-- .../RandomRoutedRemoteActorMultiJvmSpec.scala | 8 ++++---- ...ndRobinRoutedRemoteActorMultiJvmSpec.scala | 9 +++++---- ...rGatherRoutedRemoteActorMultiJvmSpec.scala | 4 ++-- .../akka/remote/RemoteCommunicationSpec.scala | 1 - .../scala/akka/remote/RemoteConfigSpec.scala | 3 +-- .../akka/remote/RemoteDeathWatchSpec.scala | 1 - .../akka/remote/RemoteDeployerSpec.scala | 1 - .../scala/akka/remote/RemoteRouterSpec.scala | 1 - .../src/main/resources/application.conf | 3 --- 21 files changed, 29 insertions(+), 59 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index c804f5b56f..40f94b4210 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -287,10 +287,10 @@ private[akka] class ActorCell( mailbox = dispatcher.createMailbox(this) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - dispatcher.systemDispatch(this, Create()) + parent.sendSystemMessage(akka.dispatch.Supervise(self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - parent.sendSystemMessage(akka.dispatch.Supervise(self)) + dispatcher.systemDispatch(this, Create()) dispatcher.attach(this) } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index de907827a5..7bc9f502d1 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -39,8 +39,6 @@ trait ActorRefProvider { */ def deathWatch: DeathWatch - def nodename: String - /** * The root path for all actors within this actor system, including remote * address if enabled. @@ -285,9 +283,6 @@ class LocalActorRefProvider( new RootActorPath(LocalAddress(_systemName)), new Deployer(settings)) - val nodename: String = "local" - val clustername: String = "local" - val log = Logging(eventStream, "LocalActorRefProvider(" + rootPath.address + ")") /* diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 00df791166..174d13b9d2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -153,11 +153,6 @@ abstract class ActorSystem extends ActorRefFactory { */ def logConfiguration(): Unit - /** - * The logical node name where this actor system resides. - */ - def nodename: String - /** * Construct a path below the application guardian to be used with [[ActorSystem.actorFor]]. 
*/ @@ -373,7 +368,6 @@ class ActorSystemImpl(val name: String, applicationConfig: Config) extends Actor def guardian: InternalActorRef = provider.guardian def systemGuardian: InternalActorRef = provider.systemGuardian def deathWatch: DeathWatch = provider.deathWatch - def nodename: String = provider.nodename def /(actorName: String): ActorPath = guardian.path / actorName def /(path: Iterable[String]): ActorPath = guardian.path / path diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala index 35938749ba..3605ba9d34 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala @@ -17,8 +17,7 @@ object RoundRobin1ReplicaMultiJvmSpec { class HelloWorld extends Actor with Serializable { def receive = { - case "Hello" ⇒ - reply("World from node [" + Config.nodename + "]") + case "Hello" ⇒ reply("World from node [" + Config.nodename + "]") } } diff --git a/akka-docs/dev/multi-jvm-testing.rst b/akka-docs/dev/multi-jvm-testing.rst index dfad74f84e..9fc42349f2 100644 --- a/akka-docs/dev/multi-jvm-testing.rst +++ b/akka-docs/dev/multi-jvm-testing.rst @@ -159,21 +159,20 @@ You can define specific JVM options for each of the spawned JVMs. You do that by a file named after the node in the test with suffix ``.opts`` and put them in the same directory as the test. -For example, to feed the JVM options ``-Dakka.cluster.nodename=node1`` and -``-Dakka.remote.port=9991`` to the ``SampleMultiJvmNode1`` let's create three ``*.opts`` files -and add the options to them. +For example, to feed the JVM options ``-Dakka.remote.port=9991`` to the ``SampleMultiJvmNode1`` +let's create three ``*.opts`` files and add the options to them. 
``SampleMultiJvmNode1.opts``:: - -Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 + -Dakka.remote.port=9991 ``SampleMultiJvmNode2.opts``:: - -Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 + -Dakka.remote.port=9992 ``SampleMultiJvmNode3.opts``:: - -Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 + -Dakka.remote.port=9993 Overriding configuration options @@ -188,15 +187,15 @@ For example, to override the configuration option ``akka.cluster.name`` let's cr ``SampleMultiJvmNode1.conf``:: - akka.cluster.name = "test-cluster" + akka.remote.port = 9991 ``SampleMultiJvmNode2.conf``:: - akka.cluster.name = "test-cluster" + akka.remote.port = 9992 ``SampleMultiJvmNode3.conf``:: - akka.cluster.name = "test-cluster" + akka.remote.port = 9993 ScalaTest @@ -277,7 +276,6 @@ something in coordination:: "A cluster" must { "have jvm options" in { - System.getProperty("akka.cluster.nodename", "") must be("node1") System.getProperty("akka.remote.port", "") must be("9991") akka.config.Config.config.getString("test.name", "") must be("node1") } @@ -298,7 +296,6 @@ something in coordination:: "A cluster" must { "have jvm options" in { - System.getProperty("akka.cluster.nodename", "") must be("node2") System.getProperty("akka.remote.port", "") must be("9992") akka.config.Config.config.getString("test.name", "") must be("node2") } diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 960d663636..814a85f591 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -137,7 +137,6 @@ akka { } cluster { - nodename = "default" seed-nodes = [] } } diff --git a/akka-remote/src/main/scala/akka/remote/Remote.scala b/akka-remote/src/main/scala/akka/remote/Remote.scala index 6efa542e0e..f76aa8e908 100644 --- a/akka-remote/src/main/scala/akka/remote/Remote.scala +++ b/akka-remote/src/main/scala/akka/remote/Remote.scala @@ -110,7 +110,7 @@ class Remote(val settings: ActorSystem.Settings, val remoteSettings: RemoteSetti } } - log.info("Starting remote server on [{}] with node name [{}]", remoteAddress, provider.nodename) + log.info("Starting remote server on [{}@{}]", system.name, remoteAddress) } } @@ -153,7 +153,7 @@ class RemoteSystemDaemon(system: ActorSystemImpl, remote: Remote, _path: ActorPa override def !(msg: Any)(implicit sender: ActorRef = null): Unit = msg match { case message: DaemonMsg ⇒ - log.debug("Received command [{}] to RemoteSystemDaemon on [{}]", message, remote.remoteSettings.NodeName) + log.debug("Received command [{}] to RemoteSystemDaemon on [{}]", message, path.address.hostPort) message match { case DaemonMsgCreate(factory, path, supervisor) ⇒ import remote.remoteAddress diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index bcfb98e76d..397f110783 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -32,7 +32,6 @@ class RemoteActorRefProvider( def rootGuardian = local.rootGuardian def guardian = local.guardian def systemGuardian = local.systemGuardian - def nodename = remoteSettings.NodeName def terminationFuture = local.terminationFuture def dispatcher = local.dispatcher diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index bc765bae26..ad0356c009 100644 --- 
a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -29,11 +29,6 @@ class RemoteSettings(val config: Config, val systemName: String) { case RemoteAddressExtractor(addr) ⇒ addr.transport } - val NodeName: String = config.getString("akka.cluster.nodename") match { - case "" ⇒ throw new ConfigurationException("Configuration option 'akka.cluster.nodename' must be non-empty.") - case value ⇒ value - } - val serverSettings = new RemoteServerSettings val clientSettings = new RemoteClientSettings diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala index 0bc6d33f68..597b552fe9 100755 --- a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala @@ -23,7 +23,6 @@ trait AbstractRemoteActorMultiJvmSpec { akka { remote.server.hostname="%s" remote.server.port = "%d" - cluster.nodename = "node%d" }""".format(host, 9990+idx, idx)) withFallback commonConfig } } diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/DirectRoutedRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/DirectRoutedRemoteActorMultiJvmSpec.scala index 1b7a561cda..97b0e9c7e0 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/DirectRoutedRemoteActorMultiJvmSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/DirectRoutedRemoteActorMultiJvmSpec.scala @@ -11,7 +11,7 @@ object DirectRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSp class SomeActor extends Actor with Serializable { def receive = { - case "identify" ⇒ sender ! context.system.nodename + case "identify" ⇒ sender ! self.path.address.hostPort } } @@ -52,7 +52,7 @@ class DirectRoutedRemoteActorMultiJvmNode2 extends AkkaRemoteSpec(DirectRoutedRe val actor = system.actorOf(Props[SomeActor], "service-hello") actor.isInstanceOf[RemoteActorRef] must be(true) - Await.result(actor ? "identify", timeout.duration) must equal("node1") + Await.result(actor ? "identify", timeout.duration) must equal("AkkaRemoteSpec@localhost:9991") barrier("done") } diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/NewRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/NewRemoteActorMultiJvmSpec.scala index f7a02c3988..e6ed14419f 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/NewRemoteActorMultiJvmSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/NewRemoteActorMultiJvmSpec.scala @@ -12,7 +12,7 @@ object NewRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { class SomeActor extends Actor with Serializable { def receive = { - case "identify" ⇒ sender ! context.system.nodename + case "identify" ⇒ sender ! self.path.address.hostPort } } @@ -55,7 +55,7 @@ class NewRemoteActorMultiJvmNode2 extends AkkaRemoteSpec(NewRemoteActorMultiJvmS barrier("start") val actor = system.actorOf(Props[SomeActor], "service-hello") - Await.result(actor ? "identify", timeout.duration) must equal("node1") + Await.result(actor ? 
"identify", timeout.duration) must equal("AkkaRemoteSpec@localhost:9991") barrier("done") } diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/RandomRoutedRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/RandomRoutedRemoteActorMultiJvmSpec.scala index b9440d28b3..c2cc058f8d 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/RandomRoutedRemoteActorMultiJvmSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/RandomRoutedRemoteActorMultiJvmSpec.scala @@ -10,7 +10,7 @@ object RandomRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJvmSp override def NrOfNodes = 4 class SomeActor extends Actor with Serializable { def receive = { - case "hit" ⇒ sender ! context.system.nodename + case "hit" ⇒ sender ! self.path.address.hostPort case "end" ⇒ context.stop(self) } } @@ -83,9 +83,9 @@ class RandomRoutedRemoteActorMultiJvmNode4 extends AkkaRemoteSpec(RandomRoutedRe val iterationCount = 10 var replies = Map( - "node1" -> 0, - "node2" -> 0, - "node3" -> 0) + "AkkaRemoteSpec@localhost:9991" -> 0, + "AkkaRemoteSpec@localhost:9992" -> 0, + "AkkaRemoteSpec@localhost:9993" -> 0) for (i ← 0 until iterationCount) { for (k ← 0 until connectionCount) { diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/RoundRobinRoutedRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/RoundRobinRoutedRemoteActorMultiJvmSpec.scala index 5b5c6cbf6d..38e3182957 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/RoundRobinRoutedRemoteActorMultiJvmSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/RoundRobinRoutedRemoteActorMultiJvmSpec.scala @@ -11,7 +11,7 @@ object RoundRobinRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMultiJ class SomeActor extends Actor with Serializable { def receive = { - case "hit" ⇒ sender ! context.system.nodename + case "hit" ⇒ sender ! self.path.address.hostPort case "end" ⇒ context.stop(self) } } @@ -84,13 +84,14 @@ class RoundRobinRoutedRemoteActorMultiJvmNode4 extends AkkaRemoteSpec(RoundRobin val iterationCount = 10 var replies = Map( - "node1" -> 0, - "node2" -> 0, - "node3" -> 0) + "AkkaRemoteSpec@localhost:9991" -> 0, + "AkkaRemoteSpec@localhost:9992" -> 0, + "AkkaRemoteSpec@localhost:9993" -> 0) for (i ← 0 until iterationCount) { for (k ← 0 until connectionCount) { val nodeName = Await.result(actor ? "hit", timeout.duration).toString + replies = replies + (nodeName -> (replies(nodeName) + 1)) } } diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/ScatterGatherRoutedRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/ScatterGatherRoutedRemoteActorMultiJvmSpec.scala index c34de28882..48ef5adcc9 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/ScatterGatherRoutedRemoteActorMultiJvmSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/ScatterGatherRoutedRemoteActorMultiJvmSpec.scala @@ -10,7 +10,7 @@ object ScatterGatherRoutedRemoteActorMultiJvmSpec extends AbstractRemoteActorMul override def NrOfNodes = 4 class SomeActor extends Actor with Serializable { def receive = { - case "hit" ⇒ sender ! context.system.nodename + case "hit" ⇒ sender ! 
self.path.address.hostPort case "end" ⇒ context.stop(self) } } @@ -91,7 +91,7 @@ class ScatterGatherRoutedRemoteActorMultiJvmNode4 extends AkkaRemoteSpec(Scatter val replies = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) { case name: String ⇒ (name, 1) - }).foldLeft(Map("node1" -> 0, "node2" -> 0, "node3" -> 0)) { + }).foldLeft(Map("AkkaRemoteSpec@localhost:9991" -> 0, "AkkaRemoteSpec@localhost:9992" -> 0, "AkkaRemoteSpec@localhost:9993" -> 0)) { case (m, (n, c)) ⇒ m + (n -> (m(n) + c)) } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala index dd62ae48e2..58199b4683 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala @@ -33,7 +33,6 @@ object RemoteCommunicationSpec { class RemoteCommunicationSpec extends AkkaSpec(""" akka { actor.provider = "akka.remote.RemoteActorRefProvider" - cluster.nodename = Nonsense remote.server { hostname = localhost port = 12345 diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index 5d053f377b..03a343f3b1 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -3,7 +3,7 @@ package akka.remote import akka.testkit.AkkaSpec @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class RemoteConfigSpec extends AkkaSpec("akka.cluster.nodename = node1") { +class RemoteConfigSpec extends AkkaSpec("") { "RemoteExtension" must { "be able to parse remote and cluster config elements" in { @@ -40,7 +40,6 @@ class RemoteConfigSpec extends AkkaSpec("akka.cluster.nodename = node1") { // TODO cluster config will go into akka-cluster/reference.conf when we enable that module //akka.cluster - getString("akka.cluster.nodename") must equal("node1") getStringList("akka.cluster.seed-nodes") must equal(new java.util.ArrayList[String]) // getMilliseconds("akka.cluster.max-time-to-wait-until-connected") must equal(30 * 1000) diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala index b51720aa01..80a4f3cffe 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala @@ -16,7 +16,6 @@ akka { /watchers.remote = "akka://other@127.0.0.1:2666" } } - cluster.nodename = buh remote.server { hostname = "127.0.0.1" port = 2665 diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala index 1e78b3ebe3..15016748d0 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala @@ -11,7 +11,6 @@ import com.typesafe.config._ object RemoteDeployerSpec { val deployerConf = ConfigFactory.parseString(""" akka.actor.provider = "akka.remote.RemoteActorRefProvider" - akka.cluster.nodename = Whatever akka.actor.deployment { /user/service2 { router = round-robin diff --git a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala index f183a940a7..5c06f36804 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala +++ 
b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala @@ -19,7 +19,6 @@ object RemoteRouterSpec { class RemoteRouterSpec extends AkkaSpec(""" akka { actor.provider = "akka.remote.RemoteActorRefProvider" - cluster.nodename = Nonsense remote.server { hostname = localhost port = 12345 diff --git a/akka-samples/akka-sample-remote/src/main/resources/application.conf b/akka-samples/akka-sample-remote/src/main/resources/application.conf index 6f50121cf4..ce550b33eb 100644 --- a/akka-samples/akka-sample-remote/src/main/resources/application.conf +++ b/akka-samples/akka-sample-remote/src/main/resources/application.conf @@ -4,7 +4,6 @@ calculator { akka { remote.server.port = 2552 - cluster.nodename = "n1" } } //#calculator @@ -15,7 +14,6 @@ remotelookup { akka { remote.server.port = 2553 - cluster.nodename = "n2" } } //#remotelookup @@ -34,7 +32,6 @@ remotecreation { } remote.server.port = 2554 - cluster.nodename = "n3" } } //#remotecreation From 24a6a66ca028b8370583a95b31a688e625edd76d Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 15:31:59 +0100 Subject: [PATCH 75/84] Making sure that if the second Future fails, the returned future also fails --- akka-actor/src/main/scala/akka/dispatch/Future.scala | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index a2deacf8a8..239de2f0e9 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -421,16 +421,13 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { /** * Returns a new Future that will either hold the successful value of this Future, - * or, it this Future fails, it will hold the successful result of "that" Future. - * - * This means that if this Future never completes at all, then the returned Future - * won't be completed either. + * or, if this Future fails, it will hold the result of "that" Future. 
*/ def or[U >: T](that: Future[U]): Future[U] = { val p = Promise[U]() onComplete { case r @ Right(_) ⇒ p complete r - case _ ⇒ that onSuccess { case r ⇒ p success r } + case _ ⇒ p completeWith that } p } From a019c9e8a61bc920d1efa839c2c30821fbf4ae60 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 15:36:24 +0100 Subject: [PATCH 76/84] Adding tests for the chaining --- .../src/test/scala/akka/dispatch/FutureSpec.scala | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index 77dbbd5fa7..8048526f2a 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -49,11 +49,18 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa "never completed" must { behave like emptyFuture(_(Promise())) "return supplied value on timeout" in { + val f = new RuntimeException("br0ken") val timedOut = Promise.successful[String]("Timedout") - val promise = Promise.failed[String](new RuntimeException("br0ken")) or timedOut + val promise1 = Promise.failed[String](f) or timedOut val promise2 = timedOut or Promise[String]() - Await.result(promise, timeout.duration) must be("Timedout") + val promise3 = Promise.failed[String](f) or Promise.failed[String](f) or timedOut + val promise4 = Promise.failed[String](f) or Promise.failed[String](new RuntimeException("last")) + Await.result(promise1, timeout.duration) must be("Timedout") Await.result(promise2, timeout.duration) must be("Timedout") + Await.result(promise3, timeout.duration) must be("Timedout") + intercept[RuntimeException] { + Await.result(promise4, timeout.duration) + }.getMessage must be("last") } } "completed with a result" must { From d95e0629b54f3c675a8ca7390893618b4b621d13 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 17 Jan 2012 15:53:12 +0100 Subject: [PATCH 77/84] Removed the RoutedActorRef from RouteeProvider constructor. See #1606 --- .../src/main/scala/akka/routing/Routing.scala | 47 ++++++++++++++----- .../akka/routing/RemoteRouterConfig.scala | 7 ++- 2 files changed, 38 insertions(+), 16 deletions(-) diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 7dea90367d..2bbb819b2d 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -33,18 +33,30 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup private var _routees: IndexedSeq[ActorRef] = IndexedSeq.empty[ActorRef] // this MUST be initialized during createRoute def routees = _routees - def addRoutees(newRoutees: IndexedSeq[ActorRef]) { + /** + * Adds the routees to existing routees. + * Adds death watch of the routees so that they are removed when terminated. + * Not thread safe, but intended to be called from protected points, such as + * `RouterConfig.createRoute` and `Resizer.resize` + */ + private[akka] def addRoutees(newRoutees: IndexedSeq[ActorRef]) { _routees = _routees ++ newRoutees // subscribe to Terminated messages for all route destinations, to be handled by Router actor newRoutees foreach underlying.watch } - def removeRoutees(abandonedRoutees: IndexedSeq[ActorRef]) { + /** + * Adds the routees to existing routees. + * Removes death watch of the routees. Doesn't stop the routees. 
+ * Not thread safe, but intended to be called from protected points, such as + * `Resizer.resize` + */ + private[akka] def removeRoutees(abandonedRoutees: IndexedSeq[ActorRef]) { _routees = _routees diff abandonedRoutees abandonedRoutees foreach underlying.unwatch } - private val routeeProvider = _props.routerConfig.createRouteeProvider(this, actorContext) + private val routeeProvider = _props.routerConfig.createRouteeProvider(actorContext) val route = _props.routerConfig.createRoute(routeeProps, routeeProvider) // initial resize, before message send resize() @@ -123,8 +135,7 @@ trait RouterConfig { def createRoute(routeeProps: Props, routeeProvider: RouteeProvider): Route - protected[akka] def createRouteeProvider(ref: RoutedActorRef, context: ActorContext) = - new RouteeProvider(ref, context, resizer) + def createRouteeProvider(context: ActorContext) = new RouteeProvider(context, resizer) def createActor(): Router = new Router {} @@ -151,28 +162,38 @@ trait RouterConfig { * Uses `context.actorOf` to create routees from nrOfInstances property * and `context.actorFor` lookup routees from paths. */ -class RouteeProvider(ref: RoutedActorRef, val context: ActorContext, val resizer: Option[Resizer]) { +class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) { + /** - * Adds new routees to the router. + * Adds the routees to the router. + * Adds death watch of the routees so that they are removed when terminated. + * Not thread safe, but intended to be called from protected points, such as + * `RouterConfig.createRoute` and `Resizer.resize`. */ def registerRoutees(routees: IndexedSeq[ActorRef]): Unit = { - context.self.asInstanceOf[RoutedActorRef].addRoutees(routees) + routedRef.addRoutees(routees) } /** - * Adds new routees to the router. + * Adds the routees to the router. + * Adds death watch of the routees so that they are removed when terminated. + * Not thread safe, but intended to be called from protected points, such as + * `RouterConfig.createRoute` and `Resizer.resize`. * Java API. */ - protected def registerRoutees(routees: java.util.List[ActorRef]): Unit = { + def registerRoutees(routees: java.util.List[ActorRef]): Unit = { import scala.collection.JavaConverters._ registerRoutees(routees.asScala.toIndexedSeq) } /** * Removes routees from the router. This method doesn't stop the routees. + * Removes death watch of the routees. + * Not thread safe, but intended to be called from protected points, such as + * `Resizer.resize`. 
*/ def unregisterRoutees(routees: IndexedSeq[ActorRef]): Unit = { - context.self.asInstanceOf[RoutedActorRef].removeRoutees(routees) + routedRef.removeRoutees(routees) } def createRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] = @@ -193,7 +214,9 @@ class RouteeProvider(ref: RoutedActorRef, val context: ActorContext, val resizer /** * All routees of the router */ - def routees: IndexedSeq[ActorRef] = ref.routees + def routees: IndexedSeq[ActorRef] = routedRef.routees + + private def routedRef = context.self.asInstanceOf[RoutedActorRef] } diff --git a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala index 43e6c0d38f..dffb874be6 100644 --- a/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala +++ b/akka-remote/src/main/scala/akka/routing/RemoteRouterConfig.scala @@ -22,8 +22,7 @@ import akka.remote.RemoteAddressExtractor */ class RemoteRouterConfig(local: RouterConfig, nodes: Iterable[String]) extends RouterConfig { - override protected[akka] def createRouteeProvider(ref: RoutedActorRef, context: ActorContext) = - new RemoteRouteeProvider(nodes, ref, context, resizer) + override def createRouteeProvider(context: ActorContext) = new RemoteRouteeProvider(nodes, context, resizer) override def createRoute(routeeProps: Props, routeeProvider: RouteeProvider): Route = { local.createRoute(routeeProps, routeeProvider) @@ -41,8 +40,8 @@ class RemoteRouterConfig(local: RouterConfig, nodes: Iterable[String]) extends R * * Routee paths may not be combined with remote target nodes. */ -class RemoteRouteeProvider(nodes: Iterable[String], _ref: RoutedActorRef, _context: ActorContext, _resizer: Option[Resizer]) - extends RouteeProvider(_ref, _context, _resizer) { +class RemoteRouteeProvider(nodes: Iterable[String], _context: ActorContext, _resizer: Option[Resizer]) + extends RouteeProvider(_context, _resizer) { // need this iterator as instance variable since Resizer may call createRoutees several times private val nodeAddressIter = { From 3f0b1772c82799eb2cd0fc3bb87c093ff7b5f506 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 17 Jan 2012 15:54:56 +0100 Subject: [PATCH 78/84] Simplified the test cases for Future.or --- .../test/scala/akka/dispatch/FutureSpec.scala | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index 8048526f2a..f8aa76a7ca 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -49,17 +49,16 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa "never completed" must { behave like emptyFuture(_(Promise())) "return supplied value on timeout" in { - val f = new RuntimeException("br0ken") + val failure = Promise.failed[String](new RuntimeException("br0ken")) + val otherFailure = Promise.failed[String](new RuntimeException("last")) + val empty = Promise[String]() val timedOut = Promise.successful[String]("Timedout") - val promise1 = Promise.failed[String](f) or timedOut - val promise2 = timedOut or Promise[String]() - val promise3 = Promise.failed[String](f) or Promise.failed[String](f) or timedOut - val promise4 = Promise.failed[String](f) or Promise.failed[String](new RuntimeException("last")) - Await.result(promise1, timeout.duration) must be("Timedout") - 
Await.result(promise2, timeout.duration) must be("Timedout") - Await.result(promise3, timeout.duration) must be("Timedout") + + Await.result(failure or timedOut, timeout.duration) must be("Timedout") + Await.result(timedOut or empty, timeout.duration) must be("Timedout") + Await.result(failure or failure or timedOut, timeout.duration) must be("Timedout") intercept[RuntimeException] { - Await.result(promise4, timeout.duration) + Await.result(failure or otherFailure, timeout.duration) }.getMessage must be("last") } } From d62902c43c18bc85c254ee7421b1561c38f3eb1d Mon Sep 17 00:00:00 2001 From: Roland Kuhn Date: Tue, 17 Jan 2012 15:58:34 +0100 Subject: [PATCH 79/84] =?UTF-8?q?fix=20a=20few=20typos=20in=20ActorSystem?= =?UTF-8?q?=E2=80=99s=20scaladoc.=20We=20need=20those=20compiled=20snippet?= =?UTF-8?q?s=20NOW.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 3ed6552497..a84d080536 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -118,9 +118,9 @@ object ActorSystem { * system.actorOf(props) * * // Scala - * system.actorOf(Props[MyActor]("name") - * system.actorOf(Props[MyActor] - * system.actorOf(Props(new MyActor(...)) + * system.actorOf(Props[MyActor], "name") + * system.actorOf(Props[MyActor]) + * system.actorOf(Props(new MyActor(...))) * * // Java * system.actorOf(MyActor.class); From 517fceae34a5083c5b1c67b66351d65dba92c7dd Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 17 Jan 2012 17:28:57 +0100 Subject: [PATCH 80/84] Adjust pool size of default-dispatcher. 
See #1654 * Changed reference.conf core-pool-size-min = 6 core-pool-size-factor = 3.0 core-pool-size-max = 64 max-pool-size-min = 6 max-pool-size-factor = 3.0 max-pool-size-max = 64 * Limited to smaller pool size in AkkaSpec * Adjusted some tests that needed more threads --- .../akka/actor/LocalActorRefProviderSpec.scala | 15 ++++++++++++++- .../src/test/scala/akka/config/ConfigSpec.scala | 4 ++-- .../akka/routing/ConfiguredLocalRoutingSpec.scala | 15 ++++++++++++++- akka-actor/src/main/resources/reference.conf | 12 ++++++------ .../transactor/CoordinatedIncrementSpec.scala | 14 +++++++++++++- 5 files changed, 49 insertions(+), 11 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala index 629fb814c4..8b92fd144a 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala @@ -9,8 +9,21 @@ import akka.util.duration._ import akka.util.Timeout import akka.dispatch.{ Await, Future } +object LocalActorRefProviderSpec { + val config = """ + akka { + actor { + default-dispatcher { + core-pool-size-min = 8 + core-pool-size-max = 16 + } + } + } + """ +} + @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class LocalActorRefProviderSpec extends AkkaSpec { +class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.config) { "An LocalActorRefProvider" must { "find actor refs using actorFor" in { diff --git a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala index 5c7f2770c8..9b214423c3 100644 --- a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala @@ -26,8 +26,8 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference) { getString("akka.actor.default-dispatcher.type") must equal("Dispatcher") getString("akka.actor.default-dispatcher.name") must equal("default-dispatcher") getMilliseconds("akka.actor.default-dispatcher.keep-alive-time") must equal(60 * 1000) - getDouble("akka.actor.default-dispatcher.core-pool-size-factor") must equal(8.0) - getDouble("akka.actor.default-dispatcher.max-pool-size-factor") must equal(8.0) + getDouble("akka.actor.default-dispatcher.core-pool-size-factor") must equal(3.0) + getDouble("akka.actor.default-dispatcher.max-pool-size-factor") must equal(3.0) getInt("akka.actor.default-dispatcher.task-queue-size") must equal(-1) getString("akka.actor.default-dispatcher.task-queue-type") must equal("linked") getBoolean("akka.actor.default-dispatcher.allow-core-timeout") must equal(true) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala index 032c4da921..f213379d17 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala @@ -7,8 +7,21 @@ import akka.testkit._ import akka.util.duration._ import akka.dispatch.Await +object ConfiguredLocalRoutingSpec { + val config = """ + akka { + actor { + default-dispatcher { + core-pool-size-min = 8 + core-pool-size-max = 16 + } + } + } + """ +} + @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class ConfiguredLocalRoutingSpec extends AkkaSpec with DefaultTimeout with 
ImplicitSender { +class ConfiguredLocalRoutingSpec extends AkkaSpec(ConfiguredLocalRoutingSpec.config) with DefaultTimeout with ImplicitSender { val deployer = system.asInstanceOf[ActorSystemImpl].provider.deployer diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index f6b7188192..efbaff0afc 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -165,23 +165,23 @@ akka { keep-alive-time = 60s # minimum number of threads to cap factor-based core number to - core-pool-size-min = 8 + core-pool-size-min = 6 # No of core threads ... ceil(available processors * factor) - core-pool-size-factor = 8.0 + core-pool-size-factor = 3.0 # maximum number of threads to cap factor-based number to - core-pool-size-max = 4096 + core-pool-size-max = 64 # Hint: max-pool-size is only used for bounded task queues # minimum number of threads to cap factor-based max number to - max-pool-size-min = 8 + max-pool-size-min = 6 # Max no of threads ... ceil(available processors * factor) - max-pool-size-factor = 8.0 + max-pool-size-factor = 3.0 # maximum number of threads to cap factor-based max number to - max-pool-size-max = 4096 + max-pool-size-max = 64 # Specifies the bounded capacity of the task queue (< 1 == unbounded) task-queue-size = -1 diff --git a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala index 47067d3595..c6e129f89b 100644 --- a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala +++ b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala @@ -14,6 +14,18 @@ import akka.testkit._ import scala.concurrent.stm._ object CoordinatedIncrement { + + val config = """ + akka { + actor { + default-dispatcher { + core-pool-size-min = 5 + core-pool-size-max = 16 + } + } + } + """ + case class Increment(friends: Seq[ActorRef]) case object GetCount @@ -49,7 +61,7 @@ object CoordinatedIncrement { } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class CoordinatedIncrementSpec extends AkkaSpec with BeforeAndAfterAll { +class CoordinatedIncrementSpec extends AkkaSpec(CoordinatedIncrement.config) with BeforeAndAfterAll { import CoordinatedIncrement._ implicit val timeout = Timeout(5.seconds.dilated) From e5a8b7ae927f75bb54334498dcfa9fb5d3f4ed95 Mon Sep 17 00:00:00 2001 From: Eugene Vigdorchik Date: Mon, 16 Jan 2012 15:38:23 +0400 Subject: [PATCH 81/84] Add schoir description. --- akka-docs/dev/multi-jvm-testing.rst | 32 +++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/akka-docs/dev/multi-jvm-testing.rst b/akka-docs/dev/multi-jvm-testing.rst index 9fc42349f2..1fddeb439b 100644 --- a/akka-docs/dev/multi-jvm-testing.rst +++ b/akka-docs/dev/multi-jvm-testing.rst @@ -345,3 +345,35 @@ you are on another platform you might need to install it yourself. Here is a port: http://info.iet.unipi.it/~luigi/dummynet + + +Running tests on many machines +============================== + +The same tests that are run on a single machine using sbt-multi-jvm can be run on multiple +machines using schoir (read the same as ``esquire``) plugin. The plugin is included just like sbt-multi-jvm:: + + resolvers += Classpaths.typesafeResolver + + addSbtPlugin("com.typesafe.schoir" % "schoir" % "0.1.1") + +The interaction with the plugin is through ``schoir:master`` input task. 
This input task optionally accepts the +path to the file with the following properties:: + + git.url=git@github.com:jboner/akka.git + external.addresses.for.ssh=host1:port1,...,hostN:portN + internal.host.names=host1,...,hostN + +Alternative to specifying the property file, one can set respective settings in the build file:: + + gitUrl := "git@github.com:jboner/akka.git", + machinesExt := List(InetAddress("host1", port1)), + machinesInt := List("host1") + +The reason the first property is called ``git.url`` is that the plugin sets up a temporary remote branch on git +to test against the local working copy. After the tests are finished the changes are regained and the branch +is deleted. + +Each test machine starts a node in zookeeper server ensemble that can be used for synchronization. Since +the server is started on a fixed port, it's not currently possible to run more than one test session on the +same machine at the same time. \ No newline at end of file From 49de4eaac21a86d74cdb7a04e10d3031d727b38e Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 18 Jan 2012 09:27:11 +0100 Subject: [PATCH 82/84] DOC: Fixed wrong log samples in migration guide --- akka-docs/project/migration-guide-1.3.x-2.0.x.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/akka-docs/project/migration-guide-1.3.x-2.0.x.rst b/akka-docs/project/migration-guide-1.3.x-2.0.x.rst index 0c8d239d03..353e0c0ddb 100644 --- a/akka-docs/project/migration-guide-1.3.x-2.0.x.rst +++ b/akka-docs/project/migration-guide-1.3.x-2.0.x.rst @@ -253,11 +253,11 @@ v2.0:: import akka.event.Logging val log = Logging(context.system, this) - log.error(exception, this, message) - log.warning(this, message) - log.info(this, message) - log.debug(this, message) - log.debug(this, "Processing took {} ms", duration) + log.error(exception, message) + log.warning(message) + log.info(message) + log.debug(message) + log.debug("Processing took {} ms", duration) Documentation: From 16c41269bd9b4766290cfa6e739a291964a6824c Mon Sep 17 00:00:00 2001 From: Eugene Vigdorchik Date: Tue, 17 Jan 2012 13:04:16 +0400 Subject: [PATCH 83/84] Brought multi-jvm testing doc up-to-date. --- akka-docs/dev/multi-jvm-testing.rst | 97 +++++++++++++---------------- 1 file changed, 42 insertions(+), 55 deletions(-) diff --git a/akka-docs/dev/multi-jvm-testing.rst b/akka-docs/dev/multi-jvm-testing.rst index 1fddeb439b..85b8ae5027 100644 --- a/akka-docs/dev/multi-jvm-testing.rst +++ b/akka-docs/dev/multi-jvm-testing.rst @@ -22,17 +22,17 @@ You can add it as a plugin by adding the following to your plugins/build.sbt:: resolvers += Classpaths.typesafeResolver - addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.7") + addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.9") You can then add multi-JVM testing to a project by including the ``MultiJvm`` -settings and config. For example, here is how the akka-cluster project adds +settings and config. 
For example, here is how the akka-remote project adds multi-JVM testing:: import MultiJvmPlugin.{ MultiJvm, extraOptions } lazy val cluster = Project( - id = "akka-cluster", - base = file("akka-cluster"), + id = "akka-remote", + base = file("akka-remote"), settings = defaultSettings ++ MultiJvmPlugin.settings ++ Seq( extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src => (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dconfig.file=" + _.absolutePath).toSeq @@ -52,26 +52,26 @@ Running tests The multi-jvm tasks are similar to the normal tasks: ``test``, ``test-only``, and ``run``, but are under the ``multi-jvm`` configuration. -So in Akka, to run all the multi-JVM tests in the akka-cluster project use (at +So in Akka, to run all the multi-JVM tests in the akka-remote project use (at the sbt prompt): .. code-block:: none - akka-cluster/multi-jvm:test + akka-remote/multi-jvm:test -Or one can change to the ``akka-cluster`` project first, and then run the +Or one can change to the ``akka-remote`` project first, and then run the tests: .. code-block:: none - project akka-cluster + project akka-remote multi-jvm:test To run individual tests use ``test-only``: .. code-block:: none - multi-jvm:test-only akka.cluster.deployment.Deployment + multi-jvm:test-only akka.remote.RandomRoutedRemoteActor More than one test name can be listed to run multiple specific tests. Tab-completion in sbt makes it easy to complete the test names. @@ -81,7 +81,7 @@ options after the test names and ``--``. For example: .. code-block:: none - multi-jvm:test-only akka.cluster.deployment.Deployment -- -Dsome.option=something + multi-jvm:test-only akka.remote.RandomRoutedRemoteActor -- -Dsome.option=something Creating application tests @@ -233,18 +233,23 @@ To run just these tests you would call ``multi-jvm:test-only sample.Spec`` at the sbt prompt. -ZookeeperBarrier -================ +Barriers +======== When running multi-JVM tests it's common to need to coordinate timing across -nodes. To do this there is a ZooKeeper-based double-barrier (there is both an -entry barrier and an exit barrier). ClusterNodes also have support for creating -barriers easily. To wait at the entry use the ``enter`` method. To wait at the -exit use the ``leave`` method. It's also possible t pass a block of code which +nodes. To do this, multi-JVM test framework has the notion of a double-barrier +(there is both an entry barrier and an exit barrier). +To wait at the entry use the ``enter`` method. To wait at the +exit use the ``leave`` method. It's also possible to pass a block of code which will be run between the barriers. -When creating a barrier you pass it a name and the number of nodes that are -expected to arrive at the barrier. You can also pass a timeout. The default +There are 2 implementations of the barrier: one is used for coordinating JVMs +running on a single machine and is based on local files, another used in a distributed +scenario (see below) and is based on apache ZooKeeper. These two cases +are differentiated with ``test.hosts`` property defined. The choice for a proper barrier +implementation is made in ``AkkaRemoteSpec`` which is a base class for all multi-JVM tests. + +When creating a barrier you pass it a name. You can also pass a timeout. The default timeout is 60 seconds. 
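As a rough illustration of the forms just described (a minimal sketch only; the exact ``barrier`` signature, including how a non-default timeout is passed, is an assumption here and may differ in your version of the test framework)::

    // every node calls this and waits until all nodes have arrived,
    // using the default 60 second timeout
    barrier("nodes-started")

    // assumed block form: the block is run between the entry and the exit barrier
    barrier("deploy") {
      system.actorOf(Props[SomeActor], "service-hello")
    }
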
Here is an example of coordinating the starting of two nodes and then running @@ -258,21 +263,17 @@ something in coordination:: import akka.cluster._ - object SampleMultiJvmSpec { + object SampleMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec { val NrOfNodes = 2 + def commonConfig = ConfigFactory.parseString(""" + // Declare your configuration here. + """) } - class SampleMultiJvmNode1 extends WordSpec with MustMatchers with BeforeAndAfterAll { + class SampleMultiJvmNode1 extends AkkaRemoteSpec(SampleMultiJvmSpec.nodeConfigs(0)) + with WordSpec with MustMatchers { import SampleMultiJvmSpec._ - override def beforeAll() = { - Cluster.startLocalCluster() - } - - override def afterAll() = { - Cluster.shutdownLocalCluster() - } - "A cluster" must { "have jvm options" in { @@ -281,16 +282,15 @@ something in coordination:: } "be able to start all nodes" in { - LocalCluster.barrier("start", NrOfNodes) { - Cluster.node.start() - } - Cluster.node.isRunning must be(true) - Cluster.node.shutdown() + barrier("start") + println("All nodes are started!") + barrier("end") } } } - class SampleMultiJvmNode2 extends WordSpec with MustMatchers { + class SampleMultiJvmNode2 extends AkkaRemoteSpec(SampleMultiJvmSpec.nodeConfigs(1)) + with WordSpec with MustMatchers { import SampleMultiJvmSpec._ "A cluster" must { @@ -301,30 +301,13 @@ something in coordination:: } "be able to start all nodes" in { - LocalCluster.barrier("start", NrOfNodes) { - Cluster.node.start() - } - Cluster.node.isRunning must be(true) - Cluster.node.shutdown() + barrier("start") + println("All nodes are started!") + barrier("end") } } } -An example output from this would be: - -.. code-block:: none - - > multi-jvm:test-only sample.Sample - ... - [info] Starting JVM-Node1 for example.SampleMultiJvmNode1 - [info] Starting JVM-Node2 for example.SampleMultiJvmNode2 - [JVM-Node1] Loading config [akka.conf] from the application classpath. - [JVM-Node2] Loading config [akka.conf] from the application classpath. - ... - [JVM-Node2] Hello from node 2 - [JVM-Node1] Hello from node 1 - [success] - NetworkFailureTest ================== @@ -376,4 +359,8 @@ is deleted. Each test machine starts a node in zookeeper server ensemble that can be used for synchronization. Since the server is started on a fixed port, it's not currently possible to run more than one test session on the -same machine at the same time. \ No newline at end of file +same machine at the same time. + +The machines that are used for testing (slaves) should have ssh access to the outside world and be able to talk +to each other with the internal addresses given. On the master machine ssh client is required. Obviosly git +and sbt should be installed on both master and slave machines. \ No newline at end of file From 1b4983f8655601ddf39d09f44c00ca498135f00f Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 18 Jan 2012 10:10:42 +0100 Subject: [PATCH 84/84] DOC: fixed minor path err --- akka-docs/general/addressing.rst | 244 +++++++++++++++---------------- 1 file changed, 122 insertions(+), 122 deletions(-) diff --git a/akka-docs/general/addressing.rst b/akka-docs/general/addressing.rst index cc87da61f3..86c22fdaea 100644 --- a/akka-docs/general/addressing.rst +++ b/akka-docs/general/addressing.rst @@ -21,68 +21,68 @@ within an actor system, please read on for the details. What is an Actor Reference? --------------------------- -An actor reference is a subtype of :class:`ActorRef`, whose foremost purpose is -to support sending messages to the actor it represents. 
Each actor has access -to its canonical (local) reference through the :meth:`self` field; this -reference is also included as sender reference by default for all messages sent -to other actors. Conversely, during message processing the actor has access to -a reference representing the sender of the current message through the +An actor reference is a subtype of :class:`ActorRef`, whose foremost purpose is +to support sending messages to the actor it represents. Each actor has access +to its canonical (local) reference through the :meth:`self` field; this +reference is also included as sender reference by default for all messages sent +to other actors. Conversely, during message processing the actor has access to +a reference representing the sender of the current message through the :meth:`sender` field. -There are several different types of actor references that are supported +There are several different types of actor references that are supported depending on the configuration of the actor system: -- Purely local actor references are used by actor systems which are not - configured to support networking functions. These actor references cannot +- Purely local actor references are used by actor systems which are not + configured to support networking functions. These actor references cannot ever be sent across a network connection while retaining their functionality. -- Local actor references when remoting is enabled are used by actor systems - which support networking functions for those references which represent - actors within the same JVM. In order to be recognizable also when sent to - other network nodes, these references include protocol and remote addressing +- Local actor references when remoting is enabled are used by actor systems + which support networking functions for those references which represent + actors within the same JVM. In order to be recognizable also when sent to + other network nodes, these references include protocol and remote addressing information. -- There is a subtype of local actor references which is used for routers (i.e. - actors mixing in the :class:`Router` trait). Its logical structure is the - same as for the aforementioned local references, but sending a message to +- There is a subtype of local actor references which is used for routers (i.e. + actors mixing in the :class:`Router` trait). Its logical structure is the + same as for the aforementioned local references, but sending a message to them dispatches to one of their children directly instead. -- Remote actor references represent actors which are reachable using remote - communication, i.e. sending messages to them will serialize the messages +- Remote actor references represent actors which are reachable using remote + communication, i.e. sending messages to them will serialize the messages transparently and send them to the other JVM. -- There are several special types of actor references which behave like local +- There are several special types of actor references which behave like local actor references for all practical purposes: - - :class:`AskActorRef` is the special representation of a :meth:`Promise` for - the purpose of being completed by the response from an actor; it is created + - :class:`AskActorRef` is the special representation of a :meth:`Promise` for + the purpose of being completed by the response from an actor; it is created by the :meth:`ActorRef.ask` invocation. 
- - :class:`DeadLetterActorRef` is the default implementation of the dead + - :class:`DeadLetterActorRef` is the default implementation of the dead letters service, where all messages are re-routed whose routees are shut down or non-existent. -- And then there are some one-off internal implementations which you should +- And then there are some one-off internal implementations which you should never really see: - - There is an actor reference which does not represent an actor but acts only - as a pseudo-supervisor for the root guardian, we call it “the one who walks + - There is an actor reference which does not represent an actor but acts only + as a pseudo-supervisor for the root guardian, we call it “the one who walks the bubbles of space-time”. - - The first logging service started before actually firing up actor creation - facilities is a fake actor reference which accepts log events and prints + - The first logging service started before actually firing up actor creation + facilities is a fake actor reference which accepts log events and prints them directly to standard output; it is :class:`Logging.StandardOutLogger`. -- **(Future Extension)** Cluster actor references represent clustered actor - services which may be replicated, migrated or load-balanced across multiple - cluster nodes. As such they are virtual names which the cluster service +- **(Future Extension)** Cluster actor references represent clustered actor + services which may be replicated, migrated or load-balanced across multiple + cluster nodes. As such they are virtual names which the cluster service translates into local or remote actor references as appropriate. What is an Actor Path? ---------------------- -Since actors are created in a strictly hierarchical fashion, there exists a -unique sequence of actor names given by recursively following the supervision -links between child and parent down towards the root of the actor system. This -sequence can be seen as enclosing folders in a file system, hence we adopted -the name “path” to refer to it. As in some real file-systems there also are -“symbolic links”, i.e. one actor may be reachable using more than one path, -where all but one involve some translation which decouples part of the path -from the actor’s actual supervision ancestor line; these specialities are +Since actors are created in a strictly hierarchical fashion, there exists a +unique sequence of actor names given by recursively following the supervision +links between child and parent down towards the root of the actor system. This +sequence can be seen as enclosing folders in a file system, hence we adopted +the name “path” to refer to it. As in some real file-systems there also are +“symbolic links”, i.e. one actor may be reachable using more than one path, +where all but one involve some translation which decouples part of the path +from the actor’s actual supervision ancestor line; these specialities are described in the sub-sections to follow. An actor path consists of an anchor, which identifies the actor system, @@ -93,80 +93,80 @@ are separated by slashes. Actor Path Anchors ^^^^^^^^^^^^^^^^^^ -Each actor path has an address component, describing the protocol and location -by which the corresponding actor is reachable, followed by the names of the +Each actor path has an address component, describing the protocol and location +by which the corresponding actor is reachable, followed by the names of the actors in the hierarchy from the root up. 
Examples are::

-   "akka://my-system/app/service-a/worker1" // purely local
-   "akka://my-system@serv.example.com:5678/app/service-b" // local or remote
+   "akka://my-system/user/service-a/worker1" // purely local
+   "akka://my-system@serv.example.com:5678/user/service-b" // local or remote
   "cluster://my-cluster/service-c" // clustered (Future Extension)

-Here, ``akka`` is the default remote protocol for the 2.0 release, and others
-are pluggable. The interpretation of the host & port part (i.e.
-``serv.example.com:5678`` in the example) depends on the transport mechanism
+Here, ``akka`` is the default remote protocol for the 2.0 release, and others
+are pluggable. The interpretation of the host & port part (i.e.
+``serv.example.com:5678`` in the example) depends on the transport mechanism
 used, but it must abide by the URI structural rules.

Logical Actor Paths
^^^^^^^^^^^^^^^^^^^

-The unique path obtained by following the parental supervision links towards
-the root guardian is called the logical actor path. This path matches exactly
-the creation ancestry of an actor, so it is completely deterministic as soon as
-the actor system’s remoting configuration (and with it the address component of
+The unique path obtained by following the parental supervision links towards
+the root guardian is called the logical actor path. This path matches exactly
+the creation ancestry of an actor, so it is completely deterministic as soon as
+the actor system’s remoting configuration (and with it the address component of
 the path) is set.

Physical Actor Paths
^^^^^^^^^^^^^^^^^^^^

-While the logical actor path describes the functional location within one actor
-system, configuration-based remote deployment means that an actor may be
-created on a different network host as its parent, i.e. within a different
-actor system. In this case, following the actor path from the root guardian up
-entails traversing the network, which is a costly operation. Therefore, each
-actor also has a physical path, starting at the root guardian of the actor
-system where the actual actor object resides. Using this path as sender
-reference when querying other actors will let them reply directly to this
+While the logical actor path describes the functional location within one actor
+system, configuration-based remote deployment means that an actor may be
+created on a different network host than its parent, i.e. within a different
+actor system. In this case, following the actor path from the root guardian up
+entails traversing the network, which is a costly operation. Therefore, each
+actor also has a physical path, starting at the root guardian of the actor
+system where the actual actor object resides. Using this path as sender
+reference when querying other actors will let them reply directly to this
 actor, minimizing delays incurred by routing.

-One important aspect is that a physical actor path never spans multiple actor
-systems or JVMs. This means that the logical path (supervision hierarchy) and
-the physical path (actor deployment) of an actor may diverge if one of its
+One important aspect is that a physical actor path never spans multiple actor
+systems or JVMs. This means that the logical path (supervision hierarchy) and
+the physical path (actor deployment) of an actor may diverge if one of its
 ancestors is remotely supervised.
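
To make the address and path discussion above more concrete, here is a small,
hedged Scala sketch (added for illustration, not part of the original patch).
It assumes the Akka 2.0-era API in which :class:`ActorRef` exposes a ``path``
member and :meth:`ActorSystem.actorFor` accepts an absolute path string; the
names ``Worker``, ``PathDemo``, ``my-system`` and ``service-a`` are made up for
the example::

  import akka.actor.{ Actor, ActorSystem, Props }

  class Worker extends Actor {
    def receive = {
      case _ =>
        // self.path is this actor's canonical path, e.g.
        // akka://my-system/user/service-a (address part plus hierarchy names)
        println("I live at: " + self.path)
    }
  }

  object PathDemo extends App {
    val system = ActorSystem("my-system")

    // created below the "/user" guardian, so the path ends in /user/service-a
    val serviceA = system.actorOf(Props[Worker], "service-a")
    serviceA ! "hello"

    // the same actor looked up again via its absolute path string
    val sameRef = system.actorFor("akka://my-system/user/service-a")
    sameRef ! "hello again"

    // delivery is asynchronous, so in a throwaway demo the println calls
    // may race with shutdown; a real test would await replies first
    system.shutdown()
  }

For a purely local system the address part is simply ``akka://my-system``; with
remoting enabled it would also carry host and port, as in the path examples
above, and the same path string could then be resolved from another JVM.
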
Virtual Actor Paths **(Future Extension)** ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In order to be able to replicate and migrate actors across a cluster of Akka -nodes, another level of indirection has to be introduced. The cluster component -therefore provides a translation from virtual paths to physical paths which may +In order to be able to replicate and migrate actors across a cluster of Akka +nodes, another level of indirection has to be introduced. The cluster component +therefore provides a translation from virtual paths to physical paths which may change in reaction to node failures, cluster rebalancing, etc. -*This area is still under active development, expect updates in this section +*This area is still under active development, expect updates in this section for the 2.1 release.* How are Actor References obtained? ---------------------------------- -There are two general categories to how actor references may be obtained: by -creating actors or by looking them up, where the latter functionality comes in -the two flavours of creating actor references from concrete actor paths and +There are two general categories to how actor references may be obtained: by +creating actors or by looking them up, where the latter functionality comes in +the two flavours of creating actor references from concrete actor paths and querying the logical actor hierarchy. -*While local and remote actor references and their paths work in the same way -concerning the facilities mentioned below, the exact semantics of clustered -actor references and their paths—while certainly as similar as possible—may -differ in certain aspects, owing to the virtual nature of those paths. Expect +*While local and remote actor references and their paths work in the same way +concerning the facilities mentioned below, the exact semantics of clustered +actor references and their paths—while certainly as similar as possible—may +differ in certain aspects, owing to the virtual nature of those paths. Expect updates for the 2.1 release.* Creating Actors ^^^^^^^^^^^^^^^ -An actor system is typically started by creating actors above the guardian -actor using the :meth:`ActorSystem.actorOf` method and then using -:meth:`ActorContext.actorOf` from within the created actors to spawn the actor -tree. These methods return a reference to the newly created actor. Each actor -has direct access to references for its parent, itself and its children. These -references may be sent within messages to other actors, enabling those to reply +An actor system is typically started by creating actors above the guardian +actor using the :meth:`ActorSystem.actorOf` method and then using +:meth:`ActorContext.actorOf` from within the created actors to spawn the actor +tree. These methods return a reference to the newly created actor. Each actor +has direct access to references for its parent, itself and its children. These +references may be sent within messages to other actors, enabling those to reply directly. Looking up Actors by Concrete Path @@ -187,12 +187,12 @@ is not visible to client code. Absolute vs. Relative Paths ``````````````````````````` -In addition to :meth:`ActorSystem.actorFor` there is also -:meth:`ActorContext.actorFor`, which is available inside any actor as -``context.actorFor``. This yields an actor reference much like its twin on -:class:`ActorSystem`, but instead of looking up the path starting from the root -of the actor tree it starts out on the current actor. 
Path elements consisting
-of two dots (``".."``) may be used to access the parent actor. You can for
+In addition to :meth:`ActorSystem.actorFor` there is also
+:meth:`ActorContext.actorFor`, which is available inside any actor as
+``context.actorFor``. This yields an actor reference much like its twin on
+:class:`ActorSystem`, but instead of looking up the path starting from the root
+of the actor tree it starts out on the current actor. Path elements consisting
+of two dots (``".."``) may be used to access the parent actor. You can for
example send a message to a specific sibling::

  context.actorFor("../brother") ! msg

@@ -208,25 +208,25 @@ will work as expected.
Querying the Logical Actor Hierarchy
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-Since the actor system forms a file-system like hierarchy, matching on paths is
-possible in the same was as supported by Unix shells: you may replace (parts
-of) path element names with wildcards (`«*»` and `«?»`) to formulate a
-selection which may match zero or more actual actors. Because the result is not
-a single actor reference, it has a different type :class:`ActorSelection` and
-does not support the full set of operations an :class:`ActorRef` does.
-Selections may be formulated using the :meth:`ActorSystem.actorSelection` and
+Since the actor system forms a file-system like hierarchy, matching on paths is
+possible in the same way as supported by Unix shells: you may replace (parts
+of) path element names with wildcards (`«*»` and `«?»`) to formulate a
+selection which may match zero or more actual actors. Because the result is not
+a single actor reference, it has a different type :class:`ActorSelection` and
+does not support the full set of operations an :class:`ActorRef` does.
+Selections may be formulated using the :meth:`ActorSystem.actorSelection` and
 :meth:`ActorContext.actorSelection` methods and do support sending messages::

  context.actorSelection("../*") ! msg

-will send `msg` to all siblings including the current actor. As for references
-obtained using `actorFor`, a traversal of the supervision hierarchy is done in
-order to perform the message send. As the exact set of actors which match a
-selection may change even while a message is making its way to the recipients,
-it is not possible to watch a selection for liveliness changes. In order to do
-that, resolve the uncertainty by sending a request and gathering all answers,
-extracting the sender references, and then watch all discovered concrete
-actors. This scheme of resolving a selection may be improved upon in a future
+will send `msg` to all siblings including the current actor. As for references
+obtained using `actorFor`, a traversal of the supervision hierarchy is done in
+order to perform the message send. As the exact set of actors which match a
+selection may change even while a message is making its way to the recipients,
+it is not possible to watch a selection for liveliness changes. In order to do
+that, resolve the uncertainty by sending a request and gathering all answers,
+extracting the sender references, and then watch all discovered concrete
+actors. This scheme of resolving a selection may be improved upon in a future
 release.

.. _actorOf-vs-actorFor:

@@ -269,50 +269,50 @@ The Interplay with Clustering **(Future Extension)**

*This section is subject to change!*

-When creating a scaled-out actor subtree, a cluster name is created for a
-routed actor reference, where sending to this reference will send to one (or
-more) of the actual actors created in the cluster.
In order for those actors to -be able to query other actors while processing their messages, their sender -reference must be unique for each of the replicas, which means that physical -paths will be used as ``self`` references for these instances. In the case -of replication for achieving fault-tolerance the opposite is required: the -``self`` reference will be a virtual (cluster) path so that in case of +When creating a scaled-out actor subtree, a cluster name is created for a +routed actor reference, where sending to this reference will send to one (or +more) of the actual actors created in the cluster. In order for those actors to +be able to query other actors while processing their messages, their sender +reference must be unique for each of the replicas, which means that physical +paths will be used as ``self`` references for these instances. In the case +of replication for achieving fault-tolerance the opposite is required: the +``self`` reference will be a virtual (cluster) path so that in case of migration or fail-over communication is resumed with the fresh instance. What is the Address part used for? ---------------------------------- -When sending an actor reference across the network, it is represented by its -path. Hence, the path must fully encode all information necessary to send -messages to the underlying actor. This is achieved by encoding protocol, host -and port in the address part of the path string. When an actor system receives -an actor path from a remote node, it checks whether that path’s address matches -the address of this actor system, in which case it will be resolved to the -actor’s local reference. Otherwise, it will be represented by a remote actor +When sending an actor reference across the network, it is represented by its +path. Hence, the path must fully encode all information necessary to send +messages to the underlying actor. This is achieved by encoding protocol, host +and port in the address part of the path string. When an actor system receives +an actor path from a remote node, it checks whether that path’s address matches +the address of this actor system, in which case it will be resolved to the +actor’s local reference. Otherwise, it will be represented by a remote actor reference. Special Paths used by Akka -------------------------- -At the root of the path hierarchy resides the root guardian above which all +At the root of the path hierarchy resides the root guardian above which all other actors are found. The next level consists of the following: -- ``"/user"`` is the guardian actor for all user-created top-level actors; +- ``"/user"`` is the guardian actor for all user-created top-level actors; actors created using :meth:`ActorSystem.actorOf` are found at the next level. -- ``"/system"`` is the guardian actor for all system-created top-level actors, - e.g. logging listeners or actors automatically deployed by configuration at +- ``"/system"`` is the guardian actor for all system-created top-level actors, + e.g. logging listeners or actors automatically deployed by configuration at the start of the actor system. -- ``"/deadLetters"`` is the dead letter actor, which is where all messages sent to +- ``"/deadLetters"`` is the dead letter actor, which is where all messages sent to stopped or non-existing actors are re-routed. -- ``"/temp"`` is the guardian for all short-lived system-created actors, e.g. +- ``"/temp"`` is the guardian for all short-lived system-created actors, e.g. 
those which are used in the implementation of :meth:`ActorRef.ask`. -- ``"/remote"`` is an artificial path below which all actors reside whose +- ``"/remote"`` is an artificial path below which all actors reside whose supervisors are remote actor references Future extensions: -- ``"/service"`` is an artificial path below which actors can be presented by - means of configuration, i.e. deployed at system start-up or just-in-time +- ``"/service"`` is an artificial path below which actors can be presented by + means of configuration, i.e. deployed at system start-up or just-in-time (triggered by look-up) - ``"/alias"`` is an artificial path below which other actors may be “mounted” (as in the Unix file-system sense) by path—local or remote—to give them