From ce128740abf8272d795d528ce58e043747e7636d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 8 Dec 2011 14:06:20 +0100 Subject: [PATCH 01/27] Updated documentation of Actors (Java). See #1435 * Aligned the Java and Scala documentation for Actors * Implemented hotswap samples in Java, and documented in same way as Scala docs * Improved Actors (Scala) docs * Fixed wrong preRestart and postRestart in UntypedActor * Changed name of Dispatchers.fromConfig to newFromConfig and made it Java friendly * Added ActorRef.ask with Timeout parameter in addition to the timeoutMillis --- .../akka/actor/dispatch/DispatchersSpec.scala | 8 +- .../src/main/scala/akka/actor/ActorRef.scala | 22 +- .../main/scala/akka/actor/UntypedActor.scala | 16 +- .../akka/dispatch/AbstractDispatcher.scala | 2 +- .../scala/akka/dispatch/Dispatchers.scala | 15 +- akka-docs/general/jmm.rst | 2 + .../akka/docs/actor/FirstUntypedActor.java | 17 + .../akka/docs/actor/ImmutableMessage.java | 25 + .../actor/MyReceivedTimeoutUntypedActor.java | 26 + .../code/akka/docs/actor/MyUntypedActor.java | 20 + .../akka/docs/actor/UntypedActorSwapper.java | 53 ++ .../akka/docs/actor/UntypedActorTest.scala | 5 + .../akka/docs/actor/UntypedActorTestBase.java | 239 +++++++ akka-docs/java/untyped-actors.rst | 594 ++++++++++-------- akka-docs/scala/actors.rst | 98 ++- akka-docs/scala/code/ActorDocSpec.scala | 31 +- .../src/main/scala/akka/remote/Remote.scala | 2 +- 17 files changed, 848 insertions(+), 327 deletions(-) create mode 100644 akka-docs/java/code/akka/docs/actor/FirstUntypedActor.java create mode 100644 akka-docs/java/code/akka/docs/actor/ImmutableMessage.java create mode 100644 akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java create mode 100644 akka-docs/java/code/akka/docs/actor/MyUntypedActor.java create mode 100644 akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java create mode 100644 akka-docs/java/code/akka/docs/actor/UntypedActorTest.scala create mode 100644 akka-docs/java/code/akka/docs/actor/UntypedActorTestBase.java diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala index d23bc8ce57..e6ae3c0457 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala @@ -58,13 +58,13 @@ class DispatchersSpec extends AkkaSpec { dispatcher.map(_.throughput) must be(Some(17)) } - "use defined properties when fromConfig" in { - val dispatcher = fromConfig("myapp.mydispatcher", cfg = dispatcherConf) + "use defined properties when newFromConfig" in { + val dispatcher = newFromConfig("myapp.mydispatcher", defaultGlobalDispatcher, dispatcherConf) dispatcher.throughput must be(17) } - "use specific name when fromConfig" in { - val dispatcher = fromConfig("myapp.mydispatcher", cfg = dispatcherConf) + "use specific name when newFromConfig" in { + val dispatcher = newFromConfig("myapp.mydispatcher", defaultGlobalDispatcher, dispatcherConf) dispatcher.name must be("mydispatcher") } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 648e671c50..97e36086db 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -86,13 +86,17 @@ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable * Sends a message asynchronously returns a future 
holding the eventual reply message. *

* NOTE: - * Use this method with care. In most cases it is better to use 'tell' together with the 'getContext().getSender()' to - * implement request/response message exchanges. + * Use this method with care. In most cases it is better to use 'tell' together with the sender + * parameter to implement non-blocking request/response message exchanges. *

- * If you are sending messages using ask then you have to use getContext().sender().tell(...) - * to send a reply message to the original sender. If not then the sender will block until the timeout expires. + * If you are sending messages using ask and using blocking operations on the Future, such as + * 'get', then you have to use getContext().sender().tell(...) + * in the target actor to send a reply message to the original sender, and thereby completing the Future, + * otherwise the sender will block until the timeout expires. */ - def ask(message: AnyRef, timeout: Long): Future[AnyRef] = ?(message, Timeout(timeout)).asInstanceOf[Future[AnyRef]] + def ask(message: AnyRef, timeout: Timeout): Future[AnyRef] = ?(message, timeout).asInstanceOf[Future[AnyRef]] + + def ask(message: AnyRef, timeoutMillis: Long): Future[AnyRef] = ask(message, new Timeout(timeoutMillis)) /** * Forwards the message and passes the original sender actor as the sender. @@ -147,6 +151,14 @@ trait ScalaActorRef { ref: ActorRef ⇒ /** * Sends a message asynchronously, returning a future which may eventually hold the reply. + * NOTE: + * Use this method with care. In most cases it is better to use '!' together with implicit or explicit + * sender parameter to implement non-blocking request/response message exchanges. + *

+ * If you are sending messages using ask and using blocking operations on the Future, such as + * 'get', then you have to use getContext().sender().tell(...) + * in the target actor to send a reply message to the original sender, and thereby completing the Future, + * otherwise the sender will block until the timeout expires. */ def ?(message: Any)(implicit timeout: Timeout): Future[Any] diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala index 3ac3e7770d..8c10a9fcf0 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala @@ -75,30 +75,36 @@ abstract class UntypedActor extends Actor { /** * User overridable callback. *

- * Is called when an Actor is started, this only happens at most once in the life of an actor. + * Is called when an Actor is started. + * Actors are automatically started asynchronously when created. + * Empty default implementation. */ override def preStart() {} /** * User overridable callback. *

- * Is called when 'actor.stop()' is invoked. + * Is called asynchronously after 'actor.stop()' is invoked. + * Empty default implementation. */ override def postStop() {} /** * User overridable callback. *

- * Is called on a crashed Actor right BEFORE it is restarted to allow clean up of resources before Actor is terminated. + * Is called on a crashed Actor right BEFORE it is restarted to allow clean + * up of resources before Actor is terminated. + * By default it calls postStop() */ - override def preRestart(reason: Throwable, lastMessage: Option[Any]) {} + override def preRestart(reason: Throwable, message: Option[Any]) { postStop() } /** * User overridable callback. *

* Is called right AFTER restart on the newly created Actor to allow reinitialization after an Actor crash. + * By default it calls preStart() */ - override def postRestart(reason: Throwable) {} + override def postRestart(reason: Throwable) { preStart() } /** * User overridable callback. diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index b7f2afc8f7..2c51530ee5 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -262,7 +262,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext } /** - * Trait to be used for hooking in new dispatchers into Dispatchers.fromConfig + * Trait to be used for hooking in new dispatchers into Dispatchers.from(cfg: Config) */ abstract class MessageDispatcherConfigurator() { /** diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index c905a7297d..a75eca9101 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -173,12 +173,11 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc new BalancingDispatcher(prerequisites, name, throughput, throughputDeadline, mailboxType, config, settings.DispatcherDefaultShutdown), ThreadPoolConfig()) /** - * Utility function that tries to load the specified dispatcher config from the akka.conf + * Creates a new dispatcher as specified in configuration * or if not defined it uses the supplied dispatcher. - * Uses default values from default-dispatcher, i.e. all options doesn't need to be defined - * in config. + * Uses default values from default-dispatcher, i.e. all options doesn't need to be defined. */ - def fromConfig(key: String, default: ⇒ MessageDispatcher = defaultGlobalDispatcher, cfg: Config = settings.config): MessageDispatcher = { + def newFromConfig(key: String, default: ⇒ MessageDispatcher, cfg: Config): MessageDispatcher = { import scala.collection.JavaConverters._ def simpleName = key.substring(key.lastIndexOf('.') + 1) cfg.hasPath(key) match { @@ -190,6 +189,14 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc } } + /** + * Creates a new dispatcher as specified in configuration, or if not defined it uses + * the default dispatcher. + * Uses default configuration values from default-dispatcher, i.e. all options doesn't + * need to be defined. + */ + def newFromConfig(key: String): MessageDispatcher = newFromConfig(key, defaultGlobalDispatcher, settings.config) + /* * Creates of obtains a dispatcher from a ConfigMap according to the format below. * Uses default values from default-dispatcher. diff --git a/akka-docs/general/jmm.rst b/akka-docs/general/jmm.rst index a84bad2dd2..ecb6dad6f1 100644 --- a/akka-docs/general/jmm.rst +++ b/akka-docs/general/jmm.rst @@ -61,6 +61,8 @@ volatile variable rule. This means that you, the Akka user, do not need to worry such a "happens before" relation, because it is the responsibility of Akka. So you have your hands free to deal with your business logic, and the Akka framework makes sure that those rules are guaranteed on your behalf. +.. 
_jmm-shared-state: + Actors and shared mutable state ------------------------------- diff --git a/akka-docs/java/code/akka/docs/actor/FirstUntypedActor.java b/akka-docs/java/code/akka/docs/actor/FirstUntypedActor.java new file mode 100644 index 0000000000..42b94df39c --- /dev/null +++ b/akka-docs/java/code/akka/docs/actor/FirstUntypedActor.java @@ -0,0 +1,17 @@ +package akka.docs.actor; + +import akka.actor.ActorRef; +import static akka.actor.Actors.*; +import akka.actor.UntypedActor; + +//#context-actorOf +public class FirstUntypedActor extends UntypedActor { + ActorRef myActor = getContext().actorOf(MyActor.class); + + //#context-actorOf + + public void onReceive(Object message) { + myActor.forward(message, getContext()); + myActor.tell(poisonPill()); + } +} diff --git a/akka-docs/java/code/akka/docs/actor/ImmutableMessage.java b/akka-docs/java/code/akka/docs/actor/ImmutableMessage.java new file mode 100644 index 0000000000..20af6de1fb --- /dev/null +++ b/akka-docs/java/code/akka/docs/actor/ImmutableMessage.java @@ -0,0 +1,25 @@ +package akka.docs.actor; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +//#immutable-message +public class ImmutableMessage { + private final int sequenceNumber; + private final List values; + + public ImmutableMessage(int sequenceNumber, List values) { + this.sequenceNumber = sequenceNumber; + this.values = Collections.unmodifiableList(new ArrayList(values)); + } + + public int getSequenceNumber() { + return sequenceNumber; + } + + public List getValues() { + return values; + } +} +//#immutable-message diff --git a/akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java b/akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java new file mode 100644 index 0000000000..dc88f3d78c --- /dev/null +++ b/akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java @@ -0,0 +1,26 @@ +package akka.docs.actor; + +//#receive-timeout +import akka.actor.Actors; +import akka.actor.ReceiveTimeout; +import akka.actor.UnhandledMessageException; +import akka.actor.UntypedActor; +import akka.util.Duration; + +public class MyReceivedTimeoutUntypedActor extends UntypedActor { + + public MyReceivedTimeoutUntypedActor() { + getContext().setReceiveTimeout(Duration.parse("30 seconds")); + } + + public void onReceive(Object message) throws Exception { + if (message.equals("Hello")) { + getSender().tell("Hello world"); + } else if (message == Actors.receiveTimeout()) { + throw new RuntimeException("received timeout"); + } else { + throw new UnhandledMessageException(message, getSelf()); + } + } +} +//#receive-timeout \ No newline at end of file diff --git a/akka-docs/java/code/akka/docs/actor/MyUntypedActor.java b/akka-docs/java/code/akka/docs/actor/MyUntypedActor.java new file mode 100644 index 0000000000..203ad5e596 --- /dev/null +++ b/akka-docs/java/code/akka/docs/actor/MyUntypedActor.java @@ -0,0 +1,20 @@ +package akka.docs.actor; + +//#my-untyped-actor +import akka.actor.UntypedActor; +import akka.actor.UnhandledMessageException; +import akka.event.Logging; +import akka.event.LoggingAdapter; + +public class MyUntypedActor extends UntypedActor { + LoggingAdapter log = Logging.getLogger(getContext().system(), this); + + public void onReceive(Object message) throws Exception { + if (message instanceof String) + log.info("Received String message: {}", message); + else + throw new UnhandledMessageException(message, getSelf()); + } +} +//#my-untyped-actor + diff --git 
a/akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java b/akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java new file mode 100644 index 0000000000..106407b4b8 --- /dev/null +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java @@ -0,0 +1,53 @@ +package akka.docs.actor; + +import static akka.docs.actor.UntypedActorSwapper.Swap.SWAP; +import akka.actor.ActorRef; +import akka.actor.ActorSystem; +import akka.actor.UnhandledMessageException; +import akka.actor.UntypedActor; +import akka.event.Logging; +import akka.event.LoggingAdapter; +import akka.japi.Procedure; + +//#swapper +public class UntypedActorSwapper { + + public static class Swap { + public static Swap SWAP = new Swap(); + + private Swap() { + } + } + + public static class Swapper extends UntypedActor { + LoggingAdapter log = Logging.getLogger(getContext().system(), this); + + public void onReceive(Object message) throws Exception { + if (message == SWAP) { + log.info("Hi"); + getContext().become(new Procedure() { + @Override + public void apply(Object message) { + log.info("Ho"); + getContext().unbecome(); // resets the latest 'become' (just for fun) + } + }); + } else { + throw new UnhandledMessageException(message, getSelf()); + } + } + } + + public static void main(String... args) { + ActorSystem system = ActorSystem.create("MySystem"); + ActorRef swap = system.actorOf(Swapper.class); + swap.tell(SWAP); // logs Hi + swap.tell(SWAP); // logs Ho + swap.tell(SWAP); // logs Hi + swap.tell(SWAP); // logs Ho + swap.tell(SWAP); // logs Hi + swap.tell(SWAP); // logs Ho + } + +} +//#swapper \ No newline at end of file diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorTest.scala b/akka-docs/java/code/akka/docs/actor/UntypedActorTest.scala new file mode 100644 index 0000000000..1747f30f92 --- /dev/null +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorTest.scala @@ -0,0 +1,5 @@ +package akka.docs.actor + +import org.scalatest.junit.JUnitSuite + +class UntypedActorTest extends UntypedActorTestBase with JUnitSuite diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorTestBase.java b/akka-docs/java/code/akka/docs/actor/UntypedActorTestBase.java new file mode 100644 index 0000000000..756618eef5 --- /dev/null +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorTestBase.java @@ -0,0 +1,239 @@ +package akka.docs.actor; + +//#imports +import akka.actor.ActorRef; +import akka.actor.ActorSystem; + +//#imports + +//#import-future +import akka.dispatch.Future; + +//#import-future + +//#import-actors +import static akka.actor.Actors.*; + +//#import-actors + +//#import-procedure +import akka.japi.Procedure; + +//#import-procedure + +import akka.actor.Props; +import akka.actor.UntypedActor; +import akka.actor.UntypedActorFactory; +import akka.dispatch.MessageDispatcher; + +import org.junit.Test; + +import scala.Option; + +import static org.junit.Assert.*; + +public class UntypedActorTestBase { + + @Test + public void systemActorOf() { + //#system-actorOf + ActorSystem system = ActorSystem.create("MySystem"); + ActorRef myActor = system.actorOf(MyUntypedActor.class); + //#system-actorOf + myActor.tell("test"); + system.stop(); + } + + @Test + public void contextActorOf() { + //#context-actorOf + ActorSystem system = ActorSystem.create("MySystem"); + ActorRef myActor = system.actorOf(MyUntypedActor.class); + //#context-actorOf + myActor.tell("test"); + system.stop(); + } + + @Test + public void constructorActorOf() { + ActorSystem system = ActorSystem.create("MySystem"); + //#creating-constructor + // 
allows passing in arguments to the MyActor constructor + ActorRef myActor = system.actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new MyActor("..."); + } + }); + //#creating-constructor + myActor.tell("test"); + system.stop(); + } + + @Test + public void propsActorOf() { + ActorSystem system = ActorSystem.create("MySystem"); + //#creating-props + MessageDispatcher dispatcher = system.dispatcherFactory().newFromConfig("my-dispatcher"); + ActorRef myActor = system.actorOf(new Props().withCreator(MyUntypedActor.class).withDispatcher(dispatcher), + "myactor"); + //#creating-props + myActor.tell("test"); + system.stop(); + } + + @Test + public void usingAsk() { + ActorSystem system = ActorSystem.create("MySystem"); + ActorRef myActor = system.actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new MyAskActor(); + } + }); + + //#using-ask + Future future = myActor.ask("Hello", 1000); + future.await(); + if (future.isCompleted()) { + Option resultOption = future.result(); + if (resultOption.isDefined()) { + Object result = resultOption.get(); + // ... + } else { + //... whatever + } + } + //#using-ask + system.stop(); + } + + @Test + public void receiveTimeout() { + ActorSystem system = ActorSystem.create("MySystem"); + ActorRef myActor = system.actorOf(MyReceivedTimeoutUntypedActor.class); + myActor.tell("Hello"); + system.stop(); + } + + @Test + public void usePoisonPill() { + ActorSystem system = ActorSystem.create("MySystem"); + ActorRef myActor = system.actorOf(MyUntypedActor.class); + //#poison-pill + myActor.tell(poisonPill()); + //#poison-pill + system.stop(); + } + + @Test + public void useKill() { + ActorSystem system = ActorSystem.create("MySystem"); + ActorRef victim = system.actorOf(MyUntypedActor.class); + //#kill + victim.tell(kill()); + //#kill + system.stop(); + } + + @Test + public void useBecome() { + ActorSystem system = ActorSystem.create("MySystem"); + ActorRef myActor = system.actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new HotSwapActor(); + } + }); + myActor.tell("foo"); + myActor.tell("bar"); + myActor.tell("bar"); + system.stop(); + } + + public static class MyActor extends UntypedActor { + + public MyActor(String s) { + } + + public void onReceive(Object message) throws Exception { + try { + operation(); + } catch (Exception e) { + getSender().tell(new akka.actor.Status.Failure(e)); + throw e; + } + } + + private void operation() { + } + + //#lifecycle-callbacks + public void preStart() { + } + + public void preRestart(Throwable reason, Option message) { + postStop(); + } + + public void postRestart(Throwable reason) { + preStart(); + } + + public void postStop() { + } + //#lifecycle-callbacks + } + + public static class MyAskActor extends UntypedActor { + + public void onReceive(Object message) throws Exception { + //#reply-exception + try { + String result = operation(); + getSender().tell(result); + } catch (Exception e) { + getSender().tell(new akka.actor.Status.Failure(e)); + throw e; + } + //#reply-exception + } + + private String operation() { + return "Hi"; + } + } + + //#hot-swap-actor + public static class HotSwapActor extends UntypedActor { + + Procedure angry = new Procedure() { + @Override + public void apply(Object message) { + if (message.equals("foo")) { + getSender().tell("I am already angry?"); + } else if (message.equals("foo")) { + getContext().become(happy); + } + } + }; + + Procedure happy = new Procedure() { + @Override + public void apply(Object message) { 
+ if (message.equals("bar")) { + getSender().tell("I am already happy :-)"); + } else if (message.equals("foo")) { + getContext().become(angry); + } + } + }; + + public void onReceive(Object message) { + if (message.equals("bar")) { + getContext().become(angry); + } else if (message.equals("foo")) { + getContext().become(happy); + } + } + } + //#hot-swap-actor + +} diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst index 9defdf4607..2e6e68b215 100644 --- a/akka-docs/java/untyped-actors.rst +++ b/akka-docs/java/untyped-actors.rst @@ -1,370 +1,470 @@ + .. _untyped-actors-java: -Actors (Java) -============= +################ + Actors (Java) +################ + .. sidebar:: Contents .. contents:: :local: -Module stability: **SOLID** -The `Actor Model `_ provides a higher level of abstraction for writing concurrent and distributed systems. It alleviates the developer from having to deal with explicit locking and thread management, making it easier to write correct concurrent and parallel systems. Actors were defined in the 1973 paper by Carl Hewitt but have been popularized by the Erlang language, and used for example at Ericsson with great success to build highly concurrent and reliable telecom systems. +The `Actor Model`_ provides a higher level of abstraction for writing concurrent +and distributed systems. It alleviates the developer from having to deal with +explicit locking and thread management, making it easier to write correct +concurrent and parallel systems. Actors were defined in the 1973 paper by Carl +Hewitt but have been popularized by the Erlang language, and used for example at +Ericsson with great success to build highly concurrent and reliable telecom +systems. + +The API of Akka’s Actors is similar to Scala Actors which has borrowed some of +its syntax from Erlang. + +.. _Actor Model: http://en.wikipedia.org/wiki/Actor_model + + +Creating Actors +=============== + Defining an Actor class ----------------------- -Actors in Java are created either by extending the 'UntypedActor' class and implementing the 'onReceive' method. This method takes the message as a parameter. +Actor in Java are implemented by extending the ``UntypedActor`` class and implementing the +:meth:`onReceive` method. This method takes the message as a parameter. Here is an example: +.. includecode:: code/akka/docs/actor/MyUntypedActor.java#my-untyped-actor + +Creating Actors with default constructor +---------------------------------------- + +.. includecode:: code/akka/docs/actor/UntypedActorTestBase.java + :include: imports,system-actorOf + +The call to :meth:`actorOf` returns an instance of ``ActorRef``. This is a handle to +the ``UntypedActor`` instance which you can use to interact with the ``UntypedActor``. The +``ActorRef`` is immutable and has a one to one relationship with the Actor it +represents. The ``ActorRef`` is also serializable and network-aware. This means +that you can serialize it, send it over the wire and use it on a remote host and +it will still be representing the same Actor on the original node, across the +network. + +In the above example the actor was created from the system. It is also possible +to create actors from other actors with the actor ``context``. The difference is +how the supervisor hierarchy is arranged. When using the context the current actor +will be supervisor of the created child actor. When using the system it will be +a top level actor, that is supervised by the system (internal guardian actor). + +.. 
includecode:: code/akka/docs/actor/FirstUntypedActor.java#context-actorOf + +Actors are automatically started asynchronously when created. +When you create the ``UntypedActor`` then it will automatically call the ``preStart`` +callback method on the ``UntypedActor`` class. This is an excellent place to +add initialization code for the actor. + .. code-block:: java - import akka.actor.UntypedActor; - import akka.event.EventHandler; - - public class SampleUntypedActor extends UntypedActor { - - public void onReceive(Object message) throws Exception { - if (message instanceof String) - EventHandler.info(this, String.format("Received String message: %s", - message)); - else - throw new IllegalArgumentException("Unknown message: " + message); - } + @Override + public void preStart() { + ... // initialization code } -Creating Actors -^^^^^^^^^^^^^^^ - -Creating an Actor is done using the 'akka.actor.Actors.actorOf' factory method. This method returns a reference to the UntypedActor's ActorRef. This 'ActorRef' is an immutable serializable reference that you should use to communicate with the actor, send messages, link to it etc. This reference also functions as the context for the actor and holds run-time type information such as sender of the last message, - -.. code-block:: java - - ActorRef myActor = Actors.actorOf(SampleUntypedActor.class); - -Normally you would want to import the 'actorOf' method like this: - -.. code-block:: java - - import static akka.actor.Actors.*; - ActorRef myActor = actorOf(SampleUntypedActor.class); - -To avoid prefix it with 'Actors' every time you use it. - -You can also create & start the actor in one statement: - -.. code-block:: java - - ActorRef myActor = actorOf(SampleUntypedActor.class); - -The call to 'actorOf' returns an instance of 'ActorRef'. This is a handle to the 'UntypedActor' instance which you can use to interact with the Actor, like send messages to it etc. more on this shortly. The 'ActorRef' is immutable and has a one to one relationship with the Actor it represents. The 'ActorRef' is also serializable and network-aware. This means that you can serialize it, send it over the wire and use it on a remote host and it will still be representing the same Actor on the original node, across the network. - Creating Actors with non-default constructor -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +-------------------------------------------- -If your UntypedActor has a constructor that takes parameters then you can't create it using 'actorOf(clazz)'. Instead you can use a variant of 'actorOf' that takes an instance of an 'UntypedActorFactory' in which you can create the Actor in any way you like. If you use this method then you to make sure that no one can get a reference to the actor instance. If they can get a reference it then they can touch state directly in bypass the whole actor dispatching mechanism and create race conditions which can lead to corrupt data. +If your UntypedActor has a constructor that takes parameters then you can't create it using 'actorOf(clazz)'. +Instead you can use a variant of ``actorOf`` that takes an instance of an 'UntypedActorFactory' +in which you can create the Actor in any way you like. If you use this method then you to make sure that +no one can get a reference to the actor instance. If they can get a reference it then they can +touch state directly in bypass the whole actor dispatching mechanism and create race conditions +which can lead to corrupt data. Here is an example: -.. 
code-block:: java - - ActorRef actor = actorOf(new UntypedActorFactory() { - public UntypedActor create() { - return new MyUntypedActor("service:name", 5); - } - }); +.. includecode:: code/akka/docs/actor/UntypedActorTestBase.java#creating-constructor This way of creating the Actor is also great for integrating with Dependency Injection (DI) frameworks like Guice or Spring. -UntypedActor context --------------------- +Creating Actors with Props +-------------------------- -The UntypedActor base class contains almost no member fields or methods to invoke. It only has the 'onReceive(Object message)' method, which is defining the Actor's message handler, and some life-cycle callbacks that you can choose to implement: -## preStart -## postStop -## preRestart -## postRestart +``Props`` is a configuration object to specify additional things for the actor to +be created, such as the ``MessageDispatcher``. -Most of the API is in the UnypedActorRef a reference for the actor. This reference is available in the 'getContext()' method in the UntypedActor (or you can use its alias, the 'context()' method, if you prefer. Here, for example, you find methods to reply to messages, send yourself messages, define timeouts, fault tolerance etc., start and stop etc. +.. includecode:: code/akka/docs/actor/UntypedActorTestBase.java#creating-props -Identifying Actors ------------------- -Each ActorRef has two methods: -* getContext().getUuid(); -* getContext().getId(); +UntypedActor API +================ -The difference is that the 'uuid' is generated by the runtime, guaranteed to be unique and can't be modified. While the 'id' can be set by the user (using 'getContext().setId(...)', and defaults to Actor class name. You can retrieve Actors by both UUID and ID using the 'ActorRegistry', see the section further down for details. +The :class:`UntypedActor` class defines only one abstract method, the above mentioned +:meth:`onReceive(Object message)`, which implements the behavior of the actor. -Messages and immutability -------------------------- +In addition, it offers: -**IMPORTANT**: Messages can be any kind of object but have to be immutable. Akka can’t enforce immutability (yet) so this has to be by convention. +* :obj:`getSelf()` reference to the :class:`ActorRef` of the actor +* :obj:`getSender()` reference sender Actor of the last received message, typically used as described in :ref:`UntypedActor.Reply` +* :obj:`getContext()` exposes contextual information for the actor and the current message, such as: -Send messages + * factory methods to create child actors (:meth:`actorOf`) + * system that the actor belongs to + * parent supervisor + * supervised children + * hotswap behavior stack as described in :ref:`UntypedActor.HotSwap` + +The remaining visible methods are user-overridable life-cycle hooks which are +described in the following: + +.. includecode:: code/akka/docs/actor/UntypedActorTestBase.java#lifecycle-callbacks + +The implementations shown above are the defaults provided by the :class:`UntypedActor` +class. + + +Start Hook +---------- + +Right after starting the actor, its :meth:`preStart` method is invoked. + +:: + + @Override + public void preStart() { + // registering with other actors + someService.tell(Register(getSelf()); + } + + +Restart Hooks ------------- -Messages are sent to an Actor through one of the 'send' methods. -* 'tell' means “fire-and-forget”, e.g. send a message asynchronously and return immediately. -* 'ask' sends a message asynchronously and returns a 'Future'. 
+All actors are supervised, i.e. linked to another actor with a fault +handling strategy. Actors will be restarted in case an exception is thrown while +processing a message. This restart involves the hooks mentioned above: -In all these methods you have the option of passing along your 'ActorRef' context variable. Make it a practice of doing so because it will allow the receiver actors to be able to respond to your message, since the sender reference is sent along with the message. +1. The old actor is informed by calling :meth:`preRestart` with the exception + which caused the restart and the message which triggered that exception; the + latter may be ``None`` if the restart was not caused by processing a + message, e.g. when a supervisor does not trap the exception and is restarted + in turn by its supervisor. This method is the best place for cleaning up, + preparing hand-over to the fresh actor instance, etc. + By default it calls :meth:`postStop`. +2. The initial factory from the ``actorOf`` call is used + to produce the fresh instance. +3. The new actor’s :meth:`postRestart` method is invoked with the exception + which caused the restart. By default the :meth:`preStart` + is called, just as in the normal start-up case. -Fire-forget -^^^^^^^^^^^ -This is the preferred way of sending messages. No blocking waiting for a message. Give best concurrency and scalability characteristics. +An actor restart replaces only the actual actor object; the contents of the +mailbox and the hotswap stack are unaffected by the restart, so processing of +messages will resume after the :meth:`postRestart` hook returns. The message +that triggered the exception will not be received again. Any message +sent to an actor while it is being restarted will be queued to its mailbox as +usual. + +Stop Hook +--------- + +After stopping an actor, its :meth:`postStop` hook is called, which may be used +e.g. for deregistering this actor from other services. This hook is guaranteed +to run after message queuing has been disabled for this actor, i.e. messages +sent to a stopped actor will be redirected to the :obj:`deadLetters` of the +:obj:`ActorSystem`. + + +Identifying Actors +================== + +FIXME Actor Path documentation + + +Messages and immutability +========================= + +**IMPORTANT**: Messages can be any kind of object but have to be +immutable. Akka can’t enforce immutability (yet) so this has to be by +convention. + +Here is an example of an immutable message: + +.. includecode:: code/akka/docs/actor/ImmutableMessage.java#immutable-message + + +Send messages +============= + +Messages are sent to an Actor through one of the following methods. + +* ``tell`` means “fire-and-forget”, e.g. send a message asynchronously and return + immediately. +* ``ask`` sends a message asynchronously and returns a :class:`Future` + representing a possible reply. + +Message ordering is guaranteed on a per-sender basis. + +In all these methods you have the option of passing along your own ``ActorRef``. +Make it a practice of doing so because it will allow the receiver actors to be able to respond +to your message, since the sender reference is sent along with the message. + +Tell: Fire-forget +----------------- + +This is the preferred way of sending messages. No blocking waiting for a +message. This gives the best concurrency and scalability characteristics. .. 
code-block:: java actor.tell("Hello"); -Or with the sender reference passed along: +Or with the sender reference passed along with the message and available to the receiving Actor +in its ``getSender: ActorRef`` member field. The target actor can use this +to reply to the original sender, by using ``getSender().tell(replyMsg)``. .. code-block:: java - actor.tell("Hello", getContext()); + actor.tell("Hello", getSelf()); -If invoked from within an Actor, then the sending actor reference will be implicitly passed along with the message and available to the receiving Actor in its 'getContext().getSender();' method. He can use this to reply to the original sender or use the 'getContext().reply(message);' method. +If invoked without the sender parameter the sender will be +:obj:`deadLetters` actor reference in the target actor. -If invoked from an instance that is **not** an Actor there will be no implicit sender passed along the message and you will get an 'IllegalStateException' if you call 'getContext().reply(..)'. +Ask: Send-And-Receive-Future +---------------------------- -Send-And-Receive-Future -^^^^^^^^^^^^^^^^^^^^^^^ - -Using 'ask' will send a message to the receiving Actor asynchronously and will immediately return a 'Future'. +Using ``ask`` will send a message to the receiving Actor asynchronously and +will immediately return a :class:`Future`: .. code-block:: java - Future future = actorRef.ask("Hello", getContext(), 1000); + long timeoutMillis = 1000; + Future future = actorRef.ask("Hello", timeoutMillis); -The 'Future' interface looks like this: +The receiving actor should reply to this message, which will complete the +future with the reply message as value; ``getSender.tell(result)``. -.. code-block:: java +To complete the future with an exception you need send a Failure message to the sender. +This is not done automatically when an actor throws an exception while processing a +message. - interface Future { - void await(); - boolean isCompleted(); - boolean isExpired(); - long timeoutInNanos(); - Option result(); - Option exception(); - Future onComplete(Procedure> procedure); - } +.. includecode:: code/akka/docs/actor/UntypedActorTestBase.java#reply-exception -So the normal way of working with futures is something like this: +If the actor does not complete the future, it will expire after the timeout period, +specified as parameter to the ``ask`` method. -.. code-block:: java +See :ref:`futures-java` for more information on how to await or query a +future. - Future future = actorRef.ask("Hello", getContext(), 1000); - future.await(); - if (future.isCompleted()) { - Option resultOption = future.result(); - if (resultOption.isDefined()) { - Object result = resultOption.get(); - ... - } - ... // whatever - } +The ``onComplete``, ``onResult``, or ``onTimeout`` methods of the ``Future`` can be +used to register a callback to get a notification when the Future completes. +Gives you a way to avoid blocking. -The 'onComplete' callback can be used to register a callback to get a notification when the Future completes. Gives you a way to avoid blocking. +.. warning:: + + When using future callbacks, inside actors you need to carefully avoid closing over + the containing actor’s reference, i.e. do not call methods or access mutable state + on the enclosing actor from within the callback. This would break the actor + encapsulation and may introduce synchronization bugs and race conditions because + the callback will be scheduled concurrently to the enclosing actor. 
Unfortunately + there is not yet a way to detect these illegal accesses at compile time. See also: + :ref:`jmm-shared-state` + +The future returned from the ``ask`` method can conveniently be passed around or +chained with further processing steps, but sometimes you just need the value, +even if that entails waiting for it (but keep in mind that waiting inside an +actor is prone to dead-locks, e.g. if obtaining the result depends on +processing another message on this actor). + +.. includecode:: code/akka/docs/actor/UntypedActorTestBase.java + :include: import-future,using-ask Forward message -^^^^^^^^^^^^^^^ +--------------- -You can forward a message from one actor to another. This means that the original sender address/reference is maintained even though the message is going through a 'mediator'. This can be useful when writing actors that work as routers, load-balancers, replicators etc. You need to pass along your ActorRef context variable as well. +You can forward a message from one actor to another. This means that the +original sender address/reference is maintained even though the message is going +through a 'mediator'. This can be useful when writing actors that work as +routers, load-balancers, replicators etc. +You need to pass along your context variable as well. .. code-block:: java - getContext().forward(message, getContext()); + myActor.forward(message, getContext()); Receive messages ----------------- +================ -When an actor receives a message it is passed into the 'onReceive' method, this is an abstract method on the 'UntypedActor' base class that needs to be defined. +When an actor receives a message it is passed into the ``onReceive`` method, this is +an abstract method on the ``UntypedActor`` base class that needs to be defined. Here is an example: -.. code-block:: java +.. includecode:: code/akka/docs/actor/MyUntypedActor.java#my-untyped-actor - public class SampleUntypedActor extends UntypedActor { +An alternative to using if-instanceof checks is to use `Apache Commons MethodUtils +`_ +to invoke a named method whose parameter type matches the message type. - public void onReceive(Object message) throws Exception { - if (message instanceof String) - EventHandler.info(this, String.format("Received String message: %s", message)); - else - throw new IllegalArgumentException("Unknown message: " + message); - } - } +.. _UntypedActor.Reply: Reply to messages ------------------ +================= -Reply using the channel -^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to have a handle to an object to whom you can reply to the message, you can use the Channel abstraction. -Simply call getContext().channel() and then you can forward that to others, store it away or otherwise until you want to reply, -which you do by Channel.tell(msg) +If you want to have a handle for replying to a message, you can use +``getSender()``, which gives you an ActorRef. You can reply by sending to +that ActorRef with ``getSender().tell(replyMsg)``. You can also store the ActorRef +for replying later, or passing on to other actors. If there is no sender (a +message was sent without an actor or future context) then the sender +defaults to a 'dead-letter' actor ref. .. 
code-block:: java - public void onReceive(Object message) throws Exception { - if (message instanceof String) { - String msg = (String)message; - if (msg.equals("Hello")) { - // Reply to original sender of message using the channel - getContext().channel().tell(msg + " from " + getContext().getUuid()); - } - } + public void onReceive(Object request) { + String result = process(request); + getSender().tell(result); // will have dead-letter actor as default } -We recommend that you as first choice use the channel abstraction instead of the other ways described in the following sections. +Initial receive timeout +======================= -Reply using the 'tryReply' and 'reply' methods -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +A timeout mechanism can be used to receive a message when no initial message is +received within a certain time. To receive this timeout you have to set the +``receiveTimeout`` property and declare handing for the ReceiveTimeout +message. -If you want to send a message back to the original sender of the message you just received then you can use the 'getContext().reply(..)' method. - -.. code-block:: java - - public void onReceive(Object message) throws Exception { - if (message instanceof String) { - String msg = (String)message; - if (msg.equals("Hello")) { - // Reply to original sender of message using the 'reply' method - getContext().reply(msg + " from " + getContext().getUuid()); - } - } - } - -In this case we will a reply back to the Actor that sent the message. - -The 'reply' method throws an 'IllegalStateException' if unable to determine what to reply to, e.g. the sender has not been passed along with the message when invoking one of 'send*' methods. You can also use the more forgiving 'tryReply' method which returns 'true' if reply was sent, and 'false' if unable to determine what to reply to. - -.. code-block:: java - - public void onReceive(Object message) throws Exception { - if (message instanceof String) { - String msg = (String)message; - if (msg.equals("Hello")) { - // Reply to original sender of message using the 'reply' method - if (getContext().tryReply(msg + " from " + getContext().getUuid())) ... // success - else ... // handle failure - } - } - } - -Summary of reply semantics and options -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -* ``getContext().reply(...)`` can be used to reply to an ``Actor`` or a - ``Future`` from within an actor; the current actor will be passed as reply - channel if the current channel supports this. -* ``getContext().channel`` is a reference providing an abstraction for the - reply channel; this reference may be passed to other actors or used by - non-actor code. - -.. note:: - - There used to be two methods for determining the sending Actor or Future for the current invocation: - - * ``getContext().getSender()`` yielded a :class:`Option[ActorRef]` - * ``getContext().getSenderFuture()`` yielded a :class:`Option[CompletableFuture[Any]]` - - These two concepts have been unified into the ``channel``. If you need to - know the nature of the channel, you may do so using instance tests:: - - if (getContext().channel() instanceof ActorRef) { - ... - } else if (getContext().channel() instanceof ActorPromise) { - ... - } - -Promise represents the write-side of a Future, enabled by the methods - -* completeWithResult(..) -* completeWithException(..) - -Starting actors ---------------- - -Actors are started when they are created by invoking the ‘actorOf’ method. - -.. 
code-block:: java - - ActorRef actor = actorOf(SampleUntypedActor.class); - -When you create the actor then it will automatically call the 'preStart' callback method on the 'UntypedActor'. This is an excellent place to add initialization code for the actor. - -.. code-block:: java - - @Override - void preStart() { - ... // initialization code - } +.. includecode:: code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java#receive-timeout Stopping actors ---------------- +=============== -Actors are stopped by invoking the ‘stop’ method. +Actors are stopped by invoking the ``stop`` method of the ``ActorRef``. +The actual termination of the actor is performed asynchronously, i.e. +``stop`` may return before the actor is stopped. .. code-block:: java actor.stop(); -When stop is called then a call to the ‘postStop’ callback method will take place. The Actor can use this callback to implement shutdown behavior. +Processing of the current message, if any, will continue before the actor is stopped, +but additional messages in the mailbox will not be processed. By default these +messages are sent to the :obj:`deadLetters` of the :obj:`ActorSystem`, but that +depends on the mailbox implementation. + +When stop is called then a call to the ``def postStop`` callback method will +take place. The ``Actor`` can use this callback to implement shutdown behavior. .. code-block:: java - @Override - void postStop() { + public void postStop() { ... // clean up resources } -You can shut down all Actors in the system by invoking: -.. code-block:: java +All Actors are stopped when the ``ActorSystem`` is stopped. +Supervised actors are stopped when the supervisor is stopped, i.e. children are stopped +when parent is stopped. - Actors.registry().shutdownAll(); PoisonPill ---------- -You can also send an actor the akka.actor.PoisonPill message, which will stop the actor when the message is processed. -If the sender is a Future, the Future will be completed with an akka.actor.ActorKilledException("PoisonPill") +You can also send an actor the ``akka.actor.PoisonPill`` message, which will +stop the actor when the message is processed. ``PoisonPill`` is enqueued as +ordinary messages and will be handled after messages that were already queued +in the mailbox. + +If the ``PoisonPill`` was sent with ``ask``, the ``Future`` will be completed with an +``akka.actor.ActorKilledException("PoisonPill")``. Use it like this: +.. includecode:: code/akka/docs/actor/UntypedActorTestBase.java + :include: import-actors,poison-pill + +.. _UntypedActor.HotSwap: + +HotSwap +======= + +Upgrade +------- + +Akka supports hotswapping the Actor’s message loop (e.g. its implementation) at +runtime. Use the ``getContext().become`` method from within the Actor. +The hotswapped code is kept in a Stack which can be pushed and popped. + +.. warning:: + + Please note that the actor will revert to its original behavior when restarted by its Supervisor. + +To hotswap the Actor using ``getContext().become``: + +.. includecode:: code/akka/docs/actor/UntypedActorTestBase.java + :include: import-procedure,hot-swap-actor + +The ``become`` method is useful for many different things, such as to implement +a Finite State Machine (FSM). + +Here is another little cute example of ``become`` and ``unbecome`` in action: + +.. includecode:: code/akka/docs/actor/UntypedActorSwapper.java#swapper + +Downgrade +--------- + +Since the hotswapped code is pushed to a Stack you can downgrade the code as +well. Use the ``getContext().unbecome`` method from within the Actor. 
+ .. code-block:: java - import static akka.actor.Actors.*; - - actor.tell(poisonPill()); + public void onReceive(Object message) { + if (message.equals("revert")) getContext().unbecome(); + } Killing an Actor ----------------- +================ -You can kill an actor by sending a 'new Kill()' message. This will restart the actor through regular supervisor semantics. +You can kill an actor by sending a ``Kill`` message. This will restart the actor +through regular supervisor semantics. Use it like this: -.. code-block:: java +.. includecode:: code/akka/docs/actor/UntypedActorTestBase.java + :include: import-actors,kill - import static akka.actor.Actors.*; +Actors and exceptions +===================== - // kill the actor called 'victim' - victim.tell(kill()); +It can happen that while a message is being processed by an actor, that some +kind of exception is thrown, e.g. a database exception. -Actor life-cycle ----------------- +What happens to the Message +--------------------------- -The actor has a well-defined non-circular life-cycle. +If an exception is thrown while a message is being processed (so taken of his +mailbox and handed over the the receive), then this message will be lost. It is +important to understand that it is not put back on the mailbox. So if you want +to retry processing of a message, you need to deal with it yourself by catching +the exception and retry your flow. Make sure that you put a bound on the number +of retries since you don't want a system to livelock (so consuming a lot of cpu +cycles without making progress). -:: +What happens to the mailbox +--------------------------- - NEW (newly created actor) - can't receive messages (yet) - => STARTED (when 'start' is invoked) - can receive messages - => SHUT DOWN (when 'exit' or 'stop' is invoked) - can't do anything +If an exception is thrown while a message is being processed, nothing happens to +the mailbox. If the actor is restarted, the same mailbox will be there. So all +messages on that mailbox, will be there as well. + +What happens to the actor +------------------------- + +If an exception is thrown, the actor instance is discarded and a new instance is +created. This new instance will now be used in the actor references to this actor +(so this is done invisible to the developer). Note that this means that current +state of the failing actor instance is lost if you don't store and restore it in +``preRestart`` and ``postRestart`` callbacks. diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index b5cd58ef70..27aa60ea3a 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -50,8 +50,8 @@ be able to handle unknown messages then you need to have a default case as in the example above. Otherwise an ``UnhandledMessageException`` will be thrown and the actor is restarted when an unknown message is received. -Creating Actors ---------------- +Creating Actors with default constructor +---------------------------------------- .. includecode:: code/ActorDocSpec.scala :include: imports2,system-actorOf @@ -73,6 +73,15 @@ a top level actor, that is supervised by the system (internal guardian actor). .. includecode:: code/ActorDocSpec.scala#context-actorOf Actors are automatically started asynchronously when created. +When you create the ``Actor`` then it will automatically call the ``preStart`` +callback method on the ``Actor`` trait. This is an excellent place to +add initialization code for the actor. + +.. code-block:: scala + + override def preStart() = { + ... 
// initialization code + } Creating Actors with non-default constructor -------------------------------------------- @@ -110,6 +119,7 @@ When spawning actors for specific sub-tasks from within an actor, it may be conv introduce synchronization bugs and race conditions because the other actor’s code will be scheduled concurrently to the enclosing actor. Unfortunately there is not yet a way to detect these illegal accesses at compile time. + See also: :ref:`jmm-shared-state` Actor API @@ -127,7 +137,7 @@ In addition, it offers: * :obj:`sender` reference sender Actor of the last received message, typically used as described in :ref:`Actor.Reply` * :obj:`context` exposes contextual information for the actor and the current message, such as: - * factory method to create child actors (:meth:`actorOf`) + * factory methods to create child actors (:meth:`actorOf`) * system that the actor belongs to * parent supervisor * supervised children @@ -242,8 +252,8 @@ Messages are sent to an Actor through one of the following methods. Message ordering is guaranteed on a per-sender basis. -Fire-forget ------------ +Tell: Fire-forget +----------------- This is the preferred way of sending messages. No blocking waiting for a message. This gives the best concurrency and scalability characteristics. @@ -260,11 +270,11 @@ to reply to the original sender, by using ``sender ! replyMsg``. If invoked from an instance that is **not** an Actor the sender will be :obj:`deadLetters` actor reference by default. -Send-And-Receive-Future ------------------------ +Ask: Send-And-Receive-Future +---------------------------- Using ``?`` will send a message to the receiving Actor asynchronously and -will return a :class:`Future`: +will immediately return a :class:`Future`: .. code-block:: scala @@ -277,15 +287,7 @@ To complete the future with an exception you need send a Failure message to the This is not done automatically when an actor throws an exception while processing a message. -.. code-block:: scala - - try { - operation() - } catch { - case e: Exception => - sender ! akka.actor.Status.Failure(e) - throw e - } +.. includecode:: code/ActorDocSpec.scala#reply-exception If the actor does not complete the future, it will expire after the timeout period, which is taken from one of the following locations in order of precedence: @@ -304,18 +306,19 @@ which is taken from one of the following locations in order of precedence: See :ref:`futures-scala` for more information on how to await or query a future. +The ``onComplete``, ``onResult``, or ``onTimeout`` methods of the ``Future`` can be +used to register a callback to get a notification when the Future completes. +Gives you a way to avoid blocking. + .. warning:: - When using future callbacks, such as ``onComplete``, ``onResult``, and ``onTimeout``, - inside actors you need to carefully avoid closing over the containing actor’s - reference, i.e. do not call methods or access mutable state on the enclosing actor - from within the callback. This would break the actor encapsulation and may - introduce synchronization bugs and race conditions because the callback - will be scheduled concurrently to the enclosing actor. Unfortunately + When using future callbacks, inside actors you need to carefully avoid closing over + the containing actor’s reference, i.e. do not call methods or access mutable state + on the enclosing actor from within the callback. 
This would break the actor + encapsulation and may introduce synchronization bugs and race conditions because + the callback will be scheduled concurrently to the enclosing actor. Unfortunately there is not yet a way to detect these illegal accesses at compile time. - -Send-And-Receive-Eventually ---------------------------- + See also: :ref:`jmm-shared-state` The future returned from the ``?`` method can conveniently be passed around or chained with further processing steps, but sometimes you just need the value, @@ -344,7 +347,7 @@ routers, load-balancers, replicators etc. .. code-block:: scala - actor.forward(message) + myActor.forward(message) Receive messages @@ -375,7 +378,7 @@ Reply to messages If you want to have a handle for replying to a message, you can use ``sender``, which gives you an ActorRef. You can reply by sending to -that ActorRef with ``sender ! Message``. You can also store the ActorRef +that ActorRef with ``sender ! replyMsg``. You can also store the ActorRef for replying later, or passing on to other actors. If there is no sender (a message was sent without an actor or future context) then the sender defaults to a 'dead-letter' actor ref. @@ -383,8 +386,8 @@ defaults to a 'dead-letter' actor ref. .. code-block:: scala case request => - val result = process(request) - sender ! result // will have dead-letter actor as default + val result = process(request) + sender ! result // will have dead-letter actor as default Initial receive timeout ======================= @@ -396,26 +399,6 @@ object. .. includecode:: code/ActorDocSpec.scala#receive-timeout -Starting actors -=============== - -Actors are created & started by invoking the ``actorOf`` method. - -.. code-block:: scala - - val actor = actorOf[MyActor] - actor - -When you create the ``Actor`` then it will automatically call the ``def -preStart`` callback method on the ``Actor`` trait. This is an excellent place to -add initialization code for the actor. - -.. code-block:: scala - - override def preStart() = { - ... // initialization code - } - Stopping actors =============== @@ -442,17 +425,20 @@ take place. The ``Actor`` can use this callback to implement shutdown behavior. ... // clean up resources } +All Actors are stopped when the ``ActorSystem`` is stopped. +Supervised actors are stopped when the supervisor is stopped, i.e. children are stopped +when parent is stopped. + PoisonPill -========== +---------- You can also send an actor the ``akka.actor.PoisonPill`` message, which will stop the actor when the message is processed. ``PoisonPill`` is enqueued as ordinary messages and will be handled after messages that were already queued in the mailbox. -If the sender is a ``Future`` (e.g. the message is sent with ``?``), the -``Future`` will be completed with an +If the ``PoisonPill`` was sent with ``?``, the ``Future`` will be completed with an ``akka.actor.ActorKilledException("PoisonPill")``. @@ -465,7 +451,7 @@ Upgrade ------- Akka supports hotswapping the Actor’s message loop (e.g. its implementation) at -runtime: Invoke the ``become`` method from within the Actor. +runtime: Invoke the ``context.become`` method from within the Actor. Become takes a ``PartialFunction[Any, Unit]`` that implements the new message handler. The hotswapped code is kept in a Stack which can be @@ -499,7 +485,7 @@ Downgrade --------- Since the hotswapped code is pushed to a Stack you can downgrade the code as -well, all you need to do is to: Invoke the ``unbecome`` method from within the Actor. 
+well, all you need to do is to: Invoke the ``context.unbecome`` method from within the Actor. This will pop the Stack and replace the Actor's implementation with the ``PartialFunction[Any, Unit]`` that is at the top of the Stack. @@ -509,7 +495,7 @@ Here's how you use the ``unbecome`` method: .. code-block:: scala def receive = { - case "revert" => unbecome() + case "revert" => context.unbecome() } diff --git a/akka-docs/scala/code/ActorDocSpec.scala b/akka-docs/scala/code/ActorDocSpec.scala index b8a827b9bf..744f439c91 100644 --- a/akka-docs/scala/code/ActorDocSpec.scala +++ b/akka-docs/scala/code/ActorDocSpec.scala @@ -24,7 +24,7 @@ class MyActor extends Actor { } //#my-actor -case class DoIt(msg: Message) +case class DoIt(msg: ImmutableMessage) case class Message(s: String) //#context-actorOf @@ -41,7 +41,7 @@ class FirstActor extends Actor { sender ! replyMsg self.stop() } - def doSomeDangerousWork(msg: Message): String = { "done" } + def doSomeDangerousWork(msg: ImmutableMessage): String = { "done" } }) ! m case replyMsg: String ⇒ sender ! replyMsg @@ -52,9 +52,29 @@ class FirstActor extends Actor { //#system-actorOf object Main extends App { val system = ActorSystem("MySystem") - val myActor = system.actorOf[FirstActor] + val myActor = system.actorOf[MyActor] //#system-actorOf } + +class ReplyException extends Actor { + def receive = { + case _ ⇒ + //#reply-exception + try { + val result = operation() + sender ! result + } catch { + case e: Exception ⇒ + sender ! akka.actor.Status.Failure(e) + throw e + } + //#reply-exception + } + + def operation(): String = { "Hi" } + +} + //#swapper case object Swap class Swapper extends Actor { @@ -167,7 +187,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "creating actor with Props" in { //#creating-props import akka.actor.Props - val dispatcher = system.dispatcherFactory.fromConfig("my-dispatcher") + val dispatcher = system.dispatcherFactory.newFromConfig("my-dispatcher") val myActor = system.actorOf(Props[MyActor].withDispatcher(dispatcher), name = "myactor") //#creating-props @@ -230,6 +250,9 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { } } //#hot-swap-actor + + val actor = system.actorOf(new HotSwapActor) + } } diff --git a/akka-remote/src/main/scala/akka/remote/Remote.scala b/akka-remote/src/main/scala/akka/remote/Remote.scala index 4bf96bd823..49d85c030f 100644 --- a/akka-remote/src/main/scala/akka/remote/Remote.scala +++ b/akka-remote/src/main/scala/akka/remote/Remote.scala @@ -47,7 +47,7 @@ class Remote(val system: ActorSystemImpl, val nodename: String) { val remoteDaemonServiceName = "akka-system-remote-daemon".intern - val computeGridDispatcher = dispatcherFactory.fromConfig("akka.remote.compute-grid-dispatcher") + val computeGridDispatcher = dispatcherFactory.newFromConfig("akka.remote.compute-grid-dispatcher") // FIXME it is probably better to create another supervisor for handling the children created by handle_*, ticket #1408 private[remote] lazy val remoteDaemonSupervisor = system.actorOf(Props( From 1979b14061841061685ff39c2395a12196936942 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 8 Dec 2011 19:29:16 +0100 Subject: [PATCH 02/27] UnhandledMessageException extends RuntimeException. 
See #1453 --- akka-actor/src/main/scala/akka/actor/Actor.scala | 2 +- .../code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java | 2 +- akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index a7e6fac96c..88936f2c86 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -93,7 +93,7 @@ case class ActorInterruptedException private[akka] (cause: Throwable) /** * This message is thrown by default when an Actors behavior doesn't match a message */ -case class UnhandledMessageException(msg: Any, ref: ActorRef = null) extends Exception { +case class UnhandledMessageException(msg: Any, ref: ActorRef = null) extends RuntimeException { def this(msg: String) = this(msg, null) diff --git a/akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java b/akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java index dc88f3d78c..4d88ab36ca 100644 --- a/akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java +++ b/akka-docs/java/code/akka/docs/actor/MyReceivedTimeoutUntypedActor.java @@ -13,7 +13,7 @@ public class MyReceivedTimeoutUntypedActor extends UntypedActor { getContext().setReceiveTimeout(Duration.parse("30 seconds")); } - public void onReceive(Object message) throws Exception { + public void onReceive(Object message) { if (message.equals("Hello")) { getSender().tell("Hello world"); } else if (message == Actors.receiveTimeout()) { diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java b/akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java index 106407b4b8..7ec924da57 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorSwapper.java @@ -22,7 +22,7 @@ public class UntypedActorSwapper { public static class Swapper extends UntypedActor { LoggingAdapter log = Logging.getLogger(getContext().system(), this); - public void onReceive(Object message) throws Exception { + public void onReceive(Object message) { if (message == SWAP) { log.info("Hi"); getContext().become(new Procedure() { From b22679e43d9c10430da949e0756b56d0205efb46 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 9 Dec 2011 10:53:21 +0100 Subject: [PATCH 03/27] Reuse the deployment and default deployment configs when looping through all deployments. 
--- .../src/main/scala/akka/actor/Deployer.scala | 40 +++++++++---------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 7159c15ad6..3c30a87c72 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -36,7 +36,7 @@ trait ActorDeployer { class Deployer(val settings: ActorSystem.Settings, val eventStream: EventStream, val nodename: String) extends ActorDeployer { val deploymentConfig = new DeploymentConfig(nodename) - val log = Logging(eventStream, "Deployer") + private val log = Logging(eventStream, "Deployer") val instance: ActorDeployer = { val deployer = new LocalDeployer() @@ -74,11 +74,14 @@ class Deployer(val settings: ActorSystem.Settings, val eventStream: EventStream, private[akka] def lookupDeploymentFor(path: String): Option[Deploy] = instance.lookupDeploymentFor(path) - private[akka] def deploymentsInConfig: List[Deploy] = { - for (path ← pathsInConfig) yield lookupInConfig(path) + private def deploymentsInConfig: List[Deploy] = { + val allDeployments = settings.config.getConfig("akka.actor.deployment") + val defaultDeployment = allDeployments.getConfig("default") + // foreach akka.actor.deployment. + for (path ← pathsInConfig) yield parseDeploymentConfig(allDeployments.getConfig(path), defaultDeployment, path) } - private[akka] def pathsInConfig: List[String] = { + private def pathsInConfig: List[String] = { def pathSubstring(path: String) = { val i = path.indexOf(".") if (i == -1) path else path.substring(0, i) @@ -92,21 +95,16 @@ class Deployer(val settings: ActorSystem.Settings, val eventStream: EventStream, } /** - * Lookup deployment in 'akka.conf' configuration file. + * Parse deployment in supplied deployment Config, using the + * defaultDeployment Config as fallback. + * The path is the actor path and used for error reporting. + * */ - private[akka] def lookupInConfig(path: String, configuration: Config = settings.config): Deploy = { + private def parseDeploymentConfig(deployment: Config, defaultDeployment: Config, path: String): Deploy = { import scala.collection.JavaConverters._ import akka.util.ReflectiveAccess.getClassFor - val defaultDeploymentConfig = configuration.getConfig("akka.actor.deployment.default") - - // -------------------------------- - // akka.actor.deployment. - // -------------------------------- - val deploymentKey = "akka.actor.deployment." 
+ path - val deployment = configuration.getConfig(deploymentKey) - - val deploymentWithFallback = deployment.withFallback(defaultDeploymentConfig) + val deploymentWithFallback = deployment.withFallback(defaultDeployment) // -------------------------------- // akka.actor.deployment..router // -------------------------------- @@ -128,7 +126,7 @@ class Deployer(val settings: ActorSystem.Settings, val eventStream: EventStream, if (router == Direct) OneNrOfInstances else { def invalidNrOfInstances(wasValue: Any) = new ConfigurationException( - "Config option [" + deploymentKey + + "Deployment config option [" + path + ".nr-of-instances] needs to be either [\"auto\"] or [1-N] - was [" + wasValue + "]") @@ -155,7 +153,7 @@ class Deployer(val settings: ActorSystem.Settings, val eventStream: EventStream, case "" ⇒ None case impl ⇒ val implementationClass = getClassFor[Actor](impl).fold(e ⇒ throw new ConfigurationException( - "Config option [" + deploymentKey + ".create-as.class] load failed", e), identity) + "Deployment config option [" + path + ".create-as.class] load failed", e), identity) Some(ActorRecipe(implementationClass)) } @@ -167,7 +165,7 @@ class Deployer(val settings: ActorSystem.Settings, val eventStream: EventStream, // -------------------------------- def parseRemote: Scope = { def raiseRemoteNodeParsingError() = throw new ConfigurationException( - "Config option [" + deploymentKey + + "Deployment config option [" + path + ".remote.nodes] needs to be a list with elements on format \":\", was [" + remoteNodes.mkString(", ") + "]") val remoteAddresses = remoteNodes map { node ⇒ @@ -190,7 +188,7 @@ class Deployer(val settings: ActorSystem.Settings, val eventStream: EventStream, // -------------------------------- def parseCluster: Scope = { def raiseHomeConfigError() = throw new ConfigurationException( - "Config option [" + deploymentKey + + "Deployment config option [" + path + ".cluster.preferred-nodes] needs to be a list with elements on format\n'host:', 'ip:' or 'node:', was [" + clusterPreferredNodes + "]") @@ -222,7 +220,7 @@ class Deployer(val settings: ActorSystem.Settings, val eventStream: EventStream, case "transaction-log" ⇒ TransactionLog case "data-grid" ⇒ DataGrid case unknown ⇒ - throw new ConfigurationException("Config option [" + deploymentKey + + throw new ConfigurationException("Deployment config option [" + path + ".cluster.replication.storage] needs to be either [\"transaction-log\"] or [\"data-grid\"] - was [" + unknown + "]") } @@ -230,7 +228,7 @@ class Deployer(val settings: ActorSystem.Settings, val eventStream: EventStream, case "write-through" ⇒ WriteThrough case "write-behind" ⇒ WriteBehind case unknown ⇒ - throw new ConfigurationException("Config option [" + deploymentKey + + throw new ConfigurationException("Deployment config option [" + path + ".cluster.replication.strategy] needs to be either [\"write-through\"] or [\"write-behind\"] - was [" + unknown + "]") } From 9a677e528ebab61d1ec26d13d41884be3238e14f Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 9 Dec 2011 12:14:55 +0100 Subject: [PATCH 04/27] whitespace format --- akka-actor/src/main/scala/akka/actor/Deployer.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 3c30a87c72..52381dc60b 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -95,10 +95,10 @@ class Deployer(val 
settings: ActorSystem.Settings, val eventStream: EventStream, } /** - * Parse deployment in supplied deployment Config, using the - * defaultDeployment Config as fallback. - * The path is the actor path and used for error reporting. - * + * Parse deployment in supplied deployment Config, using the + * defaultDeployment Config as fallback. + * The path is the actor path and used for error reporting. + * */ private def parseDeploymentConfig(deployment: Config, defaultDeployment: Config, path: String): Deploy = { import scala.collection.JavaConverters._ From f28a1f3834ab8d05cb6755b620f68cc00b4ddc97 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 9 Dec 2011 12:16:13 +0100 Subject: [PATCH 05/27] Fixed another shutdown of dispatcher issue. See #1454 --- .../netty/akka/util/HashedWheelTimer.java | 4 ++-- .../akka/dispatch/AbstractDispatcher.scala | 20 ++++++++++++------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java b/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java index d0112ded79..6e54fa2233 100644 --- a/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java +++ b/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java @@ -482,10 +482,10 @@ public class HashedWheelTimer implements Timer { buf.append("deadline: "); if (remaining > 0) { buf.append(remaining); - buf.append(" ms later, "); + buf.append(" ns later, "); } else if (remaining < 0) { buf.append(-remaining); - buf.append(" ms ago, "); + buf.append(" ns ago, "); } else { buf.append("now, "); } diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index b7f2afc8f7..85094bfe3a 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -137,7 +137,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext shutdownScheduleUpdater.get(this) match { case UNSCHEDULED ⇒ if (shutdownScheduleUpdater.compareAndSet(this, UNSCHEDULED, SCHEDULED)) { - scheduler.scheduleOnce(shutdownTimeout, shutdownAction) + scheduleShutdownAction() () } else ifSensibleToDoSoThenScheduleShutdown() case SCHEDULED ⇒ @@ -148,6 +148,13 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext case _ ⇒ () } + private def scheduleShutdownAction(): Unit = { + // IllegalStateException is thrown if scheduler has been shutdown + try scheduler.scheduleOnce(shutdownTimeout, shutdownAction) catch { + case _: IllegalStateException ⇒ shutdown() + } + } + private final val taskCleanup: () ⇒ Unit = () ⇒ if (inhabitantsUpdater.decrementAndGet(this) == 0) ifSensibleToDoSoThenScheduleShutdown() @@ -185,9 +192,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext } case RESCHEDULED ⇒ if (shutdownScheduleUpdater.compareAndSet(MessageDispatcher.this, RESCHEDULED, SCHEDULED)) - try scheduler.scheduleOnce(shutdownTimeout, this) catch { - case _: IllegalStateException ⇒ shutdown() - } + scheduleShutdownAction() else run() } } @@ -279,9 +284,10 @@ abstract class MessageDispatcherConfigurator() { } } - def configureThreadPool(config: Config, - settings: Settings, - createDispatcher: ⇒ (ThreadPoolConfig) ⇒ MessageDispatcher): ThreadPoolConfigDispatcherBuilder = { + def configureThreadPool( + config: Config, + settings: Settings, + createDispatcher: ⇒ (ThreadPoolConfig) ⇒ 
MessageDispatcher): ThreadPoolConfigDispatcherBuilder = { import ThreadPoolConfigDispatcherBuilder.conf_? //Apply the following options to the config if they are present in the config From 09719af11a2d8e9799361e128432339acb0e423c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 9 Dec 2011 12:29:24 +0100 Subject: [PATCH 06/27] From review comments --- .../src/main/scala/akka/actor/ActorRef.scala | 23 +++++++++++++++---- akka-docs/java/untyped-actors.rst | 2 +- akka-docs/scala/actors.rst | 2 +- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 97e36086db..c6f6333822 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -82,17 +82,25 @@ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable final def tell(msg: Any, sender: ActorRef): Unit = this.!(msg)(sender) /** - * Akka Java API.
+ * Akka Java API. + * * Sends a message asynchronously returns a future holding the eventual reply message. - *
+ * * NOTE: * Use this method with care. In most cases it is better to use 'tell' together with the sender * parameter to implement non-blocking request/response message exchanges. - *
+ * * If you are sending messages using ask and using blocking operations on the Future, such as * 'get', then you have to use getContext().sender().tell(...) * in the target actor to send a reply message to the original sender, and thereby completing the Future, * otherwise the sender will block until the timeout expires. + * + * When using future callbacks, inside actors you need to carefully avoid closing over + * the containing actor’s reference, i.e. do not call methods or access mutable state + * on the enclosing actor from within the callback. This would break the actor + * encapsulation and may introduce synchronization bugs and race conditions because + * the callback will be scheduled concurrently to the enclosing actor. Unfortunately + * there is not yet a way to detect these illegal accesses at compile time. */ def ask(message: AnyRef, timeout: Timeout): Future[AnyRef] = ?(message, timeout).asInstanceOf[Future[AnyRef]] @@ -154,11 +162,18 @@ trait ScalaActorRef { ref: ActorRef ⇒ * NOTE: * Use this method with care. In most cases it is better to use '!' together with implicit or explicit * sender parameter to implement non-blocking request/response message exchanges. - *
+ * * If you are sending messages using ask and using blocking operations on the Future, such as * 'get', then you have to use getContext().sender().tell(...) * in the target actor to send a reply message to the original sender, and thereby completing the Future, * otherwise the sender will block until the timeout expires. + * + * When using future callbacks, inside actors you need to carefully avoid closing over + * the containing actor’s reference, i.e. do not call methods or access mutable state + * on the enclosing actor from within the callback. This would break the actor + * encapsulation and may introduce synchronization bugs and race conditions because + * the callback will be scheduled concurrently to the enclosing actor. Unfortunately + * there is not yet a way to detect these illegal accesses at compile time. */ def ?(message: Any)(implicit timeout: Timeout): Future[Any] diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst index 2e6e68b215..4324aadf19 100644 --- a/akka-docs/java/untyped-actors.rst +++ b/akka-docs/java/untyped-actors.rst @@ -268,7 +268,7 @@ Gives you a way to avoid blocking. the containing actor’s reference, i.e. do not call methods or access mutable state on the enclosing actor from within the callback. This would break the actor encapsulation and may introduce synchronization bugs and race conditions because - the callback will be scheduled concurrently to the enclosing actor. Unfortunately + the callback will be scheduled concurrently to the enclosing actor. Unfortunately there is not yet a way to detect these illegal accesses at compile time. See also: :ref:`jmm-shared-state` diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 27aa60ea3a..eb4649e2c8 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -316,7 +316,7 @@ Gives you a way to avoid blocking. the containing actor’s reference, i.e. do not call methods or access mutable state on the enclosing actor from within the callback. This would break the actor encapsulation and may introduce synchronization bugs and race conditions because - the callback will be scheduled concurrently to the enclosing actor. Unfortunately + the callback will be scheduled concurrently to the enclosing actor. Unfortunately there is not yet a way to detect these illegal accesses at compile time. See also: :ref:`jmm-shared-state` From 9fdf9a9c663d91eb0c05f4493d482e77fbbb77dc Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 9 Dec 2011 12:41:08 +0100 Subject: [PATCH 07/27] Removed mist from docs. 
See #1455 --- .../additional/external-sample-projects.rst | 16 - .../intro/getting-started-first-java.rst | 1 - .../getting-started-first-scala-eclipse.rst | 1 - .../intro/getting-started-first-scala.rst | 1 - akka-docs/intro/getting-started.rst | 1 - akka-docs/scala/http.rst | 363 ------------------ 6 files changed, 383 deletions(-) diff --git a/akka-docs/additional/external-sample-projects.rst b/akka-docs/additional/external-sample-projects.rst index 35a54c3c80..80e56823af 100644 --- a/akka-docs/additional/external-sample-projects.rst +++ b/akka-docs/additional/external-sample-projects.rst @@ -56,22 +56,6 @@ Sample parallel computing with Akka and Scala API ``_ -Akka, Facebook Graph API, WebGL sample -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Showcasing Akka Mist HTTP module -``_ - -Akka Mist Sample -^^^^^^^^^^^^^^^^ - -``_ - -Another Akka Mist Sample -^^^^^^^^^^^^^^^^^^^^^^^^ - -``_ - Bank application ^^^^^^^^^^^^^^^^ diff --git a/akka-docs/intro/getting-started-first-java.rst b/akka-docs/intro/getting-started-first-java.rst index ee890d723d..9ae9e87441 100644 --- a/akka-docs/intro/getting-started-first-java.rst +++ b/akka-docs/intro/getting-started-first-java.rst @@ -108,7 +108,6 @@ Akka is very modular and has many JARs for containing different features. The co - ``akka-typed-actor-2.0-SNAPSHOT.jar`` -- Typed Actors - ``akka-remote-2.0-SNAPSHOT.jar`` -- Remote Actors - ``akka-stm-2.0-SNAPSHOT.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures -- ``akka-http-2.0-SNAPSHOT.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration - ``akka-slf4j-2.0-SNAPSHOT.jar`` -- SLF4J Event Handler Listener for logging with SLF4J - ``akka-testkit-2.0-SNAPSHOT.jar`` -- Toolkit for testing Actors diff --git a/akka-docs/intro/getting-started-first-scala-eclipse.rst b/akka-docs/intro/getting-started-first-scala-eclipse.rst index d764e0cdae..45fbfd24ce 100644 --- a/akka-docs/intro/getting-started-first-scala-eclipse.rst +++ b/akka-docs/intro/getting-started-first-scala-eclipse.rst @@ -93,7 +93,6 @@ Akka is very modular and has many JARs for containing different features. 
The co - ``akka-typed-actor-2.0-SNAPSHOT.jar`` -- Typed Actors - ``akka-remote-2.0-SNAPSHOT.jar`` -- Remote Actors - ``akka-stm-2.0-SNAPSHOT.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures -- ``akka-http-2.0-SNAPSHOT.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration - ``akka-slf4j-2.0-SNAPSHOT.jar`` -- SLF4J Event Handler Listener for logging with SLF4J - ``akka-testkit-2.0-SNAPSHOT.jar`` -- Toolkit for testing Actors diff --git a/akka-docs/intro/getting-started-first-scala.rst b/akka-docs/intro/getting-started-first-scala.rst index 91a730819f..73aace96bf 100644 --- a/akka-docs/intro/getting-started-first-scala.rst +++ b/akka-docs/intro/getting-started-first-scala.rst @@ -114,7 +114,6 @@ core distribution has seven modules: - ``akka-typed-actor-2.0-SNAPSHOT.jar`` -- Typed Actors - ``akka-remote-2.0-SNAPSHOT.jar`` -- Remote Actors - ``akka-stm-2.0-SNAPSHOT.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures -- ``akka-http-2.0-SNAPSHOT.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration - ``akka-slf4j-2.0-SNAPSHOT.jar`` -- SLF4J Event Handler Listener for logging with SLF4J - ``akka-testkit-2.0-SNAPSHOT.jar`` -- Toolkit for testing Actors diff --git a/akka-docs/intro/getting-started.rst b/akka-docs/intro/getting-started.rst index 31d579f3ed..5eeec4a12a 100644 --- a/akka-docs/intro/getting-started.rst +++ b/akka-docs/intro/getting-started.rst @@ -49,7 +49,6 @@ Akka is very modular and has many JARs for containing different features. - ``akka-typed-actor-2.0-SNAPSHOT.jar`` -- Typed Actors - ``akka-remote-2.0-SNAPSHOT.jar`` -- Remote Actors - ``akka-stm-2.0-SNAPSHOT.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures -- ``akka-http-2.0-SNAPSHOT.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration - ``akka-slf4j-2.0-SNAPSHOT.jar`` -- SLF4J Event Handler Listener - ``akka-testkit-2.0-SNAPSHOT.jar`` -- Toolkit for testing Actors - ``akka-camel-2.0-SNAPSHOT.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world) diff --git a/akka-docs/scala/http.rst b/akka-docs/scala/http.rst index 31cdbd9430..85173516db 100644 --- a/akka-docs/scala/http.rst +++ b/akka-docs/scala/http.rst @@ -104,367 +104,4 @@ If you want to use jetty-run in SBT you need to exclude the version of Jetty tha -Mist - Lightweight Asynchronous HTTP ------------------------------------- -The *Mist* layer was developed to provide a direct connection between the servlet container and Akka actors with the goal of handling the incoming HTTP request as quickly as possible in an asynchronous manner. The motivation came from the simple desire to treat REST calls as completable futures, that is, effectively passing the request along an actor message chain to be resumed at the earliest possible time. The primary constraint was to not block any existing threads and secondarily, not create additional ones. Mist is very simple and works both with Jetty Continuations as well as with Servlet API 3.0 (tested using Jetty-8.0.0.M1). When the servlet handles a request, a message is created typed to represent the method (e.g. Get, Post, etc.), the request is suspended and the message is sent (fire-and-forget) to the *root endpoint* actor. That's it. There are no POJOs required to host the service endpoints and the request is treated as any other. 
The message can be resumed (completed) using a number of helper methods that set the proper HTTP response status code. - -Complete runnable example can be found here: ``_ - -Endpoints -^^^^^^^^^ - -Endpoints are actors that handle request messages. Minimally there must be an instance of the *RootEndpoint* and then at least one more (to implement your services). - -Preparations -^^^^^^^^^^^^ - -In order to use Mist you have to register the MistServlet in *web.xml* or do the analogous for the embedded server if running in Akka Microkernel: - -.. code-block:: xml - - - akkaMistServlet - akka.http.AkkaMistServlet - - root-endpoint - address_of_root_endpoint_actor - - - - - - akkaMistServlet - /* - - -Then you also have to add the following dependencies to your SBT build definition: - -.. code-block:: scala - - val jettyWebapp = "org.eclipse.jetty" % "jetty-webapp" % "8.0.0.M2" % "test" - val javaxServlet30 = "org.mortbay.jetty" % "servlet-api" % "3.0.20100224" % "provided" - -Attention: You have to use SBT 0.7.5.RC0 or higher in order to be able to work with that Jetty version. - -An Example -^^^^^^^^^^ - -Startup -******* - -In this example, we'll use the built-in *RootEndpoint* class and implement our own service from that. Here the services are started in the boot loader and attached to the top level supervisor. - -.. code-block:: scala - - class Boot { - val factory = SupervisorFactory( - SupervisorConfig( - OneForOneStrategy(List(classOf[Exception]), 3, 100), - // - // in this particular case, just boot the built-in default root endpoint - // - Supervise( - actorOf[RootEndpoint], - Permanent) :: - Supervise( - actorOf[SimpleAkkaAsyncHttpService], - Permanent) - :: Nil)) - factory.newInstance.start - } - -**Defining the Endpoint** -The service is an actor that mixes in the *Endpoint* trait. Here the dispatcher is taken from the Akka configuration file which allows for custom tuning of these actors, though naturally, any dispatcher can be used. - -URI Handling -************ - -Rather than use traditional annotations to pair HTTP request and class methods, Mist uses hook and provide functions. This offers a great deal of flexibility in how a given endpoint responds to a URI. A hook function is simply a filter, returning a Boolean to indicate whether or not the endpoint will handle the URI. This can be as simple as a straight match or as fancy as you need. If a hook for a given URI returns true, the matching provide function is called to obtain an actor to which the message can be delivered. Notice in the example below, in one case, the same actor is returned and in the other, a new actor is created and returned. Note that URI hooking is non-exclusive and a message can be delivered to multiple actors (see next example). - -Plumbing -******** - -Hook and provider functions are attached to a parent endpoint, in this case the root, by sending it the **Endpoint.Attach** message. -Finally, bind the *handleHttpRequest* function of the *Endpoint* trait to the actor's *receive* function and we're done. - -.. 
code-block:: scala - - class SimpleAkkaAsyncHttpService extends Actor with Endpoint { - final val ServiceRoot = "/simple/" - final val ProvideSameActor = ServiceRoot + "same" - final val ProvideNewActor = ServiceRoot + "new" - - // - // use the configurable dispatcher - // - self.dispatcher = Endpoint.Dispatcher - - // - // there are different ways of doing this - in this case, we'll use a single hook function - // and discriminate in the provider; alternatively we can pair hooks & providers - // - def hook(uri: String): Boolean = ((uri == ProvideSameActor) || (uri == ProvideNewActor)) - def provide(uri: String): ActorRef = { - if (uri == ProvideSameActor) same - else actorOf[BoringActor] - } - - // - // this is where you want attach your endpoint hooks - // - override def preStart() = { - // - // we expect there to be one root and that it's already been started up - // obviously there are plenty of other ways to obtaining this actor - // the point is that we need to attach something (for starters anyway) - // to the root - // - val root = Actor.registry.actorsFor(classOf[RootEndpoint]).head - root ! Endpoint.Attach(hook, provide) - } - - // - // since this actor isn't doing anything else (i.e. not handling other messages) - // just assign the receive func like so... - // otherwise you could do something like: - // def myrecv = {...} - // def receive = myrecv orElse _recv - // - def receive = handleHttpRequest - - // - // this will be our "same" actor provided with ProvideSameActor endpoint is hit - // - lazy val same = actorOf[BoringActor] - } - -Handling requests -***************** - -Messages are handled just as any other that are received by your actor. The servlet requests and response are not hidden and can be accessed directly as shown below. - -.. code-block:: scala - - /** - * Define a service handler to respond to some HTTP requests - */ - class BoringActor extends Actor { - import java.util.Date - import javax.ws.rs.core.MediaType - - var gets = 0 - var posts = 0 - var lastget: Option[Date] = None - var lastpost: Option[Date] = None - - def receive = { - // handle a get request - case get: Get => - // the content type of the response. - // similar to @Produces annotation - get.response.setContentType(MediaType.TEXT_HTML) - - // - // "work" - // - gets += 1 - lastget = Some(new Date) - - // - // respond - // - val res = "
Gets: "+gets+" Posts: "+posts+"
Last Get: "+lastget.getOrElse("Never").toString+" Last Post: "+lastpost.getOrElse("Never").toString+"
" - get.OK(res) - - // handle a post request - case post:Post => - // the expected content type of the request - // similar to @Consumes - if (post.request.getContentType startsWith MediaType.APPLICATION_FORM_URLENCODED) { - // the content type of the response. - // similar to @Produces annotation - post.response.setContentType(MediaType.TEXT_HTML) - - // "work" - posts += 1 - lastpost = Some(new Date) - - // respond - val res = "
Gets: "+gets+" Posts: "+posts+"
Last Get: "+lastget.getOrElse("Never").toString+" Last Post: "+lastpost.getOrElse("Never").toString+"
" - post.OK(res) - } else { - post.UnsupportedMediaType("Content-Type request header missing or incorrect (was '" + post.request.getContentType + "' should be '" + MediaType.APPLICATION_FORM_URLENCODED + "')") - } - } - - case other: RequestMethod => - other.NotAllowed("Invalid method for this endpoint") - } - } - -**Timeouts** -Messages will expire according to the default timeout (specified in akka.conf). Individual messages can also be updated using the *timeout* method. One thing that may seem unexpected is that when an expired request returns to the caller, it will have a status code of OK (200). Mist will add an HTTP header to such responses to help clients, if applicable. By default, the header will be named "Async-Timeout" with a value of "expired" - both of which are configurable. - -Another Example - multiplexing handlers -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As noted above, hook functions are non-exclusive. This means multiple actors can handle the same request if desired. In this next example, the hook functions are identical (yes, the same one could have been reused) and new instances of both A and B actors will be created to handle the Post. A third mediator is inserted to coordinate the results of these actions and respond to the caller. - -.. code-block:: scala - - package sample.mist - - import akka.actor._ - import akka.actor.Actor._ - import akka.http._ - - import javax.servlet.http.HttpServletResponse - - class InterestingService extends Actor with Endpoint { - final val ServiceRoot = "/interesting/" - final val Multi = ServiceRoot + "multi/" - // use the configurable dispatcher - self.dispatcher = Endpoint.Dispatcher - - // - // The "multi" endpoint shows forking off multiple actions per request - // It is triggered by POSTing to http://localhost:9998/interesting/multi/{foo} - // Try with/without a header named "Test-Token" - // Try with/without a form parameter named "Data" - def hookMultiActionA(uri: String): Boolean = uri startsWith Multi - def provideMultiActionA(uri: String): ActorRef = actorOf(new ActionAActor(complete)) - - def hookMultiActionB(uri: String): Boolean = uri startsWith Multi - def provideMultiActionB(uri: String): ActorRef = actorOf(new ActionBActor(complete)) - - // - // this is where you want attach your endpoint hooks - // - override def preStart() = { - // - // we expect there to be one root and that it's already been started up - // obviously there are plenty of other ways to obtaining this actor - // the point is that we need to attach something (for starters anyway) - // to the root - // - val root = Actor.registry.actorsFor(classOf[RootEndpoint]).head - root ! Endpoint.Attach(hookMultiActionA, provideMultiActionA) - root ! Endpoint.Attach(hookMultiActionB, provideMultiActionB) - } - - // - // since this actor isn't doing anything else (i.e. not handling other messages) - // just assign the receive func like so... 
- // otherwise you could do something like: - // def myrecv = {...} - // def receive = myrecv orElse handleHttpRequest - // - def receive = handleHttpRequest - - // - // this guy completes requests after other actions have occurred - // - lazy val complete = actorOf[ActionCompleteActor] - } - - class ActionAActor(complete:ActorRef) extends Actor { - import javax.ws.rs.core.MediaType - - def receive = { - // handle a post request - case post: Post => - // the expected content type of the request - // similar to @Consumes - if (post.request.getContentType startsWith MediaType.APPLICATION_FORM_URLENCODED) { - // the content type of the response. - // similar to @Produces annotation - post.response.setContentType(MediaType.TEXT_HTML) - - // get the resource name - val name = post.request.getRequestURI.substring("/interesting/multi/".length) - if (name.length % 2 == 0) post.response.getWriter.write("
Action A verified request.
") - else post.response.getWriter.write("
Action A could not verify request.
") - - // notify the next actor to coordinate the response - complete ! post - } else post.UnsupportedMediaType("Content-Type request header missing or incorrect (was '" + post.request.getContentType + "' should be '" + MediaType.APPLICATION_FORM_URLENCODED + "')") - } - } - } - - class ActionBActor(complete:ActorRef) extends Actor { - import javax.ws.rs.core.MediaType - - def receive = { - // handle a post request - case post: Post => - // the expected content type of the request - // similar to @Consumes - if (post.request.getContentType startsWith MediaType.APPLICATION_FORM_URLENCODED) { - // pull some headers and form params - def default(any: Any): String = "" - - val token = post.getHeaderOrElse("Test-Token", default) - val data = post.getParameterOrElse("Data", default) - - val (resp, status) = (token, data) match { - case ("", _) => ("No token provided", HttpServletResponse.SC_FORBIDDEN) - case (_, "") => ("No data", HttpServletResponse.SC_ACCEPTED) - case _ => ("Data accepted", HttpServletResponse.SC_OK) - } - - // update the response body - post.response.getWriter.write(resp) - - // notify the next actor to coordinate the response - complete ! (post, status) - } else post.UnsupportedMediaType("Content-Type request header missing or incorrect (was '" + post.request.getContentType + "' should be '" + MediaType.APPLICATION_FORM_URLENCODED + "')") - } - - case other: RequestMethod => - other.NotAllowed("Invalid method for this endpoint") - } - } - - class ActionCompleteActor extends Actor { - import collection.mutable.HashMap - - val requests = HashMap.empty[Int, Int] - - def receive = { - case req: RequestMethod => - if (requests contains req.hashCode) complete(req) - else requests += (req.hashCode -> 0) - - case t: Tuple2[RequestMethod, Int] => - if (requests contains t._1.hashCode) complete(t._1) - else requests += (t._1.hashCode -> t._2) - } - - def complete(req: RequestMethod) = requests.remove(req.hashCode) match { - case Some(HttpServletResponse.SC_FORBIDDEN) => req.Forbidden("") - case Some(HttpServletResponse.SC_ACCEPTED) => req.Accepted("") - case Some(_) => req.OK("") - case _ => {} - } - } - -Examples -^^^^^^^^ - -Using the Akka Mist module with OAuth -************************************* - -``_ - -Using the Akka Mist module with the Facebook Graph API and WebGL -**************************************************************** - -Example project using Akka Mist with the Facebook Graph API and WebGL -``_ - -Using Akka Mist on Amazon ElasticBeanstalk -****************************************** - -``_ From 884dc43a7d2541da6e0b87a19b9813040a643845 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 9 Dec 2011 13:27:27 +0100 Subject: [PATCH 08/27] DOC: Replace all akka.conf references. Fixes #1469 * Let us use :ref:`configuration` in all places to refer to the configuration. 
--- akka-docs/cluster/durable-mailbox.rst | 14 +++++--------- akka-docs/dev/multi-jvm-testing.rst | 8 ++++---- akka-docs/disabled/clustering.rst | 6 +++--- akka-docs/general/configuration.rst | 2 ++ akka-docs/general/event-handler.rst | 3 ++- akka-docs/general/slf4j.rst | 3 ++- akka-docs/intro/deployment-scenarios.rst | 6 +++--- .../intro/getting-started-first-java.rst | 8 -------- .../getting-started-first-scala-eclipse.rst | 7 +------ .../intro/getting-started-first-scala.rst | 12 ------------ akka-docs/java/dispatchers.rst | 19 ++++++++----------- akka-docs/java/futures.rst | 2 +- akka-docs/java/stm.rst | 18 +----------------- akka-docs/java/typed-actors.rst | 2 +- akka-docs/modules/camel.rst | 12 +++++------- akka-docs/modules/microkernel.rst | 7 +++---- akka-docs/scala/dispatchers.rst | 13 ++----------- akka-docs/scala/fsm.rst | 2 +- akka-docs/scala/futures.rst | 2 +- akka-docs/scala/stm.rst | 18 +----------------- akka-docs/scala/testing.rst | 6 +++--- akka-docs/scala/typed-actors.rst | 2 +- 22 files changed, 50 insertions(+), 122 deletions(-) diff --git a/akka-docs/cluster/durable-mailbox.rst b/akka-docs/cluster/durable-mailbox.rst index 774008c6da..875d6ea9fb 100644 --- a/akka-docs/cluster/durable-mailbox.rst +++ b/akka-docs/cluster/durable-mailbox.rst @@ -74,8 +74,7 @@ storage. Read more about that in the :ref:`dispatchers-scala` documentation. You can also configure and tune the file-based durable mailbox. This is done in -the ``akka.actor.mailbox.file-based`` section in the ``akka.conf`` configuration -file. +the ``akka.actor.mailbox.file-based`` section in the :ref:`configuration`. .. code-block:: none @@ -125,8 +124,7 @@ or for a thread-based durable dispatcher:: RedisDurableMailboxStorage) You also need to configure the IP and port for the Redis server. This is done in -the ``akka.actor.mailbox.redis`` section in the ``akka.conf`` configuration -file. +the ``akka.actor.mailbox.redis`` section in the :ref:`configuration`. .. code-block:: none @@ -169,8 +167,7 @@ or for a thread-based durable dispatcher:: ZooKeeperDurableMailboxStorage) You also need to configure ZooKeeper server addresses, timeouts, etc. This is -done in the ``akka.actor.mailbox.zookeeper`` section in the ``akka.conf`` -configuration file. +done in the ``akka.actor.mailbox.zookeeper`` section in the :ref:`configuration`. .. code-block:: none @@ -208,7 +205,7 @@ or for a thread-based durable dispatcher. :: You also need to configure the IP, and port, and so on, for the Beanstalk server. This is done in the ``akka.actor.mailbox.beanstalk`` section in the -``akka.conf`` configuration file. +:ref:`configuration`. .. code-block:: none @@ -238,8 +235,7 @@ features cohesive to a fast, reliable & durable queueing mechanism which the Akk Akka's implementations of MongoDB mailboxes are built on top of the purely asynchronous MongoDB driver (often known as `Hammersmith `_ and ``com.mongodb.async``) and as such are purely callback based with a Netty network layer. This makes them extremely fast & lightweight versus building on other MongoDB implementations such as `mongo-java-driver `_ and `Casbah `_. You will need to configure the URI for the MongoDB server, using the URI Format specified in the `MongoDB Documentation `_. This is done in -the ``akka.actor.mailbox.mongodb`` section in the ``akka.conf`` configuration -file. +the ``akka.actor.mailbox.mongodb`` section in the :ref:`configuration`. .. 
code-block:: none diff --git a/akka-docs/dev/multi-jvm-testing.rst b/akka-docs/dev/multi-jvm-testing.rst index 7e79f65bfa..dade7c30c1 100644 --- a/akka-docs/dev/multi-jvm-testing.rst +++ b/akka-docs/dev/multi-jvm-testing.rst @@ -35,7 +35,7 @@ multi-JVM testing:: base = file("akka-cluster"), settings = defaultSettings ++ MultiJvmPlugin.settings ++ Seq( extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src => - (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq + (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dconfig.file=" + _.absolutePath).toSeq }, test in Test <<= (test in Test) dependsOn (test in MultiJvm) ) @@ -176,10 +176,10 @@ and add the options to them. -Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 -Overriding akka.conf options ----------------------------- +Overriding configuration options +-------------------------------- -You can also override the options in the ``akka.conf`` file with different options for each +You can also override the options in the :ref:`configuration` file with different options for each spawned JVM. You do that by creating a file named after the node in the test with suffix ``.conf`` and put them in the same directory as the test . diff --git a/akka-docs/disabled/clustering.rst b/akka-docs/disabled/clustering.rst index f384a37ca0..559233143d 100644 --- a/akka-docs/disabled/clustering.rst +++ b/akka-docs/disabled/clustering.rst @@ -48,8 +48,8 @@ cluster node. Cluster configuration ~~~~~~~~~~~~~~~~~~~~~ -Cluster is configured in the ``akka.cloud.cluster`` section in the ``akka.conf`` -configuration file. Here you specify the default addresses to the ZooKeeper +Cluster is configured in the ``akka.cloud.cluster`` section in the :ref:`configuration`. +Here you specify the default addresses to the ZooKeeper servers, timeouts, if compression should be on or off, and so on. .. code-block:: conf @@ -594,7 +594,7 @@ Consolidation and management of the Akka configuration file Not implemented yet. -The actor configuration file ``akka.conf`` will also be stored into the cluster +The actor :ref:`configuration` file will also be stored into the cluster and it will be possible to have one single configuration file, stored on the server, and pushed out to all the nodes that joins the cluster. Each node only needs to be configured with the ZooKeeper server address and the master configuration will only reside in one single place diff --git a/akka-docs/general/configuration.rst b/akka-docs/general/configuration.rst index 0e96f8165e..9328e19561 100644 --- a/akka-docs/general/configuration.rst +++ b/akka-docs/general/configuration.rst @@ -1,3 +1,5 @@ +.. _configuration: + Configuration ============= diff --git a/akka-docs/general/event-handler.rst b/akka-docs/general/event-handler.rst index c23911939e..e6aa422b37 100644 --- a/akka-docs/general/event-handler.rst +++ b/akka-docs/general/event-handler.rst @@ -9,7 +9,8 @@ There is an Event Handler which takes the place of a logging system in Akka: akka.event.EventHandler -You can configure which event handlers should be registered at boot time. That is done using the 'event-handlers' element in akka.conf. Here you can also define the log level. +You can configure which event handlers should be registered at boot time. That is done using the 'event-handlers' element in +the :ref:`configuration`. Here you can also define the log level. .. 
code-block:: ruby diff --git a/akka-docs/general/slf4j.rst b/akka-docs/general/slf4j.rst index 876b139d65..296cbb7b48 100644 --- a/akka-docs/general/slf4j.rst +++ b/akka-docs/general/slf4j.rst @@ -14,7 +14,8 @@ also need a SLF4J backend, we recommend `Logback `_: Event Handler ------------- -This module includes a SLF4J Event Handler that works with Akka's standard Event Handler. You enabled it in the 'event-handlers' element in akka.conf. Here you can also define the log level. +This module includes a SLF4J Event Handler that works with Akka's standard Event Handler. You enabled it in the 'event-handlers' element in +the :ref:`configuration`. Here you can also define the log level. .. code-block:: ruby diff --git a/akka-docs/intro/deployment-scenarios.rst b/akka-docs/intro/deployment-scenarios.rst index 829d93829e..a5da196d24 100644 --- a/akka-docs/intro/deployment-scenarios.rst +++ b/akka-docs/intro/deployment-scenarios.rst @@ -29,12 +29,12 @@ Actors as services The simplest way you can use Akka is to use the actors as services in your Web application. All that’s needed to do that is to put the Akka charts as well as -its dependency jars into ``WEB-INF/lib``. You also need to put the ``akka.conf`` -config file in the ``$AKKA_HOME/config`` directory. Now you can create your +its dependency jars into ``WEB-INF/lib``. You also need to put the :ref:`configuration` +file in the ``$AKKA_HOME/config`` directory. Now you can create your Actors as regular services referenced from your Web application. You should also be able to use the Remoting service, e.g. be able to make certain Actors remote on other hosts. Please note that remoting service does not speak HTTP over port -80, but a custom protocol over the port is specified in ``akka.conf``. +80, but a custom protocol over the port is specified in :ref:`configuration`. Using Akka as a stand alone microkernel diff --git a/akka-docs/intro/getting-started-first-java.rst b/akka-docs/intro/getting-started-first-java.rst index 9ae9e87441..bf694e5fe2 100644 --- a/akka-docs/intro/getting-started-first-java.rst +++ b/akka-docs/intro/getting-started-first-java.rst @@ -729,18 +729,12 @@ we compiled ourselves:: $ java \ -cp lib/scala-library.jar:lib/akka/akka-actor-2.0-SNAPSHOT.jar:tutorial \ akka.tutorial.java.first.Pi - AKKA_HOME is defined as [/Users/jboner/tools/akka-actors-2.0-SNAPSHOT] - loading config from [/Users/jboner/tools/akka-actors-2.0-SNAPSHOT/config/akka.conf]. Pi estimate: 3.1435501812459323 Calculation time: 822 millis Yippee! It is working. -If you have not defined the ``AKKA_HOME`` environment variable then Akka can't -find the ``akka.conf`` configuration file and will print out a ``Can’t load -akka.conf`` warning. This is ok since it will then just use the defaults. - Run it inside Maven ------------------- @@ -758,8 +752,6 @@ When this in done we can run our application directly inside Maven:: Yippee! It is working. -If you have not defined an the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Can’t load akka.conf`` warning. This is ok since it will then just use the defaults. 
- Conclusion ---------- diff --git a/akka-docs/intro/getting-started-first-scala-eclipse.rst b/akka-docs/intro/getting-started-first-scala-eclipse.rst index 45fbfd24ce..487d8b4509 100644 --- a/akka-docs/intro/getting-started-first-scala-eclipse.rst +++ b/akka-docs/intro/getting-started-first-scala-eclipse.rst @@ -382,15 +382,10 @@ Run it from Eclipse Eclipse builds your project on every save when ``Project/Build Automatically`` is set. If not, bring you project up to date by clicking ``Project/Build Project``. If there are no compilation errors, you can right-click in the editor where ``Pi`` is defined, and choose ``Run as.. /Scala application``. If everything works fine, you should see:: - AKKA_HOME is defined as [/Users/jboner/tools/akka-actors-2.0-SNAPSHOT] - loading config from [/Users/jboner/tools/akka-actors-2.0-SNAPSHOT/config/akka.conf]. - Pi estimate: 3.1435501812459323 Calculation time: 858 millis -If you have not defined an the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Can’t load akka.conf`` warning. This is ok since it will then just use the defaults. - -You can also define a new Run configuration, by going to ``Run/Run Configurations``. Create a new ``Scala application`` and choose the tutorial project and the main class to be ``akkatutorial.Pi``. You can pass additional command line arguments to the JVM on the ``Arguments`` page, for instance to define where ``akka.conf`` is: +You can also define a new Run configuration, by going to ``Run/Run Configurations``. Create a new ``Scala application`` and choose the tutorial project and the main class to be ``akkatutorial.Pi``. You can pass additional command line arguments to the JVM on the ``Arguments`` page, for instance to define where :ref:`configuration` is: .. image:: ../images/run-config.png diff --git a/akka-docs/intro/getting-started-first-scala.rst b/akka-docs/intro/getting-started-first-scala.rst index 73aace96bf..6a8720f843 100644 --- a/akka-docs/intro/getting-started-first-scala.rst +++ b/akka-docs/intro/getting-started-first-scala.rst @@ -424,19 +424,12 @@ compiled ourselves:: $ java \ -cp lib/scala-library.jar:lib/akka/akka-actor-2.0-SNAPSHOT.jar:. \ akka.tutorial.first.scala.Pi - AKKA_HOME is defined as [/Users/jboner/tools/akka-actors-2.0-SNAPSHOT] - loading config from [/Users/jboner/tools/akka-actors-2.0-SNAPSHOT/config/akka.conf]. Pi estimate: 3.1435501812459323 Calculation time: 858 millis Yippee! It is working. -If you have not defined the ``AKKA_HOME`` environment variable then Akka can't -find the ``akka.conf`` configuration file and will print out a ``Can’t load -akka.conf`` warning. This is ok since it will then just use the defaults. - - Run it inside SBT ================= @@ -456,11 +449,6 @@ When this in done we can run our application directly inside SBT:: Yippee! It is working. -If you have not defined an the ``AKKA_HOME`` environment variable then Akka -can't find the ``akka.conf`` configuration file and will print out a ``Can’t -load akka.conf`` warning. This is ok since it will then just use the defaults. - - Conclusion ========== diff --git a/akka-docs/java/dispatchers.rst b/akka-docs/java/dispatchers.rst index dc2684f9d8..883efe60ba 100644 --- a/akka-docs/java/dispatchers.rst +++ b/akka-docs/java/dispatchers.rst @@ -19,7 +19,7 @@ Default dispatcher ------------------ For most scenarios the default settings are the best. Here we have one single event-based dispatcher for all Actors created. 
The default dispatcher used is "GlobalDispatcher" which also is retrievable in ``akka.dispatch.Dispatchers.globalDispatcher``. -The Dispatcher specified in the akka.conf as "default-dispatcher" is as ``Dispatchers.defaultGlobalDispatcher``. +The Dispatcher specified in the :ref:`configuration` as "default-dispatcher" is as ``Dispatchers.defaultGlobalDispatcher``. The "GlobalDispatcher" is not configurable but will use default parameters given by Akka itself. @@ -124,16 +124,13 @@ Here is an example: ... } -This 'Dispatcher' allows you to define the 'throughput' it should have. This defines the number of messages for a specific Actor the dispatcher should process in one single sweep. -Setting this to a higher number will increase throughput but lower fairness, and vice versa. If you don't specify it explicitly then it uses the default value defined in the 'akka.conf' configuration file: - -.. code-block:: xml - - actor { - throughput = 5 - } - -If you don't define a the 'throughput' option in the configuration file then the default value of '5' will be used. +The standard :class:`Dispatcher` allows you to define the ``throughput`` it +should have, as shown above. This defines the number of messages for a specific +Actor the dispatcher should process in one single sweep; in other words, the +dispatcher will bunch up to ``throughput`` message invocations together when +having elected an actor to run. Setting this to a higher number will increase +throughput but lower fairness, and vice versa. If you don't specify it explicitly +then it uses the value (5) defined for ``default-dispatcher`` in the :ref:`configuration`. Browse the :ref:`scaladoc` or look at the code for all the options available. diff --git a/akka-docs/java/futures.rst b/akka-docs/java/futures.rst index 2715ff33d1..694a15c8b0 100644 --- a/akka-docs/java/futures.rst +++ b/akka-docs/java/futures.rst @@ -42,7 +42,7 @@ A common use case within Akka is to have some computation performed concurrently return "Hello" + "World!"; } }); - String result = f.get(); //Blocks until timeout, default timeout is set in akka.conf, otherwise 5 seconds + String result = f.get(); //Blocks until timeout, default timeout is set in :ref:`configuration`, otherwise 5 seconds In the above code the block passed to ``future`` will be executed by the default ``Dispatcher``, with the return value of the block used to complete the ``Future`` (in this case, the result would be the string: "HelloWorld"). Unlike a ``Future`` that is returned from an ``UntypedActor``, this ``Future`` is properly typed, and we also avoid the overhead of managing an ``UntypedActor``. diff --git a/akka-docs/java/stm.rst b/akka-docs/java/stm.rst index 67917e7e77..3cbf390bd1 100644 --- a/akka-docs/java/stm.rst +++ b/akka-docs/java/stm.rst @@ -182,23 +182,7 @@ The following settings are possible on a TransactionFactory: - propagation - For controlling how nested transactions behave. - traceLevel - Transaction trace level. -You can also specify the default values for some of these options in akka.conf. 
Here they are with their default values: - -:: - - stm { - fair = on # Should global transactions be fair or non-fair (non fair yield better performance) - max-retries = 1000 - timeout = 5 # Default timeout for blocking transactions and transaction set (in unit defined by - # the time-unit property) - write-skew = true - blocking-allowed = false - interruptible = false - speculative = true - quick-release = true - propagation = "requires" - trace-level = "none" - } +You can also specify the default values for some of these options in :ref:`configuration`. Transaction lifecycle listeners ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/akka-docs/java/typed-actors.rst b/akka-docs/java/typed-actors.rst index 2eb02f6ebc..8f39ecde76 100644 --- a/akka-docs/java/typed-actors.rst +++ b/akka-docs/java/typed-actors.rst @@ -185,7 +185,7 @@ Messages and immutability **IMPORTANT**: Messages can be any kind of object but have to be immutable (there is a workaround, see next section). Java or Scala can’t enforce immutability (yet) so this has to be by convention. Primitives like String, int, Long are always immutable. Apart from these you have to create your own immutable objects to send as messages. If you pass on a reference to an instance that is mutable then this instance can be modified concurrently by two different Typed Actors and the Actor model is broken leaving you with NO guarantees and most likely corrupt data. -Akka can help you in this regard. It allows you to turn on an option for serializing all messages, e.g. all parameters to the Typed Actor effectively making a deep clone/copy of the parameters. This will make sending mutable messages completely safe. This option is turned on in the ‘$AKKA_HOME/config/akka.conf’ config file like this: +Akka can help you in this regard. It allows you to turn on an option for serializing all messages, e.g. all parameters to the Typed Actor effectively making a deep clone/copy of the parameters. This will make sending mutable messages completely safe. This option is turned on in the :ref:`configuration` file like this: .. code-block:: ruby diff --git a/akka-docs/modules/camel.rst b/akka-docs/modules/camel.rst index b3c07e56dd..8b2b84c992 100644 --- a/akka-docs/modules/camel.rst +++ b/akka-docs/modules/camel.rst @@ -1522,7 +1522,7 @@ CamelService configuration For publishing consumer actors and typed actor methods (:ref:`camel-publishing`), applications must start a CamelService. When starting Akka in :ref:`microkernel` mode then a CamelService can be started automatically -when camel is added to the enabled-modules list in akka.conf, for example: +when camel is added to the enabled-modules list in :ref:`configuration`, for example: .. code-block:: none @@ -1535,7 +1535,7 @@ when camel is added to the enabled-modules list in akka.conf, for example: Applications that do not use the Akka Kernel, such as standalone applications for example, need to start a CamelService manually, as explained in the following subsections.When starting a CamelService manually, settings in -akka.conf are ignored. +:ref:`configuration` are ignored. Standalone applications @@ -1771,7 +1771,7 @@ CamelService can be omitted, as discussed in the previous section. Since these classes are loaded and instantiated before the CamelService is started (by Akka), applications can make modifications to a CamelContext here as well (and even provide their own CamelContext). Assuming there's a boot class -sample.camel.Boot configured in akka.conf. 
+sample.camel.Boot configured in :ref:`configuration`. .. code-block:: none @@ -2439,8 +2439,7 @@ Examples For all features described so far, there's running sample code in `akka-sample-camel`_. The examples in `sample.camel.Boot`_ are started during -Kernel startup because this class has been added to the boot configuration in -akka-reference.conf. +Kernel startup because this class has been added to the boot :ref:`configuration`. .. _akka-sample-camel: http://github.com/jboner/akka/tree/master/akka-samples/akka-sample-camel/ .. _sample.camel.Boot: http://github.com/jboner/akka/blob/master/akka-samples/akka-sample-camel/src/main/scala/sample/camel/Boot.scala @@ -2454,8 +2453,7 @@ akka-reference.conf. } If you don't want to have these examples started during Kernel startup, delete -it from akka-reference.conf (or from akka.conf if you have a custom boot -configuration). Other examples are standalone applications (i.e. classes with a +it from the :ref:`configuration`. Other examples are standalone applications (i.e. classes with a main method) that can be started from `sbt`_. .. _sbt: http://code.google.com/p/simple-build-tool/ diff --git a/akka-docs/modules/microkernel.rst b/akka-docs/modules/microkernel.rst index c7a9014e14..cbf9ba96ba 100644 --- a/akka-docs/modules/microkernel.rst +++ b/akka-docs/modules/microkernel.rst @@ -11,10 +11,9 @@ Run the microkernel To start the kernel use the scripts in the ``bin`` directory. -All services are configured in the ``config/akka.conf`` configuration file. See -the Akka documentation on Configuration for more details. Services you want to -be started up automatically should be listed in the list of ``boot`` classes in -the configuration. +All services are configured in the :ref:`configuration` file in the ``config`` directory. +Services you want to be started up automatically should be listed in the list of ``boot`` classes in +the :ref:`configuration`. Put your application in the ``deploy`` directory. diff --git a/akka-docs/scala/dispatchers.rst b/akka-docs/scala/dispatchers.rst index e16c336753..fb09c8e5ae 100644 --- a/akka-docs/scala/dispatchers.rst +++ b/akka-docs/scala/dispatchers.rst @@ -120,17 +120,8 @@ should have, as shown above. This defines the number of messages for a specific Actor the dispatcher should process in one single sweep; in other words, the dispatcher will bunch up to ``throughput`` message invocations together when having elected an actor to run. Setting this to a higher number will increase -throughput but lower fairness, and vice versa. If you don't specify it -explicitly then it uses the default value defined in the 'akka.conf' -configuration file: - -.. code-block:: ruby - - actor { - throughput = 5 - } - -If you don't define a the 'throughput' option in the configuration file then the default value of '5' will be used. +throughput but lower fairness, and vice versa. If you don't specify it explicitly +then it uses the value (5) defined for ``default-dispatcher`` in the :ref:`configuration`. Browse the `ScalaDoc `_ or look at the code for all the options available. diff --git a/akka-docs/scala/fsm.rst b/akka-docs/scala/fsm.rst index 48d716c53b..fb1f54ff26 100644 --- a/akka-docs/scala/fsm.rst +++ b/akka-docs/scala/fsm.rst @@ -498,7 +498,7 @@ and in the following. 
Event Tracing
-------------

-The setting ``akka.actor.debug.fsm`` in ``akka.conf`` enables logging of an
+The setting ``akka.actor.debug.fsm`` in the :ref:`configuration` enables logging of an
 event trace by :class:`LoggingFSM` instances::

   class MyFSM extends Actor with LoggingFSM[X, Z] {
diff --git a/akka-docs/scala/futures.rst b/akka-docs/scala/futures.rst
index ba7b8bb73e..623a24730a 100644
--- a/akka-docs/scala/futures.rst
+++ b/akka-docs/scala/futures.rst
@@ -244,7 +244,7 @@ In this example, if an ``ArithmeticException`` was thrown while the ``Actor`` pr

Timeouts
--------

-Waiting forever for a ``Future`` to be completed can be dangerous. It could cause your program to block indefinitly or produce a memory leak. ``Future`` has support for a timeout already builtin with a default of 5 seconds (taken from 'akka.conf'). A timeout is an instance of ``akka.actor.Timeout`` which contains an ``akka.util.Duration``. A ``Duration`` can be finite, which needs a length and unit type, or infinite. An infinite ``Timeout`` can be dangerous since it will never actually expire.
+Waiting forever for a ``Future`` to be completed can be dangerous. It could cause your program to block indefinitely or produce a memory leak. ``Future`` has support for a timeout already built in with a default of 5 seconds (taken from :ref:`configuration`). A timeout is an instance of ``akka.actor.Timeout`` which contains an ``akka.util.Duration``. A ``Duration`` can be finite, which needs a length and unit type, or infinite. An infinite ``Timeout`` can be dangerous since it will never actually expire.

A different ``Timeout`` can be supplied either explicitly or implicitly when a ``Future`` is created. An implicit ``Timeout`` has the benefit of being usable by a for-comprehension as well as being picked up by any methods looking for an implicit ``Timeout``, while an explicit ``Timeout`` can be used in a more controlled manner.

diff --git a/akka-docs/scala/stm.rst b/akka-docs/scala/stm.rst
index a35fb94676..f21f988939 100644
--- a/akka-docs/scala/stm.rst
+++ b/akka-docs/scala/stm.rst
@@ -271,23 +271,7 @@ The following settings are possible on a TransactionFactory:
- ``propagation`` - For controlling how nested transactions behave.
- ``traceLevel`` - Transaction trace level.

-You can also specify the default values for some of these options in ``akka.conf``. Here they are with their default values:
-
-::
-
-  stm {
-    fair = on # Should global transactions be fair or non-fair (non fair yield better performance)
-    max-retries = 1000
-    timeout = 5 # Default timeout for blocking transactions and transaction set (in unit defined by
-                # the time-unit property)
-    write-skew = true
-    blocking-allowed = false
-    interruptible = false
-    speculative = true
-    quick-release = true
-    propagation = "requires"
-    trace-level = "none"
-  }
+You can also specify the default values for some of these options in the :ref:`configuration`.

You can also determine at which level a transaction factory is shared or not shared, which affects
the way in which the STM can optimise transactions.
diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst
index 8b94f301d5..c9a2b5928e 100644
--- a/akka-docs/scala/testing.rst
+++ b/akka-docs/scala/testing.rst
@@ -457,7 +457,7 @@ Accounting for Slow Test Systems

The tight timeouts you use during testing on your lightning-fast notebook will
invariably lead to spurious test failures on the heavily loaded Jenkins server
(or similar).
To account for this situation, all maximum durations are -internally scaled by a factor taken from ``akka.conf``, +internally scaled by a factor taken from the :ref:`configuration`, ``akka.test.timefactor``, which defaults to 1. Resolving Conflicts with Implicit ActorRef @@ -716,7 +716,7 @@ options: * *Logging of message invocations on certain actors* - This is enabled by a setting in ``akka.conf`` — namely + This is enabled by a setting in the :ref:`configuration` — namely ``akka.actor.debug.receive`` — which enables the :meth:`loggable` statement to be applied to an actor’s :meth:`receive` function:: @@ -728,7 +728,7 @@ options: The first argument to :meth:`LoggingReceive` defines the source to be used in the logging events, which should be the current actor. - If the abovementioned setting is not given in ``akka.conf``, this method will + If the abovementioned setting is not given in the :ref:`configuration`, this method will pass through the given :class:`Receive` function unmodified, meaning that there is no runtime cost unless actually enabled. diff --git a/akka-docs/scala/typed-actors.rst b/akka-docs/scala/typed-actors.rst index bb3bb1e7b3..295fba6632 100644 --- a/akka-docs/scala/typed-actors.rst +++ b/akka-docs/scala/typed-actors.rst @@ -178,7 +178,7 @@ Messages and immutability **IMPORTANT**: Messages can be any kind of object but have to be immutable (there is a workaround, see next section). Java or Scala can’t enforce immutability (yet) so this has to be by convention. Primitives like String, int, Long are always immutable. Apart from these you have to create your own immutable objects to send as messages. If you pass on a reference to an instance that is mutable then this instance can be modified concurrently by two different Typed Actors and the Actor model is broken leaving you with NO guarantees and most likely corrupt data. -Akka can help you in this regard. It allows you to turn on an option for serializing all messages, e.g. all parameters to the Typed Actor effectively making a deep clone/copy of the parameters. This will make sending mutable messages completely safe. This option is turned on in the ‘$AKKA_HOME/config/akka.conf’ config file like this: +Akka can help you in this regard. It allows you to turn on an option for serializing all messages, e.g. all parameters to the Typed Actor effectively making a deep clone/copy of the parameters. This will make sending mutable messages completely safe. This option is turned on in the :ref:`configuration` file like this: .. 
code-block:: ruby From 15c0462db3bde60f4151060f1cf7d11c4514b930 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 9 Dec 2011 18:28:43 +0100 Subject: [PATCH 09/27] Added sbteclipse plugin to the build (version 1.5.0) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .gitignore | 1 + .history | 4 ---- project/plugins.sbt | 2 ++ 3 files changed, 3 insertions(+), 4 deletions(-) delete mode 100644 .history diff --git a/.gitignore b/.gitignore index 91eba2fc6b..203d9a1ef5 100755 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,7 @@ reports dist target deploy/*.jar +.history data out logs diff --git a/.history b/.history deleted file mode 100644 index 7bbf31e478..0000000000 --- a/.history +++ /dev/null @@ -1,4 +0,0 @@ -update -reload -projects -exit diff --git a/project/plugins.sbt b/project/plugins.sbt index e298278e00..b9dfcde215 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -3,6 +3,8 @@ resolvers += Classpaths.typesafeResolver addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.7") +addSbtPlugin("com.typesafe.sbteclipse" % "sbteclipse" % "1.5.0") + addSbtPlugin("com.typesafe.sbtscalariform" % "sbtscalariform" % "0.3.1") resolvers ++= Seq( From 4d649c39036b040442571be4f0e753873916ef12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 9 Dec 2011 18:44:59 +0100 Subject: [PATCH 10/27] =?UTF-8?q?Removed=20all=20@author=20tags=20for=20Jo?= =?UTF-8?q?nas=20Bon=C3=A9r=20since=20it=20has=20lost=20its=20meaning.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../actor/dispatch/DispatcherActorsSpec.scala | 2 -- .../src/main/scala/akka/AkkaException.scala | 2 -- .../src/main/scala/akka/actor/Actor.scala | 2 -- .../src/main/scala/akka/actor/ActorRef.scala | 4 ---- .../src/main/scala/akka/actor/Deployer.scala | 4 ---- .../scala/akka/actor/DeploymentConfig.scala | 2 -- .../src/main/scala/akka/actor/FSM.scala | 1 - .../main/scala/akka/actor/UntypedActor.scala | 4 ---- .../scala/akka/cluster/ClusterInterface.scala | 4 ---- .../akka/dispatch/AbstractDispatcher.scala | 6 ------ .../akka/dispatch/BalancingDispatcher.scala | 2 -- .../main/scala/akka/dispatch/Dispatcher.scala | 3 +-- .../scala/akka/dispatch/Dispatchers.scala | 4 +--- .../main/scala/akka/dispatch/Mailbox.scala | 3 --- .../akka/dispatch/PinnedDispatcher.scala | 2 -- .../akka/dispatch/ThreadPoolBuilder.scala | 9 --------- .../src/main/scala/akka/experimental.scala | 1 - .../akka/routing/ConnectionManager.scala | 2 -- .../scala/akka/routing/ConsistentHash.scala | 2 -- .../src/main/scala/akka/routing/Routing.scala | 10 ---------- .../scala/akka/serialization/Format.scala | 1 - .../src/main/scala/akka/util/Convert.scala | 3 --- .../src/main/scala/akka/util/Crypt.scala | 3 --- .../src/main/scala/akka/util/HashCode.scala | 2 -- .../src/main/scala/akka/util/Helpers.scala | 7 ++----- .../src/main/scala/akka/util/Index.scala | 4 ---- akka-actor/src/main/scala/akka/util/JMX.scala | 3 --- .../scala/akka/util/ListenerManagement.scala | 2 -- .../src/main/scala/akka/util/LockUtil.scala | 3 --- .../scala/akka/cluster/BookKeeperServer.scala | 3 --- .../src/main/scala/akka/cluster/Cluster.scala | 20 ------------------- .../scala/akka/cluster/ClusterActorRef.scala | 4 ---- .../scala/akka/cluster/ClusterDeployer.scala | 4 ---- .../scala/akka/cluster/TransactionLog.scala | 11 +--------- .../cluster/sample/ComputeGridSample.scala | 3 --- 
.../akka/actor/mailbox/DurableMailbox.scala | 8 -------- .../actor/mailbox/RedisBasedMailbox.scala | 3 --- .../actor/mailbox/ZooKeeperBasedMailbox.scala | 3 --- .../src/main/scala/akka/kernel/Kernel.scala | 2 -- .../akka/remote/NetworkEventStream.scala | 2 -- .../src/main/scala/akka/remote/Remote.scala | 4 ---- .../akka/remote/RemoteActorRefProvider.scala | 4 ---- .../akka/remote/RemoteConnectionManager.scala | 2 -- .../remote/netty/NettyRemoteSupport.scala | 16 +-------------- .../akka/serialization/Compression.scala | 6 ------ .../main/scala/akka/event/slf4j/SLF4J.scala | 4 ---- .../scala/akka/spring/ActorFactoryBean.scala | 1 - scripts/generate_config_with_secure_cookie.sh | 11 ++++------ 48 files changed, 10 insertions(+), 198 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorsSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorsSpec.scala index 71a03c6e01..df4048a56e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorsSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorsSpec.scala @@ -6,8 +6,6 @@ import akka.testkit.AkkaSpec /** * Tests the behavior of the executor based event driven dispatcher when multiple actors are being dispatched on it. - * - * @author Jan Van Besien */ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class DispatcherActorsSpec extends AkkaSpec { diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 0dc3a81728..b8d83abf7a 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -14,8 +14,6 @@ import java.net.{ InetAddress, UnknownHostException } *
  • toString that includes exception name, message and uuid
  • *
  • toLongString which also includes the stack trace
  • * - * - * @author Jonas Bonér */ class AkkaException(message: String = "", cause: Throwable = null) extends RuntimeException(message, cause) with Serializable { val uuid = "%s_%s".format(AkkaException.hostname, newUuid) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 88936f2c86..df1bb0e6a5 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -167,8 +167,6 @@ object Actor { * *

    * The Actor's own ActorRef is available in the 'self' member variable. - * - * @author Jonas Bonér */ trait Actor { diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index c6f6333822..a5253c440b 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -45,8 +45,6 @@ import scala.annotation.tailrec * * * The natural ordering of ActorRef is defined in terms of its [[akka.actor.ActorPath]]. - * - * @author Jonas Bonér */ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable { scalaRef: InternalActorRef ⇒ @@ -214,8 +212,6 @@ private[akka] case object Nobody extends MinimalActorRef { /** * Local (serializable) ActorRef that is used when referencing the Actor on its "home" node. - * - * @author Jonas Bonér */ class LocalActorRef private[akka] ( system: ActorSystemImpl, diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 52381dc60b..62b4e6b818 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -30,8 +30,6 @@ trait ActorDeployer { /** * Deployer maps actor paths to actor deployments. - * - * @author Jonas Bonér */ class Deployer(val settings: ActorSystem.Settings, val eventStream: EventStream, val nodename: String) extends ActorDeployer { @@ -267,8 +265,6 @@ class Deployer(val settings: ActorSystem.Settings, val eventStream: EventStream, /** * Simple local deployer, only for internal use. - * - * @author Jonas Bonér */ class LocalDeployer extends ActorDeployer { private val deployments = new ConcurrentHashMap[String, Deploy] diff --git a/akka-actor/src/main/scala/akka/actor/DeploymentConfig.scala b/akka-actor/src/main/scala/akka/actor/DeploymentConfig.scala index 9a3d934f01..03e6aef683 100644 --- a/akka-actor/src/main/scala/akka/actor/DeploymentConfig.scala +++ b/akka-actor/src/main/scala/akka/actor/DeploymentConfig.scala @@ -215,8 +215,6 @@ object DeploymentConfig { * Module holding the programmatic deployment configuration classes. * Defines the deployment specification. * Most values have defaults and can be left out. - * - * @author Jonas Bonér */ class DeploymentConfig(val nodename: String) { diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 9419fccc60..ce7e7f8318 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -564,7 +564,6 @@ trait FSM[S, D] extends ListenerManagement { /** * Stackable trait for FSM which adds a rolling event log. * - * @author Roland Kuhn * @since 1.2 */ trait LoggingFSM[S, D] extends FSM[S, D] { this: Actor ⇒ diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala index 8c10a9fcf0..1692396a8f 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala @@ -48,8 +48,6 @@ import akka.dispatch.{ MessageDispatcher, Promise } * } * } * - * - * @author Jonas Bonér */ abstract class UntypedActor extends Actor { @@ -123,7 +121,5 @@ abstract class UntypedActor extends Actor { /** * Factory closure for an UntypedActor, to be used with 'Actors.actorOf(factory)'. 
- * - * @author Jonas Bonér */ trait UntypedActorFactory extends Creator[Actor] diff --git a/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala b/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala index 98a9f9f188..d88b5c3440 100644 --- a/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala +++ b/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala @@ -84,8 +84,6 @@ object ChangeListener { /** * Node address holds the node name and the cluster name and can be used as a hash lookup key for a Node instance. - * - * @author Jonas Bonér */ class NodeAddress(val clusterName: String, val nodeName: String) { if ((clusterName eq null) || clusterName == "") throw new NullPointerException("Cluster name must not be null or empty string") @@ -183,8 +181,6 @@ trait NodeMetricsManager { /** * Interface for cluster node. - * - * @author Jonas Bonér */ trait ClusterNode { import ChangeListener._ diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 8b6f66f4c7..3c9143674d 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -17,9 +17,6 @@ import akka.event.EventStream import akka.actor.ActorSystem.Settings import com.typesafe.config.Config -/** - * @author Jonas Bonér - */ final case class Envelope(val message: Any, val sender: ActorRef) { if (message.isInstanceOf[AnyRef] && (message.asInstanceOf[AnyRef] eq null)) throw new InvalidMessageException("Message is null") } @@ -86,9 +83,6 @@ object MessageDispatcher { implicit def defaultDispatcher(implicit system: ActorSystem) = system.dispatcher } -/** - * @author Jonas Bonér - */ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) extends AbstractMessageDispatcher with Serializable { import MessageDispatcher._ diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index 9dd0733328..96477b0d56 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -28,8 +28,6 @@ import akka.util.Duration * * @see akka.dispatch.BalancingDispatcher * @see akka.dispatch.Dispatchers - * - * @author Viktor Klang */ class BalancingDispatcher( _prerequisites: DispatcherPrerequisites, diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index 1a40ee23cd..02c84b3099 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -55,7 +55,6 @@ import java.util.concurrent._ * But the preferred way of creating dispatchers is to use * the {@link akka.dispatch.Dispatchers} factory object. * - * @author Jonas Bonér * @param throughput positive integer indicates the dispatcher will only process so much messages at a time from the * mailbox, without checking the mailboxes of other actors. Zero or negative means the dispatcher * always continues until the mailbox is empty. 
@@ -153,4 +152,4 @@ abstract class PriorityGenerator extends java.util.Comparator[Envelope] { final def compare(thisMessage: Envelope, thatMessage: Envelope): Int = gen(thisMessage.message) - gen(thatMessage.message) -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index a75eca9101..ddae9654c4 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -53,8 +53,6 @@ case class DefaultDispatcherPrerequisites( * .build(); * *

    - * - * @author Jonas Bonér */ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: DispatcherPrerequisites) { @@ -199,7 +197,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc /* * Creates of obtains a dispatcher from a ConfigMap according to the format below. - * Uses default values from default-dispatcher. + * Uses default values from default-dispatcher. * * my-dispatcher { * type = "Dispatcher" # Must be one of the following diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index cd33ca57a7..27b8f039d1 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -33,9 +33,6 @@ object Mailbox { final val debug = false } -/** - * @author Jonas Bonér - */ abstract class Mailbox(val actor: ActorCell) extends MessageQueue with SystemMessageQueue with Runnable { import Mailbox._ diff --git a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala index ed0b3cde99..2faffb9f28 100644 --- a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala @@ -14,8 +14,6 @@ import java.util.concurrent.TimeUnit /** * Dedicates a unique thread for each actor passed in as reference. Served through its messageQueue. - * - * @author Jonas Bonér */ class PinnedDispatcher( _prerequisites: DispatcherPrerequisites, diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index d26842cc3b..d58444c166 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -144,9 +144,6 @@ case class ThreadPoolConfigDispatcherBuilder(dispatcherFactory: (ThreadPoolConfi def configure(fs: Option[Function[ThreadPoolConfigDispatcherBuilder, ThreadPoolConfigDispatcherBuilder]]*): ThreadPoolConfigDispatcherBuilder = fs.foldLeft(this)((c, f) ⇒ f.map(_(c)).getOrElse(c)) } -/** - * @author Jonas Bonér - */ class MonitorableThreadFactory(val name: String, val daemonic: Boolean = false) extends ThreadFactory { protected val counter = new AtomicLong @@ -157,9 +154,6 @@ class MonitorableThreadFactory(val name: String, val daemonic: Boolean = false) } } -/** - * @author Jonas Bonér - */ object MonitorableThread { val DEFAULT_NAME = "MonitorableThread".intern @@ -168,9 +162,6 @@ object MonitorableThread { val alive = new AtomicInteger } -/** - * @author Jonas Bonér - */ class MonitorableThread(runnable: Runnable, name: String) extends Thread(runnable, name + "-" + MonitorableThread.created.incrementAndGet) { diff --git a/akka-actor/src/main/scala/akka/experimental.scala b/akka-actor/src/main/scala/akka/experimental.scala index cfc976551a..c37197a10d 100644 --- a/akka-actor/src/main/scala/akka/experimental.scala +++ b/akka-actor/src/main/scala/akka/experimental.scala @@ -10,7 +10,6 @@ import annotation.target._ * This annotation marks a feature which is not yet considered stable and may * change or be removed in a future release. 
* - * @author Roland Kuhn * @since 1.2 */ @getter diff --git a/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala b/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala index 6e45a50cad..572bd986ee 100644 --- a/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala +++ b/akka-actor/src/main/scala/akka/routing/ConnectionManager.scala @@ -26,8 +26,6 @@ trait VersionedIterable[A] { /** * Manages connections (ActorRefs) for a router. - * - * @author Jonas Bonér */ trait ConnectionManager { /** diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala index 3abadd01d8..ead70b4b7a 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala @@ -15,8 +15,6 @@ import scala.collection.mutable.{ Buffer, Map } * Consistent Hashing node ring abstraction. * * Not thread-safe, to be used from within an Actor or protected some other way. - * - * @author Jonas Bonér */ class ConsistentHash[T](nodes: Seq[T], replicas: Int) { private val cluster = Buffer[T]() diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 3060f1b847..a71f206959 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -19,8 +19,6 @@ sealed trait RouterType /** * Used for declarative configuration of Routing. - * - * @author Jonas Bonér */ object RouterType { @@ -92,8 +90,6 @@ object RoutedProps { /** * The Router is responsible for sending a message to one (or more) of its connections. Connections are stored in the * {@link FailureDetector} and each Router should be linked to only one {@link FailureDetector}. - * - * @author Jonas Bonér */ trait Router { @@ -296,8 +292,6 @@ class BroadcastRouter(implicit val dispatcher: MessageDispatcher, timeout: Timeo /** * A DirectRouter a Router that only has a single connected actorRef and forwards all request to that actorRef. - * - * @author Jonas Bonér */ class DirectRouter(implicit val dispatcher: MessageDispatcher, timeout: Timeout) extends BasicRouter { @@ -338,8 +332,6 @@ class DirectRouter(implicit val dispatcher: MessageDispatcher, timeout: Timeout) /** * A Router that randomly selects one of the target connections to send a message to. - * - * @author Jonas Bonér */ class RandomRouter(implicit val dispatcher: MessageDispatcher, timeout: Timeout) extends BasicRouter { import java.security.SecureRandom @@ -380,8 +372,6 @@ class RandomRouter(implicit val dispatcher: MessageDispatcher, timeout: Timeout) /** * A Router that uses round-robin to select a connection. For concurrent calls, round robin is just a best effort. 
- * - * @author Jonas Bonér */ class RoundRobinRouter(implicit val dispatcher: MessageDispatcher, timeout: Timeout) extends BasicRouter { diff --git a/akka-actor/src/main/scala/akka/serialization/Format.scala b/akka-actor/src/main/scala/akka/serialization/Format.scala index 43177862d4..41f9e9ce73 100644 --- a/akka-actor/src/main/scala/akka/serialization/Format.scala +++ b/akka-actor/src/main/scala/akka/serialization/Format.scala @@ -7,7 +7,6 @@ package akka.serialization import akka.actor.Actor /** - * @author Jonas Bonér * trait Serializer extends scala.Serializable { * @volatile * var classLoader: Option[ClassLoader] = None diff --git a/akka-actor/src/main/scala/akka/util/Convert.scala b/akka-actor/src/main/scala/akka/util/Convert.scala index 278b94f422..42a98f3849 100644 --- a/akka-actor/src/main/scala/akka/util/Convert.scala +++ b/akka-actor/src/main/scala/akka/util/Convert.scala @@ -4,9 +4,6 @@ package akka.util -/** - * @author Jonas Bonér - */ object Convert { def intToBytes(value: Int): Array[Byte] = { diff --git a/akka-actor/src/main/scala/akka/util/Crypt.scala b/akka-actor/src/main/scala/akka/util/Crypt.scala index 2507b0e421..50e8c881a6 100644 --- a/akka-actor/src/main/scala/akka/util/Crypt.scala +++ b/akka-actor/src/main/scala/akka/util/Crypt.scala @@ -6,9 +6,6 @@ package akka.util import java.security.{ MessageDigest, SecureRandom } -/** - * @author Jonas Bonér - */ object Crypt { val hex = "0123456789ABCDEF" val lineSeparator = System.getProperty("line.separator") diff --git a/akka-actor/src/main/scala/akka/util/HashCode.scala b/akka-actor/src/main/scala/akka/util/HashCode.scala index d515a57ec5..40b740bfd6 100644 --- a/akka-actor/src/main/scala/akka/util/HashCode.scala +++ b/akka-actor/src/main/scala/akka/util/HashCode.scala @@ -21,8 +21,6 @@ import java.lang.{ Float ⇒ JFloat, Double ⇒ JDouble } * result * } * - * - * @author Jonas Bonér */ object HashCode { val SEED = 23 diff --git a/akka-actor/src/main/scala/akka/util/Helpers.scala b/akka-actor/src/main/scala/akka/util/Helpers.scala index 830ec28881..c656ab37b1 100644 --- a/akka-actor/src/main/scala/akka/util/Helpers.scala +++ b/akka-actor/src/main/scala/akka/util/Helpers.scala @@ -8,17 +8,14 @@ import java.util.Comparator import scala.annotation.tailrec import java.util.regex.Pattern -/** - * @author Jonas Bonér - */ object Helpers { def makePattern(s: String): Pattern = Pattern.compile("^\\Q" + s.replace("?", "\\E.\\Q").replace("*", "\\E.*\\Q") + "\\E$") def compareIdentityHash(a: AnyRef, b: AnyRef): Int = { /* - * make sure that there is no overflow or underflow in comparisons, so - * that the ordering is actually consistent and you cannot have a + * make sure that there is no overflow or underflow in comparisons, so + * that the ordering is actually consistent and you cannot have a * sequence which cyclically is monotone without end. 
*/ val diff = ((System.identityHashCode(a) & 0xffffffffL) - (System.identityHashCode(b) & 0xffffffffL)) diff --git a/akka-actor/src/main/scala/akka/util/Index.scala b/akka-actor/src/main/scala/akka/util/Index.scala index 3b0f68eabe..b7cb1a74a4 100644 --- a/akka-actor/src/main/scala/akka/util/Index.scala +++ b/akka-actor/src/main/scala/akka/util/Index.scala @@ -13,8 +13,6 @@ import scala.collection.mutable * An implementation of a ConcurrentMultiMap * Adds/remove is serialized over the specified key * Reads are fully concurrent <-- el-cheapo - * - * @author Viktor Klang */ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { @@ -192,7 +190,5 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { * An implementation of a ConcurrentMultiMap * Adds/remove is serialized over the specified key * Reads are fully concurrent <-- el-cheapo - * - * @author Viktor Klang */ class ConcurrentMultiMap[K, V](mapSize: Int, valueComparator: Comparator[V]) extends Index[K, V](mapSize, valueComparator) diff --git a/akka-actor/src/main/scala/akka/util/JMX.scala b/akka-actor/src/main/scala/akka/util/JMX.scala index 2c87524843..bcfd5d2477 100644 --- a/akka-actor/src/main/scala/akka/util/JMX.scala +++ b/akka-actor/src/main/scala/akka/util/JMX.scala @@ -9,9 +9,6 @@ import java.lang.management.ManagementFactory import javax.management.{ ObjectInstance, ObjectName, InstanceAlreadyExistsException, InstanceNotFoundException } import akka.actor.ActorSystem -/** - * @author Jonas Bonér - */ object JMX { private val mbeanServer = ManagementFactory.getPlatformMBeanServer diff --git a/akka-actor/src/main/scala/akka/util/ListenerManagement.scala b/akka-actor/src/main/scala/akka/util/ListenerManagement.scala index fad8f5b20a..3efbcbc902 100644 --- a/akka-actor/src/main/scala/akka/util/ListenerManagement.scala +++ b/akka-actor/src/main/scala/akka/util/ListenerManagement.scala @@ -9,8 +9,6 @@ import akka.actor.{ ActorInitializationException, ActorRef } /** * A manager for listener actors. Intended for mixin by observables. - * - * @author Martin Krasser */ trait ListenerManagement { diff --git a/akka-actor/src/main/scala/akka/util/LockUtil.scala b/akka-actor/src/main/scala/akka/util/LockUtil.scala index a31f4434d1..e17507d427 100644 --- a/akka-actor/src/main/scala/akka/util/LockUtil.scala +++ b/akka-actor/src/main/scala/akka/util/LockUtil.scala @@ -7,9 +7,6 @@ package akka.util import java.util.concurrent.locks.{ ReentrantLock } import java.util.concurrent.atomic.{ AtomicBoolean } -/** - * @author Jonas Bonér - */ final class ReentrantGuard { final val lock = new ReentrantLock diff --git a/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala b/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala index e546d2d9af..7c3b57969d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala +++ b/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala @@ -21,9 +21,6 @@ entry number it will use MAX_INTEGER). Once all the entries have been processed, new one for its use. 
*/ -/** - * @author Jonas Bonér - */ object BookKeeperServer { val port = 3181 val zkServers = "localhost:2181" diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 356a4461bd..5a3f115ef8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -57,8 +57,6 @@ import com.google.protobuf.ByteString /** * JMX MBean for the cluster service. - * - * @author Jonas Bonér */ trait ClusterNodeMBean { @@ -140,8 +138,6 @@ trait ClusterNodeMBean { /** * Module for the Cluster. Also holds global state such as configuration data etc. - * - * @author Jonas Bonér */ object Cluster { val EMPTY_STRING = "".intern @@ -257,8 +253,6 @@ object Cluster { * * /clusterName/'actor-address-to-uuids'/actorAddress/actorUuid * - * - * @author Jonas Bonér */ class DefaultClusterNode private[akka] ( val nodeAddress: NodeAddress, @@ -1601,9 +1595,6 @@ class DefaultClusterNode private[akka] ( } } -/** - * @author Jonas Bonér - */ class MembershipChildListener(self: ClusterNode) extends IZkChildListener with ErrorHandler { def handleChildChange(parentPath: String, currentChilds: JList[String]) { withErrorHandler { @@ -1643,9 +1634,6 @@ class MembershipChildListener(self: ClusterNode) extends IZkChildListener with E } } -/** - * @author Jonas Bonér - */ class StateListener(self: ClusterNode) extends IZkStateListener { def handleStateChanged(state: KeeperState) { state match { @@ -1671,9 +1659,6 @@ class StateListener(self: ClusterNode) extends IZkStateListener { } } -/** - * @author Jonas Bonér - */ trait ErrorHandler { def withErrorHandler[T](body: ⇒ T) = { try { @@ -1686,9 +1671,6 @@ trait ErrorHandler { } } -/** - * @author Jonas Bonér - */ object RemoteClusterDaemon { val Address = "akka-cluster-daemon".intern @@ -1700,8 +1682,6 @@ object RemoteClusterDaemon { * Internal "daemon" actor for cluster internal communication. * * It acts as the brain of the cluster that responds to cluster events (messages) and undertakes action. - * - * @author Jonas Bonér */ class RemoteClusterDaemon(cluster: ClusterNode) extends Actor { diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala index a61ca3a6e1..84d23af736 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala @@ -21,8 +21,6 @@ import annotation.tailrec /** * ClusterActorRef factory and locator. - * - * @author Jonas Bonér */ object ClusterActorRef { import FailureDetectorType._ @@ -77,8 +75,6 @@ object ClusterActorRef { /** * ActorRef representing a one or many instances of a clustered, load-balanced and sometimes replicated actor * where the instances can reside on other nodes in the cluster. - * - * @author Jonas Bonér */ private[akka] class ClusterActorRef(props: RoutedProps, val address: String) extends AbstractRoutedActorRef(props) { diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala index c9ac211821..4cc791fd89 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala @@ -24,10 +24,6 @@ import java.util.concurrent.{ CountDownLatch, TimeUnit } /** * A ClusterDeployer is responsible for deploying a Deploy. - * - * FIXME Document: what does Deploy mean? 
- * - * @author Jonas Bonér */ object ClusterDeployer extends ActorDeployer { val clusterName = Cluster.name diff --git a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala b/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala index 7643a0bd31..7d593437ae 100644 --- a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala +++ b/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala @@ -26,19 +26,12 @@ import java.util.Enumeration // FIXME allow user to choose dynamically between 'async' and 'sync' tx logging (asyncAddEntry(byte[] data, AddCallback cb, Object ctx)) // FIXME clean up old entries in log after doing a snapshot -/** - * @author Jonas Bonér - */ class ReplicationException(message: String, cause: Throwable = null) extends AkkaException(message) { def this(msg: String) = this(msg, null) } /** - * TODO: Explain something about threadsafety. - * * A TransactionLog makes chunks of data durable. - * - * @author Jonas Bonér */ class TransactionLog private ( ledger: LedgerHandle, @@ -352,7 +345,7 @@ class TransactionLog private ( } /** - * @author Jonas Bonér + * TODO: Documentation. */ object TransactionLog { @@ -563,8 +556,6 @@ object TransactionLog { /** * TODO: Documentation. - * - * @author Jonas Bonér */ object LocalBookKeeperEnsemble { private val isRunning = new Switch(false) diff --git a/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala b/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala index 4cf7a7010f..7a3a9ca606 100644 --- a/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala +++ b/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala @@ -7,9 +7,6 @@ package akka.cluster.sample import akka.cluster._ import akka.dispatch.Futures -/** - * @author Jonas Bonér - */ object ComputeGridSample { //sample.cluster.ComputeGridSample.fun2 diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala index 7bb01a06f0..96cb764615 100644 --- a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala +++ b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala @@ -33,9 +33,6 @@ class DurableMailboxException private[akka] (message: String, cause: Throwable) def this(message: String) = this(message, null) } -/** - * @author Jonas Bonér - */ abstract class DurableMailbox(owner: ActorCell) extends Mailbox(owner) with DefaultSystemMessageQueue { import DurableExecutableMailboxConfig._ @@ -76,9 +73,6 @@ trait DurableMessageSerialization { } -/** - * @author Jonas Bonér - */ abstract class DurableMailboxType(mailboxFQN: String) extends MailboxType { val constructorSignature = Array[Class[_]](classOf[ActorCell]) @@ -117,8 +111,6 @@ case class FqnDurableMailboxType(mailboxFQN: String) extends DurableMailboxType( * Configurator for the DurableMailbox * Do not forget to specify the "storage", valid values are "redis", "beanstalkd", "zookeeper", "mongodb", "file", * or a full class name of the Mailbox implementation. - * - * @author Jonas Bonér */ class DurableMailboxConfigurator { // TODO PN #896: when and how is this class supposed to be used? Can we remove it? 
diff --git a/akka-durable-mailboxes/akka-redis-mailbox/src/main/scala/akka/actor/mailbox/RedisBasedMailbox.scala b/akka-durable-mailboxes/akka-redis-mailbox/src/main/scala/akka/actor/mailbox/RedisBasedMailbox.scala index 7bb1c5a5dc..f937be09e0 100644 --- a/akka-durable-mailboxes/akka-redis-mailbox/src/main/scala/akka/actor/mailbox/RedisBasedMailbox.scala +++ b/akka-durable-mailboxes/akka-redis-mailbox/src/main/scala/akka/actor/mailbox/RedisBasedMailbox.scala @@ -13,9 +13,6 @@ import akka.actor.ActorRef class RedisBasedMailboxException(message: String) extends AkkaException(message) -/** - * @author Jonas Bonér - */ class RedisBasedMailbox(val owner: ActorCell) extends DurableMailbox(owner) with DurableMessageSerialization { private val settings = RedisBasedMailboxExtension(owner.system) diff --git a/akka-durable-mailboxes/akka-zookeeper-mailbox/src/main/scala/akka/actor/mailbox/ZooKeeperBasedMailbox.scala b/akka-durable-mailboxes/akka-zookeeper-mailbox/src/main/scala/akka/actor/mailbox/ZooKeeperBasedMailbox.scala index c5efa62358..3a50b93e93 100644 --- a/akka-durable-mailboxes/akka-zookeeper-mailbox/src/main/scala/akka/actor/mailbox/ZooKeeperBasedMailbox.scala +++ b/akka-durable-mailboxes/akka-zookeeper-mailbox/src/main/scala/akka/actor/mailbox/ZooKeeperBasedMailbox.scala @@ -17,9 +17,6 @@ import akka.actor.ActorRef class ZooKeeperBasedMailboxException(message: String) extends AkkaException(message) -/** - * @author Jonas Bonér - */ class ZooKeeperBasedMailbox(val owner: ActorCell) extends DurableMailbox(owner) with DurableMessageSerialization { private val settings = ZooKeeperBasedMailboxExtension(owner.system) diff --git a/akka-kernel/src/main/scala/akka/kernel/Kernel.scala b/akka-kernel/src/main/scala/akka/kernel/Kernel.scala index 8bb66acca4..74c90b47c7 100644 --- a/akka-kernel/src/main/scala/akka/kernel/Kernel.scala +++ b/akka-kernel/src/main/scala/akka/kernel/Kernel.scala @@ -23,8 +23,6 @@ object Main { /** * The Akka Kernel, is used to start And postStop Akka in standalone/kernel mode. - * - * @author Jonas Bonér */ object Kernel extends AkkaLoader { diff --git a/akka-remote/src/main/scala/akka/remote/NetworkEventStream.scala b/akka-remote/src/main/scala/akka/remote/NetworkEventStream.scala index 2cad35c948..23994337f1 100644 --- a/akka-remote/src/main/scala/akka/remote/NetworkEventStream.scala +++ b/akka-remote/src/main/scala/akka/remote/NetworkEventStream.scala @@ -12,8 +12,6 @@ import akka.actor.ActorSystemImpl /** * Stream of all kinds of network events, remote failure and connection events, cluster failure and connection events etc. * Also provides API for sender listener management. - * - * @author Jonas Bonér */ object NetworkEventStream { diff --git a/akka-remote/src/main/scala/akka/remote/Remote.scala b/akka-remote/src/main/scala/akka/remote/Remote.scala index 49d85c030f..072c45b4c4 100644 --- a/akka-remote/src/main/scala/akka/remote/Remote.scala +++ b/akka-remote/src/main/scala/akka/remote/Remote.scala @@ -25,8 +25,6 @@ import akka.serialization.SerializationExtension /** * Remote module - contains remote client and server config, remote server instance, remote daemon, remote dispatchers etc. - * - * @author Jonas Bonér */ class Remote(val system: ActorSystemImpl, val nodename: String) { @@ -101,8 +99,6 @@ class Remote(val system: ActorSystemImpl, val nodename: String) { * Internal system "daemon" actor for remote internal communication. * * It acts as the brain of the remote that responds to system remote events (messages) and undertakes action. 
- * - * @author Jonas Bonér */ class RemoteSystemDaemon(remote: Remote) extends Actor { diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 44b756dfba..2b1c1bb528 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -26,8 +26,6 @@ import akka.serialization.SerializationExtension /** * Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it. - * - * @author Jonas Bonér */ class RemoteActorRefProvider( val systemName: String, @@ -257,8 +255,6 @@ class RemoteActorRefProvider( /** * Remote ActorRef that is used when referencing the Actor on a different node than its "home" node. * This reference is network-aware (remembers its origin) and immutable. - * - * @author Jonas Bonér */ private[akka] case class RemoteActorRef private[akka] ( provider: ActorRefProvider, diff --git a/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala b/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala index 7b739b6199..aa3f577ba4 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala @@ -16,8 +16,6 @@ import java.util.concurrent.atomic.AtomicReference /** * Remote connection manager, manages remote connections, e.g. RemoteActorRef's. - * - * @author Jonas Bonér */ class RemoteConnectionManager( system: ActorSystem, diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index b412fcdf3e..04e2483345 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -112,9 +112,7 @@ class PassiveRemoteClient(val currentChannel: Channel, } /** - * RemoteClient represents a connection to an Akka node. Is used to send messages to remote actors on the node. - * - * @author Jonas Bonér + * RemoteClient represents a connection to an Akka node. Is used to send messages to remote actors on the node. 
*/ class ActiveRemoteClient private[akka] ( remoteSupport: NettyRemoteSupport, @@ -241,9 +239,6 @@ class ActiveRemoteClient private[akka] ( private[akka] def resetReconnectionTimeWindow = reconnectionTimeWindowStart = 0L } -/** - * @author Jonas Bonér - */ class ActiveRemoteClientPipelineFactory( name: String, bootstrap: ClientBootstrap, @@ -264,9 +259,6 @@ class ActiveRemoteClientPipelineFactory( } } -/** - * @author Jonas Bonér - */ @ChannelHandler.Sharable class ActiveRemoteClientHandler( val name: String, @@ -536,9 +528,6 @@ class NettyRemoteServer(val remoteSupport: NettyRemoteSupport, val loader: Optio } } -/** - * @author Jonas Bonér - */ class RemoteServerPipelineFactory( val name: String, val openChannels: ChannelGroup, @@ -587,9 +576,6 @@ class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends Si } } -/** - * @author Jonas Bonér - */ @ChannelHandler.Sharable class RemoteServerHandler( val name: String, diff --git a/akka-remote/src/main/scala/akka/serialization/Compression.scala b/akka-remote/src/main/scala/akka/serialization/Compression.scala index 3602b81438..df79fe1f22 100644 --- a/akka-remote/src/main/scala/akka/serialization/Compression.scala +++ b/akka-remote/src/main/scala/akka/serialization/Compression.scala @@ -4,14 +4,8 @@ package akka.serialization -/** - * @author Jonas Bonér - */ object Compression { - /** - * @author Jonas Bonér - */ object LZF { import voldemort.store.compress.lzf._ def compress(bytes: Array[Byte]): Array[Byte] = LZFEncoder encode bytes diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala index 73d37a838b..91f3123634 100644 --- a/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala +++ b/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala @@ -11,8 +11,6 @@ import akka.actor._ /** * Base trait for all classes that wants to be able use the SLF4J logging infrastructure. - * - * @author Jonas Bonér */ trait SLF4JLogging { @transient @@ -29,8 +27,6 @@ object Logger { * * The thread in which the logging was performed is captured in * Mapped Diagnostic Context (MDC) with attribute name "sourceThread". 
- * - * @author Jonas Bonér */ class Slf4jEventHandler extends Actor with SLF4JLogging { diff --git a/akka-spring/src/main/scala/akka/spring/ActorFactoryBean.scala b/akka-spring/src/main/scala/akka/spring/ActorFactoryBean.scala index 41d46a4118..84b3be952e 100644 --- a/akka-spring/src/main/scala/akka/spring/ActorFactoryBean.scala +++ b/akka-spring/src/main/scala/akka/spring/ActorFactoryBean.scala @@ -33,7 +33,6 @@ class AkkaBeansException(message: String, cause: Throwable) extends BeansExcepti * @author michaelkober * @author Johan Rask * @author Martin Krasser - * @author Jonas Bonér */ class ActorFactoryBean extends AbstractFactoryBean[AnyRef] with ApplicationContextAware { import AkkaSpringConfigurationTags._ diff --git a/scripts/generate_config_with_secure_cookie.sh b/scripts/generate_config_with_secure_cookie.sh index 6959697d0d..6ef3d06e91 100755 --- a/scripts/generate_config_with_secure_cookie.sh +++ b/scripts/generate_config_with_secure_cookie.sh @@ -7,9 +7,6 @@ exec scala "$0" "$@" */ import java.security.{MessageDigest, SecureRandom} -/** - * @author Jonas Bonér - */ object Crypt { val hex = "0123456789ABCDEF" val lineSeparator = System.getProperty("line.separator") @@ -19,8 +16,8 @@ object Crypt { def md5(text: String): String = md5(unifyLineSeparator(text).getBytes("ASCII")) def md5(bytes: Array[Byte]): String = digest(bytes, MessageDigest.getInstance("MD5")) - - def sha1(text: String): String = sha1(unifyLineSeparator(text).getBytes("ASCII")) + + def sha1(text: String): String = sha1(unifyLineSeparator(text).getBytes("ASCII")) def sha1(bytes: Array[Byte]): String = digest(bytes, MessageDigest.getInstance("SHA1")) @@ -33,8 +30,8 @@ object Crypt { def digest(bytes: Array[Byte], md: MessageDigest): String = { md.update(bytes) hexify(md.digest) - } - + } + def hexify(bytes: Array[Byte]): String = { val builder = new StringBuilder bytes.foreach { byte => builder.append(hex.charAt((byte & 0xF) >> 4)).append(hex.charAt(byte & 0xF)) } From 7db3f62ff54e513f2c964b41e7af0faa9f6a77e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 9 Dec 2011 18:45:52 +0100 Subject: [PATCH 11/27] Converted tabs to spaces. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../TellThroughputSeparateDispatchersPerformanceSpec.scala | 2 +- .../src/main/java/akka/tutorial/first/java/Pi.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputSeparateDispatchersPerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputSeparateDispatchersPerformanceSpec.scala index aef501bb2d..ca471b2222 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputSeparateDispatchersPerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputSeparateDispatchersPerformanceSpec.scala @@ -127,7 +127,7 @@ class TellThroughputSeparateDispatchersPerformanceSpec extends PerformanceSpec { yield system.actorOf(Props(new Destination).withDispatcher(clientDispatcher)) val clients = for ((dest, j) ← destinations.zipWithIndex) yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(clientDispatcher)) - */ + */ val start = System.nanoTime clients.foreach(_ ! 
Run) diff --git a/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java b/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java index 99e7802a22..d4d75c34b4 100644 --- a/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java +++ b/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java @@ -115,7 +115,7 @@ public class Pi { for (int i = 0; i < nrOfWorkers; i++) add(getContext().actorOf(Worker.class)); } }; - // FIXME routers are intended to be used like this + // FIXME routers are intended to be used like this RoutedProps props = new RoutedProps(routerCreator, new LocalConnectionManager(actors), new akka.actor.Timeout(-1), true); router = new RoutedActorRef(getContext().system(), props, (InternalActorRef) getSelf(), "pi"); } From ceb888b9a7764e070ed637d5c7cd536c59052065 Mon Sep 17 00:00:00 2001 From: Peter Vlugter Date: Fri, 9 Dec 2011 21:55:49 +1300 Subject: [PATCH 12/27] Add scripted release --- akka-docs/_sphinx/themes/akka/layout.html | 5 + project/AkkaBuild.scala | 25 +-- project/Publish.scala | 30 ++- project/Release.scala | 34 +++ project/Rstdoc.scala | 34 +++ project/Unidoc.scala | 4 +- project/scripts/find-replace | 85 ++++++++ project/scripts/find-replace.sh | 82 -------- project/scripts/push-release.sh | 35 ---- project/scripts/release | 244 +++++++++++++++++++++- project/scripts/test-release | 13 -- 11 files changed, 412 insertions(+), 179 deletions(-) create mode 100644 project/Release.scala create mode 100644 project/Rstdoc.scala create mode 100755 project/scripts/find-replace delete mode 100644 project/scripts/find-replace.sh delete mode 100644 project/scripts/push-release.sh mode change 100644 => 100755 project/scripts/release delete mode 100644 project/scripts/test-release diff --git a/akka-docs/_sphinx/themes/akka/layout.html b/akka-docs/_sphinx/themes/akka/layout.html index 0bd735c446..0d46ef708e 100644 --- a/akka-docs/_sphinx/themes/akka/layout.html +++ b/akka-docs/_sphinx/themes/akka/layout.html @@ -6,6 +6,7 @@ {% extends "basic/layout.html" %} {% set script_files = script_files + ['_static/theme_extras.js'] %} {% set css_files = css_files + ['_static/print.css'] %} +{% set is_snapshot = version.endswith("-SNAPSHOT") %} {# do not display relbars #} {% block relbar1 %}{% endblock %} @@ -37,7 +38,11 @@ {%- endif -%}

    {{ shorttitle|e }}

    Version {{ version|e }}

    + {%- if is_snapshot -%}

    PDF

    + {%- else -%} +

    PDF

    + {%- endif -%} {%- endblock %}
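The template change above keys the PDF link off whether the version string ends in ``-SNAPSHOT``, the same convention the build uses (``Publish.Snapshot``, with ``build-release`` copying docs to ``docs/akka/<version>``). A minimal Scala sketch of that selection, for illustration only; the snapshot path is an assumption rather than something taken from the patch::

  object DocsPdfLink {
    final val Snapshot = "-SNAPSHOT"

    // Mirrors the layout.html check: snapshot builds link to a rolling
    // "snapshot" location (assumed path), releases to a versioned path that
    // matches the docs/akka/<version> layout produced by build-release.
    def pdfFor(version: String): String =
      if (version.endsWith(Snapshot)) "docs/akka/snapshot/Akka.pdf"
      else "docs/akka/" + version + "/Akka.pdf"
  }

  // e.g. pdfFor("2.0-SNAPSHOT") -> "docs/akka/snapshot/Akka.pdf"
  //      pdfFor("2.0")          -> "docs/akka/2.0/Akka.pdf"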
    diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index bc596dc126..b92bd7a611 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -24,10 +24,10 @@ object AkkaBuild extends Build { lazy val akka = Project( id = "akka", base = file("."), - settings = parentSettings ++ Unidoc.settings ++ rstdocSettings ++ Seq( + settings = parentSettings ++ Release.settings ++ Unidoc.settings ++ Rstdoc.settings ++ Publish.versionSettings ++ Seq( parallelExecution in GlobalScope := false, - Unidoc.unidocExclude := Seq(samples.id, tutorials.id), - rstdocDirectory <<= baseDirectory / "akka-docs" + Publish.defaultPublishTo in ThisBuild <<= crossTarget / "repository", + Unidoc.unidocExclude := Seq(samples.id, tutorials.id) ), aggregate = Seq(actor, testkit, actorTests, stm, remote, slf4j, amqp, mailboxes, akkaSbtPlugin, samples, tutorials, docs) ) @@ -266,7 +266,7 @@ object AkkaBuild extends Build { // Settings - override lazy val settings = super.settings ++ buildSettings ++ Publish.versionSettings + override lazy val settings = super.settings ++ buildSettings lazy val baseSettings = Defaults.defaultSettings ++ Publish.settings @@ -349,23 +349,6 @@ object AkkaBuild extends Build { compileInputs in MultiJvm <<= (compileInputs in MultiJvm) dependsOn (ScalariformKeys.format in MultiJvm), ScalariformKeys.preferences in MultiJvm := formattingPreferences ) - - // reStructuredText docs - - val rstdocDirectory = SettingKey[File]("rstdoc-directory") - val rstdoc = TaskKey[File]("rstdoc", "Build the reStructuredText documentation.") - - lazy val rstdocSettings = Seq(rstdoc <<= rstdocTask) - - def rstdocTask = (rstdocDirectory, streams) map { - (dir, s) => { - s.log.info("Building reStructuredText documentation...") - val exitCode = Process(List("make", "clean", "html", "pdf"), dir) ! 
s.log - if (exitCode != 0) sys.error("Failed to build docs.") - s.log.info("Done building docs.") - dir - } - } } // Dependencies diff --git a/project/Publish.scala b/project/Publish.scala index 1fb9039faa..9cea85af3c 100644 --- a/project/Publish.scala +++ b/project/Publish.scala @@ -1,16 +1,19 @@ package akka import sbt._ -import Keys._ +import sbt.Keys._ +import sbt.Project.Initialize import java.io.File object Publish { final val Snapshot = "-SNAPSHOT" + val defaultPublishTo = SettingKey[File]("default-publish-to") + lazy val settings = Seq( crossPaths := false, pomExtra := akkaPomExtra, - publishTo := akkaPublishTo, + publishTo <<= akkaPublishTo, credentials ++= akkaCredentials, organizationName := "Typesafe Inc.", organizationHomepage := Some(url("http://www.typesafe.com")) @@ -32,11 +35,12 @@ object Publish { } - def akkaPublishTo: Option[Resolver] = { - val property = Option(System.getProperty("akka.publish.repository")) - val repo = property map { "Akka Publish Repository" at _ } - val m2repo = Path.userHome / ".m2" /"repository" - repo orElse Some(Resolver.file("Local Maven Repository", m2repo)) + def akkaPublishTo: Initialize[Option[Resolver]] = { + defaultPublishTo { default => + val property = Option(System.getProperty("akka.publish.repository")) + val repo = property map { "Akka Publish Repository" at _ } + repo orElse Some(Resolver.file("Default Local Repository", default)) + } } def akkaCredentials: Seq[Credentials] = { @@ -44,17 +48,11 @@ object Publish { property map (f => Credentials(new File(f))) toSeq } - def stampVersion = Command.command("stamp-version") { state => - append((version in ThisBuild ~= stamp) :: Nil, state) - } + // timestamped versions - // TODO: replace with extracted.append when updated to sbt 0.10.1 - def append(settings: Seq[Setting[_]], state: State): State = { + def stampVersion = Command.command("stamp-version") { state => val extracted = Project.extract(state) - import extracted._ - val append = Load.transformSettings(Load.projectScope(currentRef), currentRef.build, rootProject, settings) - val newStructure = Load.reapply(session.original ++ append, structure) - Project.setProject(session, newStructure, state) + extracted.append(List(version in ThisBuild ~= stamp), state) } def stamp(version: String): String = { diff --git a/project/Release.scala b/project/Release.scala new file mode 100644 index 0000000000..6b6f5643bc --- /dev/null +++ b/project/Release.scala @@ -0,0 +1,34 @@ +package akka + +import sbt._ +import sbt.Keys._ +import java.io.File + +object Release { + val releaseDirectory = SettingKey[File]("release-directory") + + lazy val settings: Seq[Setting[_]] = commandSettings ++ Seq( + releaseDirectory <<= crossTarget / "release" + ) + + lazy val commandSettings = Seq( + commands += buildReleaseCommand + ) + + def buildReleaseCommand = Command.command("build-release") { state => + val extracted = Project.extract(state) + val release = extracted.get(releaseDirectory) + val releaseVersion = extracted.get(version) + val projectRef = extracted.get(thisProjectRef) + val repo = extracted.get(Publish.defaultPublishTo) + val state1 = extracted.runAggregated(publish in projectRef, state) + val (state2, api) = extracted.runTask(Unidoc.unidoc, state1) + val (state3, docs) = extracted.runTask(Rstdoc.rstdoc, state2) + IO.delete(release) + IO.createDirectory(release) + IO.copyDirectory(repo, release / "releases") + IO.copyDirectory(api, release / "api" / "akka" / releaseVersion) + IO.copyDirectory(docs, release / "docs" / "akka" / releaseVersion) + 
state3 + } +} diff --git a/project/Rstdoc.scala b/project/Rstdoc.scala new file mode 100644 index 0000000000..fb38f756ac --- /dev/null +++ b/project/Rstdoc.scala @@ -0,0 +1,34 @@ +package akka + +import sbt._ +import sbt.Keys._ +import java.io.File + +object Rstdoc { + val rstdocDirectory = SettingKey[File]("rstdoc-directory") + val rstdocTarget = SettingKey[File]("rstdoc-target") + val rstdoc = TaskKey[File]("rstdoc", "Build the reStructuredText documentation.") + + lazy val settings = Seq( + rstdocDirectory <<= baseDirectory / "akka-docs", + rstdocTarget <<= crossTarget / "rstdoc", + rstdoc <<= rstdocTask + ) + + def rstdocTask = (rstdocDirectory, rstdocTarget, streams) map { + (dir, target, s) => { + s.log.info("Building reStructuredText documentation...") + val logger = new ProcessLogger { + def info(o: => String): Unit = s.log.debug(o) + def error(e: => String): Unit = s.log.debug(e) + def buffer[T](f: => T): T = f + } + val exitCode = Process(List("make", "clean", "html", "pdf"), dir) ! logger + if (exitCode != 0) sys.error("Failed to build docs.") + s.log.info("Creating reStructuredText documentation successful.") + IO.copyDirectory(dir / "_build" / "html", target) + IO.copyFile(dir / "_build" / "latex" / "Akka.pdf", target / "Akka.pdf") + target + } + } +} diff --git a/project/Unidoc.scala b/project/Unidoc.scala index 7fffd98a27..209fda53c7 100644 --- a/project/Unidoc.scala +++ b/project/Unidoc.scala @@ -1,8 +1,8 @@ package akka import sbt._ -import Keys._ -import Project.Initialize +import sbt.Keys._ +import sbt.Project.Initialize object Unidoc { val unidocDirectory = SettingKey[File]("unidoc-directory") diff --git a/project/scripts/find-replace b/project/scripts/find-replace new file mode 100755 index 0000000000..d0b6035032 --- /dev/null +++ b/project/scripts/find-replace @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# +# Find and replace across all source files. +# This script will be called as part of the release script. + +# get the source location for this script; handles symlinks +function get_script_path { + local source="${BASH_SOURCE[0]}" + while [ -h "${source}" ] ; do + source="$(readlink "${source}")"; + done + echo ${source} +} + +# path, name, and dir for this script +declare -r script_path=$(get_script_path) +declare -r script_name=$(basename "${script_path}") +declare -r script_dir="$(cd -P "$(dirname "${script_path}")" && pwd)" + +# print usage info +function usage { + echo "Usage: ${script_name} find_expr replace_expr" +} + +function echolog { + echo "[${script_name}] $@" +} + +declare -r find_expr=$1 +declare -r replace_expr=$2 + +if [ -z "$find_expr" ]; then + usage + exit 1 +fi + +echolog "$find_expr --> $replace_expr" + +# exclude directories from search + +declare exclude_dirs=".git dist deploy embedded-repo lib_managed project/boot project/scripts src_managed target" + +echolog "excluding directories: $exclude_dirs" + +exclude_opts="\(" +op="-path" +for dir in $exclude_dirs; do + exclude_opts="${exclude_opts} ${op} '*/${dir}/*'" + op="-or -path" +done +exclude_opts="${exclude_opts} \) -prune -o" + +# replace in files + +search="find . 
${exclude_opts} -type f -print0 | xargs -0 grep -Il \"$find_expr\"" + +files=$(eval "$search") + +simple_diff="diff --old-line-format='[$script_name] - %l +' --new-line-format='[$script_name] + %l +' --changed-group-format='%<%>' --unchanged-group-format=''" + +for file in $files; do + echolog $file + # escape / for sed + sedfind=$(echo $find_expr | sed 's/\//\\\//g') + sedreplace=$(echo $replace_expr | sed 's/\//\\\//g') + sed -i '.sed' "s/${sedfind}/${sedreplace}/g" $file + eval "$simple_diff $file.sed $file" + rm -f $file.sed +done + +# replace in file names + +search="find . ${exclude_opts} -type f -name \"*${find_expr}*\" -print" + +files=$(eval "$search") + +for file in $files; do + dir=$(dirname $file) + name=$(basename $file) + newname=$(echo $name | sed "s/${find_expr}/${replace_expr}/g") + echolog "$file --> $newname" + mv $file $dir/$newname +done diff --git a/project/scripts/find-replace.sh b/project/scripts/find-replace.sh deleted file mode 100644 index fc21a8aa9f..0000000000 --- a/project/scripts/find-replace.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash - -# Find and replace across all source files. -# -# Example usage: -# -# sh project/scripts/find-replace.sh 1.1-SNAPSHOT 1.1-RC1 -# -# This script will be called as part of the sbt release script. - -FIND=$1 -REPLACE=$2 - -if [ -z "$FIND" ]; then - echo "Usage: find-replace.sh FIND REPLACE" - exit 1 -fi - -echo -echo "Find and replace: $FIND --> $REPLACE" - - -# Exclude directories from search - -excludedirs=".git dist deploy embedded-repo lib_managed project/boot project/scripts src_managed target" - -echo "Excluding directories: $excludedirs" - -excludeopts="\(" -op="-path" -for dir in $excludedirs; do - excludeopts="${excludeopts} ${op} '*/${dir}/*'" - op="-or -path" -done -excludeopts="${excludeopts} \) -prune -o" - - -# Replace in files - -search="find . ${excludeopts} -type f -print0 | xargs -0 grep -Il \"${FIND}\"" - -echo $search -echo - -files=$(eval "$search") - -simplediff="diff --old-line-format='- %l -' --new-line-format='+ %l -' --changed-group-format='%<%>' --unchanged-group-format=''" - -for file in $files; do - echo - echo $file - # escape / for sed - sedfind=$(echo $FIND | sed 's/\//\\\//g') - sedreplace=$(echo $REPLACE | sed 's/\//\\\//g') - sed -i '.sed' "s/${sedfind}/${sedreplace}/g" $file - eval "$simplediff $file.sed $file" - rm -f $file.sed -done - -echo - - -# Replace in file names - -search="find . 
${excludeopts} -type f -name \"*${FIND}*\" -print" - -echo $search -echo - -files=$(eval "$search") - -for file in $files; do - dir=$(dirname $file) - name=$(basename $file) - newname=$(echo $name | sed "s/${FIND}/${REPLACE}/g") - echo "$file --> $newname" - mv $file $dir/$newname -done - -echo diff --git a/project/scripts/push-release.sh b/project/scripts/push-release.sh deleted file mode 100644 index c58282144f..0000000000 --- a/project/scripts/push-release.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -VERSION=$1 - -if [ -z "$VERSION" ]; then - echo "Usage: push-release.sh VERSION" - exit 1 -fi - -source ~/.akka-release - -if [ -z "$AKKA_RELEASE_SERVER" ]; then - echo "Need AKKA_RELEASE_SERVER to be specified" - exit 1 -fi - -if [ -z "$AKKA_RELEASE_PATH" ]; then - echo "Need AKKA_RELEASE_PATH to be specified" - exit 1 -fi - -ref=$(git symbolic-ref HEAD 2> /dev/null) -branch=${ref#refs/heads/} - -git push origin $branch -git push origin --tags - -release="target/release/${VERSION}" -tmp="/tmp/akka-release-${VERSION}" - -rsync -avz ${release}/ ${AKKA_RELEASE_SERVER}:${tmp}/ -echo "Verify sudo on $AKKA_RELEASE_SERVER" -ssh -t ${AKKA_RELEASE_SERVER} sudo -v -ssh -t ${AKKA_RELEASE_SERVER} sudo rsync -rpt ${tmp}/ ${AKKA_RELEASE_PATH} -ssh -t ${AKKA_RELEASE_SERVER} rm -rf ${tmp} diff --git a/project/scripts/release b/project/scripts/release old mode 100644 new mode 100755 index 847e8c350a..1870102161 --- a/project/scripts/release +++ b/project/scripts/release @@ -1,10 +1,234 @@ -sh git checkout -b releasing-{{release.arg1}} -set akka.release true -clean -script find-replace.sh {{project.version}} {{release.arg1}} -script find-replace.sh //[[:space:]]*release:[[:space:]]* -reload -build-release -sh git add . -sh git commit -am 'Update version for release {{project.version}}' -sh git tag -m 'Version {{project.version}}' v{{project.version}} +#!/usr/bin/env bash +# +# Release script for Akka. 
+ +# defaults +declare -r default_server="akka.io" +declare -r default_path="/akka/www" + +# settings +declare -r release_dir="target/release" +declare release_server=${default_server} +declare release_path=${default_path} + +# flags +unset run_tests + +# get the source location for this script; handles symlinks +function get_script_path { + local source="${BASH_SOURCE[0]}" + while [ -h "${source}" ] ; do + source="$(readlink "${source}")"; + done + echo ${source} +} + +# path, name, and dir for this script +declare -r script_path=$(get_script_path) +declare -r script_name=$(basename "${script_path}") +declare -r script_dir="$(cd -P "$(dirname "${script_path}")" && pwd)" + +# print usage info +function usage { + cat <&2 +} + +# fail the script with an error message +function fail { + echoerr "$@" + exit 1 +} + +# process options and set flags +while true; do + case "$1" in + -h | --help ) usage; exit 1 ;; + -t | --run-tests ) run_tests=true; shift ;; + -s | --server ) release_server=$2; shift 2 ;; + -p | --path ) release_path=$2; shift 2 ;; + * ) break ;; + esac +done + +if [ $# != "1" ]; then + usage + fail "A release version must be specified" +fi + +declare -r version=$1 +declare -r publish_path="${release_server}:${release_path}" + +# check for a git command +type -P git &> /dev/null || fail "git command not found" + +# check for an sbt command +type -P sbt &> /dev/null || fail "sbt command not found" + +# get the current git branch +function get_current_branch { + local ref=$(git symbolic-ref HEAD 2> /dev/null) + local branch=${ref#refs/heads/} + echo "${branch}" +} + +# get the current project version from sbt +# a little messy as the ansi escape codes are included +function get_current_version { + local result=$(sbt version | tail -1 | cut -f2) + # remove ansi escape code from end + local code0=$(echo -e "\033[0m") + echo ${result%$code0} +} + +# store the current git branch for cleaning up +declare -r initial_branch=$(get_current_branch) + +# check we have an initial branch +[[ "${initial_branch}" ]] || fail "Not on a git branch" + +# check that we have a clean status +[[ -z "$(git status --porcelain)" ]] || { + git status + fail "There are uncommitted changes - please commit before releasing" +} + +# the branch we'll release on +declare -r release_branch="releasing-${version}" + +# try to run a cleanup command - these shouldn't actually fail +function safely { + "$@" || fail "Failed to clean up release - please check current state" +} + +# perform a clean up when a failure has occurred +function git_cleanup { + echoerr "Cleaning up..." + local branch=$(get_current_branch) + safely git reset --hard + safely git clean -f + if [ "${branch}" == "${release_branch}" ]; then + safely git checkout ${initial_branch} + safely git branch -d ${release_branch} + local tags=$(git tag -l) + [[ "${tags}" == *v${version}* ]] && safely git tag -d v${version} + fi + echoerr "Cleaned up failed release" +} + +# clean up and fail the script with an error message +function bail_out { + echoerr "Bailing out!" + git_cleanup + fail "$@" +} + +# bail out for signals +function signal_bail_out { + echoerr "Interrupted by signal" + bail_out "Received signal to stop release" +} + +# bail out on signals +trap signal_bail_out SIGHUP SIGINT SIGTERM + +# try to run a command or otherwise bail out +function try { + "$@" || bail_out "Failed to create release" +} + +echolog "Creating release ${version} ..." 
+echolog "Publishing to ${publish_path}" +[[ $run_tests ]] && echolog "All tests will be run" + +# try ssh'ing to the release server +echolog "Checking ssh connection to ${release_server}" +try ssh -t ${release_server} echo "Successfully contacted release server." + +echolog "Getting current project version from sbt..." +declare -r current_version=$(get_current_version) +echolog "Current version is ${current_version}" + +# check out a release branch +try git checkout -b ${release_branch} + +# find and replace the version +try ${script_dir}/find-replace ${current_version} ${version} + +# start clean +try sbt clean + +# run the tests if specified +if [ $run_tests ]; then + echolog "Running all tests..." + try sbt test + echolog "All tests are green" +fi + +# build the release +echolog "Building the release..." +try sbt build-release +echolog "Successfully created local release" + +# commit and tag this release +echolog "Committing and tagging..." +try git add . +try git commit -am "Update version for release ${version}" +try git tag -m "Version ${version}" v${version} + +# the point of no return... we're now pushing out to servers + +# use a special failure from now on +function arrgh { + cat 1>&2 < Date: Sun, 11 Dec 2011 00:53:15 +0100 Subject: [PATCH 13/27] add Deadline class MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - support implicit conversion to Duration, which gives the time left until the deadline - easy construction with “fromNow”, either from Duration or directly: 2 seconds fromNow --- .../test/scala/akka/util/DurationSpec.scala | 10 ++ .../src/main/scala/akka/util/Duration.scala | 150 ++++++++++++------ .../scala/akka/util/duration/package.scala | 17 ++ 3 files changed, 130 insertions(+), 47 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala b/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala index d30fdc83be..abb30c3daa 100644 --- a/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala @@ -40,6 +40,16 @@ class DurationSpec extends WordSpec with MustMatchers { (minf + minf) must be(minf) } + "support fromNow" in { + val dead = 2.seconds.fromNow + val dead2 = 2 seconds fromNow + (dead: Duration) must be > 1.second + (dead2: Duration) must be > 1.second + 1.second.sleep + (dead: Duration) must be < 1.second + (dead2: Duration) must be < 1.second + } + } } diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index eec371d724..fb915ada73 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -38,6 +38,12 @@ case class Timer(duration: Duration, throwExceptionOnTimeout: Boolean = false) { } } +case class Deadline(d: Duration) +object Deadline { + def now = Duration(System.nanoTime, NANOSECONDS) + implicit def toGo(d: Deadline): Duration = d.d - now +} + object Duration { def apply(length: Long, unit: TimeUnit): Duration = new FiniteDuration(length, unit) def apply(length: Double, unit: TimeUnit): Duration = fromNanos(unit.toNanos(1) * length) @@ -129,10 +135,7 @@ object Duration { override def *(factor: Double): Duration = throw new IllegalArgumentException("cannot multiply Undefined duration") override def /(factor: Double): Duration = throw new IllegalArgumentException("cannot divide Undefined duration") override def /(other: Duration): Double = throw new IllegalArgumentException("cannot 
divide Undefined duration") - def >(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration") - def >=(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration") - def <(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration") - def <=(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration") + def compare(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration") def unary_- : Duration = throw new IllegalArgumentException("cannot negate Undefined duration") } @@ -183,10 +186,7 @@ object Duration { */ val Inf: Duration = new Duration with Infinite { override def toString = "Duration.Inf" - def >(other: Duration) = true - def >=(other: Duration) = true - def <(other: Duration) = false - def <=(other: Duration) = false + def compare(other: Duration) = 1 def unary_- : Duration = MinusInf } @@ -196,10 +196,7 @@ object Duration { */ val MinusInf: Duration = new Duration with Infinite { override def toString = "Duration.MinusInf" - def >(other: Duration) = false - def >=(other: Duration) = false - def <(other: Duration) = true - def <=(other: Duration) = true + def compare(other: Duration) = -1 def unary_- : Duration = Inf } @@ -255,7 +252,7 @@ object Duration { * val d3 = d2 + 1.millisecond * */ -abstract class Duration extends Serializable { +abstract class Duration extends Serializable with Ordered[Duration] { def length: Long def unit: TimeUnit def toNanos: Long @@ -267,10 +264,6 @@ abstract class Duration extends Serializable { def toDays: Long def toUnit(unit: TimeUnit): Double def printHMS: String - def <(other: Duration): Boolean - def <=(other: Duration): Boolean - def >(other: Duration): Boolean - def >=(other: Duration): Boolean def +(other: Duration): Duration def -(other: Duration): Duration def *(factor: Double): Duration @@ -281,6 +274,7 @@ abstract class Duration extends Serializable { def min(other: Duration): Duration = if (this < other) this else other def max(other: Duration): Duration = if (this > other) this else other def sleep(): Unit = Thread.sleep(toMillis) + def fromNow: Deadline = Deadline(Deadline.now + this) // Java API def lt(other: Duration) = this < other @@ -329,37 +323,12 @@ class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration { def printHMS = "%02d:%02d:%06.3f".format(toHours, toMinutes % 60, toMillis / 1000. % 60) - def <(other: Duration) = { + def compare(other: Duration) = if (other.finite_?) { - toNanos < other.asInstanceOf[FiniteDuration].toNanos - } else { - other > this - } - } - - def <=(other: Duration) = { - if (other.finite_?) { - toNanos <= other.asInstanceOf[FiniteDuration].toNanos - } else { - other >= this - } - } - - def >(other: Duration) = { - if (other.finite_?) { - toNanos > other.asInstanceOf[FiniteDuration].toNanos - } else { - other < this - } - } - - def >=(other: Duration) = { - if (other.finite_?) { - toNanos >= other.asInstanceOf[FiniteDuration].toNanos - } else { - other <= this - } - } + val me = toNanos + val o = other.toNanos + if (me > o) 1 else if (me < o) -1 else 0 + } else -other.compare(this) def +(other: Duration) = { if (!other.finite_?) 
{ @@ -397,6 +366,8 @@ class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration { } class DurationInt(n: Int) { + import duration.Classifier + def nanoseconds = Duration(n, NANOSECONDS) def nanos = Duration(n, NANOSECONDS) def nanosecond = Duration(n, NANOSECONDS) @@ -423,9 +394,38 @@ class DurationInt(n: Int) { def days = Duration(n, DAYS) def day = Duration(n, DAYS) + + def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) + def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) + def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) + def nano[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) + + def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) + def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) + def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) + def micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) + + def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) + def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) + def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) + def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) + + def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) + def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) + + def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) + def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) + + def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) + def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) + + def days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, DAYS)) + def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, DAYS)) } class DurationLong(n: Long) { + import duration.Classifier + def nanoseconds = Duration(n, NANOSECONDS) def nanos = Duration(n, NANOSECONDS) def nanosecond = Duration(n, NANOSECONDS) @@ -452,9 +452,38 @@ class DurationLong(n: Long) { def days = Duration(n, DAYS) def day = Duration(n, DAYS) + + def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) + def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) + def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) + def nano[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, NANOSECONDS)) + + def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) + def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) + def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) + def 
micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MICROSECONDS)) + + def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) + def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) + def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) + def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MILLISECONDS)) + + def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) + def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, SECONDS)) + + def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) + def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, MINUTES)) + + def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) + def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, HOURS)) + + def days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, DAYS)) + def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(n, DAYS)) } class DurationDouble(d: Double) { + import duration.Classifier + def nanoseconds = Duration(d, NANOSECONDS) def nanos = Duration(d, NANOSECONDS) def nanosecond = Duration(d, NANOSECONDS) @@ -481,5 +510,32 @@ class DurationDouble(d: Double) { def days = Duration(d, DAYS) def day = Duration(d, DAYS) + + def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) + def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) + def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) + def nano[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, NANOSECONDS)) + + def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) + def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) + def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) + def micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MICROSECONDS)) + + def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) + def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) + def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) + def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MILLISECONDS)) + + def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, SECONDS)) + def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, SECONDS)) + + def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MINUTES)) + def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, MINUTES)) + + def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, HOURS)) + def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, HOURS)) + + def 
days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, DAYS)) + def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(Duration(d, DAYS)) } diff --git a/akka-actor/src/main/scala/akka/util/duration/package.scala b/akka-actor/src/main/scala/akka/util/duration/package.scala index 97e0e82c39..aad0c7940c 100644 --- a/akka-actor/src/main/scala/akka/util/duration/package.scala +++ b/akka-actor/src/main/scala/akka/util/duration/package.scala @@ -7,6 +7,23 @@ package akka.util import java.util.concurrent.TimeUnit package object duration { + trait Classifier[C] { + type R + def convert(d: Duration): R + } + + object span + implicit object spanConvert extends Classifier[span.type] { + type R = Duration + def convert(d: Duration) = d + } + + object fromNow + implicit object fromNowConvert extends Classifier[fromNow.type] { + type R = Deadline + def convert(d: Duration) = Deadline(Deadline.now + d) + } + implicit def intToDurationInt(n: Int) = new DurationInt(n) implicit def longToDurationLong(n: Long) = new DurationLong(n) implicit def doubleToDurationDouble(d: Double) = new DurationDouble(d) From 27e93f66ca636f8688568e376874e436783844e4 Mon Sep 17 00:00:00 2001 From: Roland Date: Sun, 11 Dec 2011 11:21:34 +0100 Subject: [PATCH 14/27] polish Deadline class - remove implicit because that feels too wide: explicitly extracting the current timeLeft seems much better. - if an implicit Duration is asked for, provide one given an implicit Deadline (this does not act as view; the details elude me, but it is what I want) - add basic arithmetic --- .../src/test/scala/akka/util/DurationSpec.scala | 8 ++++---- akka-actor/src/main/scala/akka/util/Duration.scala | 14 ++++++++++---- .../main/scala/akka/util/duration/package.scala | 2 +- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala b/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala index abb30c3daa..6a291872b8 100644 --- a/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala @@ -43,11 +43,11 @@ class DurationSpec extends WordSpec with MustMatchers { "support fromNow" in { val dead = 2.seconds.fromNow val dead2 = 2 seconds fromNow - (dead: Duration) must be > 1.second - (dead2: Duration) must be > 1.second + dead.timeLeft must be > 1.second + dead2.timeLeft must be > 1.second 1.second.sleep - (dead: Duration) must be < 1.second - (dead2: Duration) must be < 1.second + dead.timeLeft must be < 1.second + dead2.timeLeft must be < 1.second } } diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index fb915ada73..6e9310e5d8 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -38,13 +38,19 @@ case class Timer(duration: Duration, throwExceptionOnTimeout: Boolean = false) { } } -case class Deadline(d: Duration) +case class Deadline(d: Duration) { + def +(other: Duration): Deadline = copy(d = d + other) + def -(other: Duration): Deadline = copy(d = d - other) + def -(other: Deadline): Duration = d - other.d + def timeLeft: Duration = this - Deadline.now +} object Deadline { - def now = Duration(System.nanoTime, NANOSECONDS) - implicit def toGo(d: Deadline): Duration = d.d - now + def now: Deadline = Deadline(Duration(System.nanoTime, NANOSECONDS)) } object Duration { + implicit def timeLeft(implicit d: Deadline): Duration = 
d.timeLeft + def apply(length: Long, unit: TimeUnit): Duration = new FiniteDuration(length, unit) def apply(length: Double, unit: TimeUnit): Duration = fromNanos(unit.toNanos(1) * length) def apply(length: Long, unit: String): Duration = new FiniteDuration(length, timeUnit(unit)) @@ -274,7 +280,7 @@ abstract class Duration extends Serializable with Ordered[Duration] { def min(other: Duration): Duration = if (this < other) this else other def max(other: Duration): Duration = if (this > other) this else other def sleep(): Unit = Thread.sleep(toMillis) - def fromNow: Deadline = Deadline(Deadline.now + this) + def fromNow: Deadline = Deadline.now + this // Java API def lt(other: Duration) = this < other diff --git a/akka-actor/src/main/scala/akka/util/duration/package.scala b/akka-actor/src/main/scala/akka/util/duration/package.scala index aad0c7940c..88a328d6d8 100644 --- a/akka-actor/src/main/scala/akka/util/duration/package.scala +++ b/akka-actor/src/main/scala/akka/util/duration/package.scala @@ -21,7 +21,7 @@ package object duration { object fromNow implicit object fromNowConvert extends Classifier[fromNow.type] { type R = Deadline - def convert(d: Duration) = Deadline(Deadline.now + d) + def convert(d: Duration) = Deadline.now + d } implicit def intToDurationInt(n: Int) = new DurationInt(n) From baf2a1746af6a5d6f500e16ede54aa09c53ea58c Mon Sep 17 00:00:00 2001 From: Roland Date: Sun, 11 Dec 2011 13:34:57 +0100 Subject: [PATCH 15/27] add docs for Deadline --- akka-docs/common/duration.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/akka-docs/common/duration.rst b/akka-docs/common/duration.rst index 523c8a2283..6ddc5f6245 100644 --- a/akka-docs/common/duration.rst +++ b/akka-docs/common/duration.rst @@ -48,4 +48,18 @@ method calls instead: assert (diff.lt(fivesec)); assert (Duration.Zero().lt(Duration.Inf())); +Deadline +======== +Durations have a brother name :class:`Deadline`, which is a class holding a representation +of an absolute point in time, and support deriving a duration from this by calculating the +difference between now and the deadline. This is useful when you want to keep one overall +deadline without having to take care of the book-keeping wrt. the passing of time yourself:: + + val deadline = 10 seconds fromNow + // do something which takes time + awaitCond(..., deadline.timeLeft) + +In Java you create these from durations:: + + final Deadline d = Duration.create(5, "seconds").fromNow(); From 08af7684e5f046b811170c84fd8bbfcfc9d1c1b4 Mon Sep 17 00:00:00 2001 From: Peter Vlugter Date: Mon, 12 Dec 2011 15:45:43 +1300 Subject: [PATCH 16/27] Include copy xsd in release script --- project/scripts/copy-xsd.sh | 40 ------------------------------------- project/scripts/release | 1 + 2 files changed, 1 insertion(+), 40 deletions(-) delete mode 100644 project/scripts/copy-xsd.sh diff --git a/project/scripts/copy-xsd.sh b/project/scripts/copy-xsd.sh deleted file mode 100644 index 214d3332b2..0000000000 --- a/project/scripts/copy-xsd.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -# Copy the akka-version.xsd file to akka.io, renaming it for a release. 
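
Stepping back to the ``Deadline`` API introduced in the patches above, a small self-contained Scala sketch of how the pieces fit together, assuming only what those patches add (``fromNow``, ``timeLeft`` and the ``akka.util.duration`` DSL); the object name is illustrative::

   import akka.util.Duration
   import akka.util.duration._

   object DeadlineExample extends App {
     // An absolute point in time, two seconds from now.
     val deadline = 2.seconds.fromNow

     // Spend part of the budget (Duration.sleep delegates to Thread.sleep).
     500.millis.sleep

     // timeLeft re-derives the remaining Duration relative to "now", so one
     // overall deadline can drive several consecutive waits.
     val remaining: Duration = deadline.timeLeft
     println("time left: " + remaining)
   }
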
-# -# Example usage: -# -# sh project/scripts/copy-xsd.sh 1.1-RC1 - -RELEASE=$1 - -if [ -z "$RELEASE" ]; then - echo "Usage: copy-xsd.sh RELEASE" - exit 1 -fi - -version=`grep 'project.version' project/build.properties | cut -d '=' -f2` - -if [ -z "$version" ]; then - echo "Couldn't find the current version in project/build.properties" - exit 1 -fi - -source ~/.akka-release - -if [ -z "$AKKA_RELEASE_SERVER" ]; then - echo "Need AKKA_RELEASE_SERVER to be specified" - exit 1 -fi - -if [ -z "$AKKA_RELEASE_PATH" ]; then - echo "Need AKKA_RELEASE_PATH to be specified" - exit 1 -fi - -echo "Verify sudo on $AKKA_RELEASE_SERVER" -ssh -t ${AKKA_RELEASE_SERVER} sudo -v - -scp akka-spring/src/main/resources/akka/spring/akka-${version}.xsd ${AKKA_RELEASE_SERVER}:/tmp/akka-${RELEASE}.xsd -ssh -t ${AKKA_RELEASE_SERVER} sudo cp /tmp/akka-${RELEASE}.xsd ${AKKA_RELEASE_PATH}/akka-${RELEASE}.xsd -ssh -t ${AKKA_RELEASE_SERVER} rm -f /tmp/akka-${RELEASE}.xsd diff --git a/project/scripts/release b/project/scripts/release index 1870102161..ccbad76f36 100755 --- a/project/scripts/release +++ b/project/scripts/release @@ -183,6 +183,7 @@ fi # build the release echolog "Building the release..." try sbt build-release +try cp akka-spring/src/main/resources/akka/spring/akka-*.xsd ${release_dir} echolog "Successfully created local release" # commit and tag this release From eaafed69ebbcc7b3260df30b80b80e9d9554926b Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 12 Dec 2011 08:37:18 +0100 Subject: [PATCH 17/27] DOC: Update Durable Mailboxes Chapter. See #1472 --- akka-docs/cluster/durable-mailbox.rst | 278 ------------------ akka-docs/cluster/index.rst | 1 - .../actor/mailbox/DurableMailboxDocSpec.scala | 31 ++ .../actor/mailbox/DurableMailboxDocTest.scala | 5 + .../mailbox/DurableMailboxDocTestBase.java | 41 +++ akka-docs/modules/durable-mailbox.rst | 221 ++++++++++++++ akka-docs/modules/index.rst | 1 + .../akka/actor/mailbox/DurableMailbox.scala | 16 + project/AkkaBuild.scala | 2 +- 9 files changed, 316 insertions(+), 280 deletions(-) delete mode 100644 akka-docs/cluster/durable-mailbox.rst create mode 100644 akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala create mode 100644 akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTest.scala create mode 100644 akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java create mode 100644 akka-docs/modules/durable-mailbox.rst diff --git a/akka-docs/cluster/durable-mailbox.rst b/akka-docs/cluster/durable-mailbox.rst deleted file mode 100644 index 875d6ea9fb..0000000000 --- a/akka-docs/cluster/durable-mailbox.rst +++ /dev/null @@ -1,278 +0,0 @@ - -.. _durable-mailboxes: - -################### - Durable Mailboxes -################### - -Overview -======== - -Akka supports a set of durable mailboxes. A durable mailbox is a replacement for -the standard actor mailbox that is durable. What this means in practice is that -if there are pending messages in the actor's mailbox when the node of the actor -resides on crashes, then when you restart the node, the actor will be able to -continue processing as if nothing had happened; with all pending messages still -in its mailbox. - -.. sidebar:: **IMPORTANT** - - None of these mailboxes work with blocking message send, e.g. the message - send operations that are relying on futures; ``?`` or ``ask``. If the node - has crashed and then restarted, the thread that was blocked waiting for the - reply is gone and there is no way we can deliver the message. 
- -The durable mailboxes currently supported are: - - - ``FileDurableMailboxStorage`` -- backed by a journaling transaction log on the local file system - - ``RedisDurableMailboxStorage`` -- backed by Redis - - ``ZooKeeperDurableMailboxStorage`` -- backed by ZooKeeper - - ``BeanstalkDurableMailboxStorage`` -- backed by Beanstalkd - - ``MongoNaiveDurableMailboxStorage`` -- backed by MongoDB - -We'll walk through each one of these in detail in the sections below. - -Soon Akka will also have: - - - ``AmqpDurableMailboxStorage`` -- AMQP based mailbox (default RabbitMQ) - - ``JmsDurableMailboxStorage`` -- JMS based mailbox (default ActiveMQ) - - -File-based durable mailbox -========================== - -This mailbox is backed by a journaling transaction log on the local file -system. It is the simplest want to use since it does not require an extra -infrastructure piece to administer, but it is usually sufficient and just what -you need. - -The durable dispatchers and their configuration options reside in the -``akka.actor.mailbox`` package. - -You configure durable mailboxes through the "Akka"-only durable dispatchers, the -actor is oblivious to which type of mailbox it is using. Here is an example:: - - val dispatcher = DurableDispatcher( - "my:service", - FileDurableMailboxStorage) - // Then set the actors dispatcher to this dispatcher - -or for a thread-based durable dispatcher:: - - self.dispatcher = DurablePinnedDispatcher( - self, - FileDurableMailboxStorage) - -There are 2 different durable dispatchers, ``DurableDispatcher`` and -``DurablePinnedDispatcher``, which are durable versions of -``Dispatcher`` and ``PinnedDispatcher``. - -This gives you an excellent way of creating bulkheads in your application, where -groups of actors sharing the same dispatcher also share the same backing -storage. - -Read more about that in the :ref:`dispatchers-scala` documentation. - -You can also configure and tune the file-based durable mailbox. This is done in -the ``akka.actor.mailbox.file-based`` section in the :ref:`configuration`. - -.. code-block:: none - - akka { - actor { - mailbox { - file-based { - directory-path = "./_mb" - max-items = 2147483647 - max-size = 2147483647 - max-items = 2147483647 - max-age = 0 - max-journal-size = 16777216 # 16 * 1024 * 1024 - max-memory-size = 134217728 # 128 * 1024 * 1024 - max-journal-overflow = 10 - max-journal-size-absolute = 9223372036854775807 - discard-old-when-full = on - keep-journal = on - sync-journal = off - } - } - } - } - -.. todo:: explain all the above options in detail - - -Redis-based durable mailbox -=========================== - -This mailbox is backed by a Redis queue. `Redis `_ Is a very -fast NOSQL database that has a wide range of data structure abstractions, one of -them is a queue which is what we are using in this implementation. This means -that you have to start up a Redis server that can host these durable -mailboxes. Read more in the Redis documentation on how to do that. - -Here is an example of how you can configure your dispatcher to use this mailbox:: - - val dispatcher = DurableDispatcher( - "my:service", - RedisDurableMailboxStorage) - -or for a thread-based durable dispatcher:: - - self.dispatcher = DurablePinnedDispatcher( - self, - RedisDurableMailboxStorage) - -You also need to configure the IP and port for the Redis server. This is done in -the ``akka.actor.mailbox.redis`` section in the :ref:`configuration`. - -.. 
code-block:: none - - akka { - actor { - mailbox { - redis { - hostname = "127.0.0.1" - port = 6379 - } - } - } - } - - -ZooKeeper-based durable mailbox -=============================== - -This mailbox is backed by `ZooKeeper `_. ZooKeeper -is a centralized service for maintaining configuration information, naming, -providing distributed synchronization, and providing group services This means -that you have to start up a ZooKeeper server (for production a ZooKeeper server -ensamble) that can host these durable mailboxes. Read more in the ZooKeeper -documentation on how to do that. - -Akka is using ZooKeeper for many other things, for example the clustering -support so if you're using that you love to run a ZooKeeper server anyway and -there will not be that much more work to set up this durable mailbox. - -Here is an example of how you can configure your dispatcher to use this mailbox:: - - val dispatcher = DurableDispatcher( - "my:service", - ZooKeeperDurableMailboxStorage) - -or for a thread-based durable dispatcher:: - - self.dispatcher = DurablePinnedDispatcher( - self, - ZooKeeperDurableMailboxStorage) - -You also need to configure ZooKeeper server addresses, timeouts, etc. This is -done in the ``akka.actor.mailbox.zookeeper`` section in the :ref:`configuration`. - -.. code-block:: none - - akka { - actor { - mailbox { - zookeeper { - server-addresses = "localhost:2181" - session-timeout = 60 - connection-timeout = 30 - blocking-queue = on - } - } - } - } - - -Beanstalk-based durable mailbox -=============================== - -This mailbox is backed by `Beanstalkd `_. -Beanstalk is a simple, fast work queue. This means that you have to start up a -Beanstalk server that can host these durable mailboxes. Read more in the -Beanstalk documentation on how to do that. :: - - val dispatcher = DurableDispatcher( - "my:service", - BeanstalkDurableMailboxStorage) - -or for a thread-based durable dispatcher. :: - - self.dispatcher = DurablePinnedDispatcher( - self, - BeanstalkDurableMailboxStorage) - -You also need to configure the IP, and port, and so on, for the Beanstalk -server. This is done in the ``akka.actor.mailbox.beanstalk`` section in the -:ref:`configuration`. - -.. code-block:: none - - akka { - actor { - mailbox { - beanstalk { - hostname = "127.0.0.1" - port = 11300 - reconnect-window = 5 - message-submit-delay = 0 - message-submit-timeout = 5 - message-time-to-live = 120 - } - } - } - } - -MongoDB-based Durable Mailboxes -=============================== - -This mailbox is backed by `MongoDB `_. -MongoDB is a fast, lightweight and scalable document-oriented database. It contains a number of -features cohesive to a fast, reliable & durable queueing mechanism which the Akka Mailbox takes advantage of. - - -Akka's implementations of MongoDB mailboxes are built on top of the purely asynchronous MongoDB driver (often known as `Hammersmith `_ and ``com.mongodb.async``) and as such are purely callback based with a Netty network layer. This makes them extremely fast & lightweight versus building on other MongoDB implementations such as `mongo-java-driver `_ and `Casbah `_. - -You will need to configure the URI for the MongoDB server, using the URI Format specified in the `MongoDB Documentation `_. This is done in -the ``akka.actor.mailbox.mongodb`` section in the :ref:`configuration`. - -.. 
code-block:: none - - mongodb { - # Any specified collection name will be used as a prefix for collections that use durable mongo mailboxes - uri = "mongodb://localhost/akka.mailbox" # Follow Mongo URI Spec - http://www.mongodb.org/display/DOCS/Connections - # Configurable timeouts for certain ops - timeout { - read = 3000 # number of milliseconds to wait for a read to succeed before timing out the future - write = 3000 # number of milliseconds to wait for a write to succeed before timing out the future - } - } - -You must specify a hostname (and optionally port) and at *least* a Database name. If you specify a collection name, it will be used as a 'prefix' for the collections Akka creates to store mailbox messages. Otherwise, collections will be prefixed with ``mailbox.`` - -It is also possible to configure the timeout threshholds for Read and Write operations in the ``timeout`` block. -Currently Akka offers only one "type" of MongoDB based Mailbox but there are plans to support at least -one other kind which uses a different queueing strategy. - - -'Naive' MongoDB-based Durable Mailbox -------------------------------------- -The currently supported mailbox is considered "Naive" as it removes messages (using the ``findAndRemove`` -command) from the MongoDB datastore as soon as the actor consumes them. This could cause message loss -if an actor crashes before completely processing a message. It is not a problem per sé, but behavior -users should be aware of. - -Here is an example of how you can configure your dispatcher to use this mailbox:: - - val dispatcher = DurableDispatcher( - "my:service", - MongoNaiveDurableMailboxStorage) - -or for a thread-based durable dispatcher:: - - self.dispatcher = DurablePinnedDispatcher( - self, - MongoNaiveDurableMailboxStorage) - - diff --git a/akka-docs/cluster/index.rst b/akka-docs/cluster/index.rst index bdbd95bde6..35c4b2250a 100644 --- a/akka-docs/cluster/index.rst +++ b/akka-docs/cluster/index.rst @@ -5,4 +5,3 @@ Cluster :maxdepth: 2 cluster - durable-mailbox diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala new file mode 100644 index 0000000000..566aef4c27 --- /dev/null +++ b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala @@ -0,0 +1,31 @@ +package akka.docs.actor.mailbox + +//#imports +import akka.actor.Actor +import akka.actor.Props +import akka.actor.mailbox.FileDurableMailboxType + +//#imports + +import org.scalatest.{ BeforeAndAfterAll, WordSpec } +import org.scalatest.matchers.MustMatchers +import akka.testkit.AkkaSpec + +class MyActor extends Actor { + def receive = { + case x ⇒ + } +} + +class DurableMailboxDocSpec extends AkkaSpec { + + "define dispatcher with durable mailbox" in { + //#define-dispatcher + val dispatcher = system.dispatcherFactory.newDispatcher( + "my-dispatcher", throughput = 1, mailboxType = FileDurableMailboxType).build + val myActor = system.actorOf(Props[MyActor].withDispatcher(dispatcher), name = "myactor") + //#define-dispatcher + myActor ! 
"hello" + } + +} diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTest.scala b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTest.scala new file mode 100644 index 0000000000..fa31f08b6a --- /dev/null +++ b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTest.scala @@ -0,0 +1,5 @@ +package akka.docs.actor.mailbox + +import org.scalatest.junit.JUnitSuite + +class DurableMailboxDocTest extends DurableMailboxDocTestBase with JUnitSuite diff --git a/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java new file mode 100644 index 0000000000..4ac3204d0b --- /dev/null +++ b/akka-docs/modules/code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java @@ -0,0 +1,41 @@ +package akka.docs.actor.mailbox; + +//#imports +import akka.actor.mailbox.DurableMailboxType; +import akka.dispatch.MessageDispatcher; +import akka.actor.UntypedActorFactory; +import akka.actor.UntypedActor; +import akka.actor.Props; + +//#imports + +import org.junit.Test; + +import akka.actor.ActorRef; +import akka.actor.ActorSystem; + +import static org.junit.Assert.*; + +public class DurableMailboxDocTestBase { + + @Test + public void defineDispatcher() { + ActorSystem system = ActorSystem.create("MySystem"); + //#define-dispatcher + MessageDispatcher dispatcher = system.dispatcherFactory() + .newDispatcher("my-dispatcher", 1, DurableMailboxType.fileDurableMailboxType()).build(); + ActorRef myActor = system.actorOf(new Props().withDispatcher(dispatcher).withCreator(new UntypedActorFactory() { + public UntypedActor create() { + return new MyUntypedActor(); + } + })); + //#define-dispatcher + myActor.tell("test"); + system.stop(); + } + + public static class MyUntypedActor extends UntypedActor { + public void onReceive(Object message) { + } + } +} diff --git a/akka-docs/modules/durable-mailbox.rst b/akka-docs/modules/durable-mailbox.rst new file mode 100644 index 0000000000..9291f22d37 --- /dev/null +++ b/akka-docs/modules/durable-mailbox.rst @@ -0,0 +1,221 @@ + +.. _durable-mailboxes: + +################### + Durable Mailboxes +################### + +.. sidebar:: Contents + + .. contents:: :local: + +Overview +======== + +Akka supports a set of durable mailboxes. A durable mailbox is a replacement for +the standard actor mailbox that is durable. What this means in practice is that +if there are pending messages in the actor's mailbox when the node of the actor +resides on crashes, then when you restart the node, the actor will be able to +continue processing as if nothing had happened; with all pending messages still +in its mailbox. + +None of these mailboxes implements transactions for current message. It's possible +if the actor crashes after receiving a message, but before completing processing of +it, that the message could be lost. + +.. warning:: **IMPORTANT** + + None of these mailboxes work with blocking message send, e.g. the message + send operations that are relying on futures; ``?`` or ``ask``. If the node + has crashed and then restarted, the thread that was blocked waiting for the + reply is gone and there is no way we can deliver the message. 
+ +The durable mailboxes currently supported are: + + - ``FileBasedMailbox`` -- backed by a journaling transaction log on the local file system + - ``RedisBasedMailbox`` -- backed by Redis + - ``ZooKeeperBasedMailbox`` -- backed by ZooKeeper + - ``BeanstalkBasedMailbox`` -- backed by Beanstalkd + - ``MongoBasedMailbox`` -- backed by MongoDB + +We'll walk through each one of these in detail in the sections below. + +You can easily implement your own mailbox. Look at the existing implementations for inspiration. + +Soon Akka will also have: + + - ``AmqpBasedMailbox`` -- AMQP based mailbox (default RabbitMQ) + - ``JmsBasedMailbox`` -- JMS based mailbox (default ActiveMQ) + + +.. _DurableMailbox.General: + +General Usage +------------- + +The durable mailboxes and their configuration options reside in the +``akka.actor.mailbox`` package. + +You configure durable mailboxes through the dispatcher. The +actor is oblivious to which type of mailbox it is using. +Here is an example in Scala: + +.. includecode:: code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala + :include: imports,define-dispatcher + +Corresponding example in Java: + +.. includecode:: code/akka/docs/actor/mailbox/DurableMailboxDocTestBase.java + :include: imports,define-dispatcher + +The actor is oblivious to which type of mailbox it is using. + +This gives you an excellent way of creating bulkheads in your application, where +groups of actors sharing the same dispatcher also share the same backing +storage. Read more about that in the :ref:`dispatchers-scala` documentation. + +File-based durable mailbox +========================== + +This mailbox is backed by a journaling transaction log on the local file +system. It is the simplest to use since it does not require an extra +infrastructure piece to administer, but it is usually sufficient and just what +you need. + +You configure durable mailboxes through the dispatcher, as described in +:ref:`DurableMailbox.General` with the following mailbox type. + +Scala:: + + mailbox = akka.actor.mailbox.FileDurableMailboxType + +Java:: + + akka.actor.mailbox.DurableMailboxType.fileDurableMailboxType() + + +You can also configure and tune the file-based durable mailbox. This is done in +the ``akka.actor.mailbox.file-based`` section in the :ref:`configuration`. + +.. literalinclude:: ../../akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf + :language: none + + +Redis-based durable mailbox +=========================== + +This mailbox is backed by a Redis queue. `Redis `_ Is a very +fast NOSQL database that has a wide range of data structure abstractions, one of +them is a queue which is what we are using in this implementation. This means +that you have to start up a Redis server that can host these durable +mailboxes. Read more in the Redis documentation on how to do that. + +You configure durable mailboxes through the dispatcher, as described in +:ref:`DurableMailbox.General` with the following mailbox type. + +Scala:: + + mailbox = akka.actor.mailbox.RedisDurableMailboxType + +Java:: + + akka.actor.mailbox.DurableMailboxType.redisDurableMailboxType() + + +You also need to configure the IP and port for the Redis server. This is done in +the ``akka.actor.mailbox.redis`` section in the :ref:`configuration`. + +.. literalinclude:: ../../akka-durable-mailboxes/akka-redis-mailbox/src/main/resources/reference.conf + :language: none + + +ZooKeeper-based durable mailbox +=============================== + +This mailbox is backed by `ZooKeeper `_. 
ZooKeeper +is a centralized service for maintaining configuration information, naming, +providing distributed synchronization, and providing group services This means +that you have to start up a ZooKeeper server (for production a ZooKeeper server +ensamble) that can host these durable mailboxes. Read more in the ZooKeeper +documentation on how to do that. + +You configure durable mailboxes through the dispatcher, as described in +:ref:`DurableMailbox.General` with the following mailbox type. + +Scala:: + + mailbox = akka.actor.mailbox.ZooKeeperDurableMailboxType + +Java:: + + akka.actor.mailbox.DurableMailboxType.zooKeeperDurableMailboxType() + +You also need to configure ZooKeeper server addresses, timeouts, etc. This is +done in the ``akka.actor.mailbox.zookeeper`` section in the :ref:`configuration`. + +.. literalinclude:: ../../akka-durable-mailboxes/akka-zookeeper-mailbox/src/main/resources/reference.conf + :language: none + +Beanstalk-based durable mailbox +=============================== + +This mailbox is backed by `Beanstalkd `_. +Beanstalk is a simple, fast work queue. This means that you have to start up a +Beanstalk server that can host these durable mailboxes. Read more in the +Beanstalk documentation on how to do that. + +You configure durable mailboxes through the dispatcher, as described in +:ref:`DurableMailbox.General` with the following mailbox type. + +Scala:: + + mailbox = akka.actor.mailbox.BeanstalkDurableMailboxType + +Java:: + + akka.actor.mailbox.DurableMailboxType.beanstalkDurableMailboxType() + +You also need to configure the IP, and port, and so on, for the Beanstalk +server. This is done in the ``akka.actor.mailbox.beanstalk`` section in the +:ref:`configuration`. + +.. literalinclude:: ../../akka-durable-mailboxes/akka-beanstalk-mailbox/src/main/resources/reference.conf + :language: none + +MongoDB-based Durable Mailboxes +=============================== + +This mailbox is backed by `MongoDB `_. +MongoDB is a fast, lightweight and scalable document-oriented database. It contains a number of +features cohesive to a fast, reliable & durable queueing mechanism which the Akka Mailbox takes advantage of. + +Akka's implementations of MongoDB mailboxes are built on top of the purely asynchronous MongoDB driver +(often known as `Hammersmith `_ and ``com.mongodb.async``) +and as such are purely callback based with a Netty network layer. This makes them extremely fast & +lightweight versus building on other MongoDB implementations such as +`mongo-java-driver `_ and `Casbah `_. + +You configure durable mailboxes through the dispatcher, as described in +:ref:`DurableMailbox.General` with the following mailbox type. + +Scala:: + + mailbox = akka.actor.mailbox.MongoDurableMailboxType + +Java:: + + akka.actor.mailbox.DurableMailboxType.mongoDurableMailboxType() + +You will need to configure the URI for the MongoDB server, using the URI Format specified in the +`MongoDB Documentation `_. This is done in +the ``akka.actor.mailbox.mongodb`` section in the :ref:`configuration`. + +.. literalinclude:: ../../akka-durable-mailboxes/akka-mongo-mailbox/src/main/resources/reference.conf + :language: none + +You must specify a hostname (and optionally port) and at *least* a Database name. If you specify a +collection name, it will be used as a 'prefix' for the collections Akka creates to store mailbox messages. +Otherwise, collections will be prefixed with ``mailbox.`` + +It is also possible to configure the timeout thresholds for Read and Write operations in the ``timeout`` block. 
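
As with the file-based mailbox, the MongoDB variant is wired in through the dispatcher rather than the actor itself. A sketch in the style of the ``DurableMailboxDocSpec`` shown above, simply swapping in ``MongoDurableMailboxType``; the actor, dispatcher and system names are illustrative::

   import akka.actor.{ Actor, ActorSystem, Props }
   import akka.actor.mailbox.MongoDurableMailboxType

   class MyActor extends Actor {
     def receive = {
       case x => // pending messages are kept in MongoDB across node restarts
     }
   }

   object MongoMailboxExample extends App {
     val system = ActorSystem("MySystem")
     val dispatcher = system.dispatcherFactory.newDispatcher(
       "my-dispatcher", throughput = 1, mailboxType = MongoDurableMailboxType).build
     val myActor = system.actorOf(Props[MyActor].withDispatcher(dispatcher), name = "myactor")
     // Fire-and-forget only: as the warning above notes, ask/? does not mix
     // with durable mailboxes.
     myActor ! "hello"
   }
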
+ diff --git a/akka-docs/modules/index.rst b/akka-docs/modules/index.rst index c4d5211562..780d5b23df 100644 --- a/akka-docs/modules/index.rst +++ b/akka-docs/modules/index.rst @@ -4,6 +4,7 @@ Modules .. toctree:: :maxdepth: 2 + durable-mailbox microkernel camel spring diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala index 96cb764615..7eb30b2fdb 100644 --- a/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala +++ b/akka-durable-mailboxes/akka-mailboxes-common/src/main/scala/akka/actor/mailbox/DurableMailbox.scala @@ -107,6 +107,22 @@ case object FileDurableMailboxType extends DurableMailboxType("akka.actor.mailbo case object ZooKeeperDurableMailboxType extends DurableMailboxType("akka.actor.mailbox.ZooKeeperBasedMailbox") case class FqnDurableMailboxType(mailboxFQN: String) extends DurableMailboxType(mailboxFQN) +/** + * Java API for the mailbox types. Usage: + *
    
    + * MessageDispatcher dispatcher = system.dispatcherFactory()
    + *   .newDispatcher("my-dispatcher", 1, DurableMailboxType.redisDurableMailboxType()).build();
    + * 
    + */ +object DurableMailboxType { + def redisDurableMailboxType(): DurableMailboxType = RedisDurableMailboxType + def mongoDurableMailboxType(): DurableMailboxType = MongoDurableMailboxType + def beanstalkDurableMailboxType(): DurableMailboxType = BeanstalkDurableMailboxType + def fileDurableMailboxType(): DurableMailboxType = FileDurableMailboxType + def zooKeeperDurableMailboxType(): DurableMailboxType = ZooKeeperDurableMailboxType + def fqnDurableMailboxType(mailboxFQN: String): DurableMailboxType = FqnDurableMailboxType(mailboxFQN) +} + /** * Configurator for the DurableMailbox * Do not forget to specify the "storage", valid values are "redis", "beanstalkd", "zookeeper", "mongodb", "file", diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index b92bd7a611..68c7d4e561 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -256,7 +256,7 @@ object AkkaBuild extends Build { lazy val docs = Project( id = "akka-docs", base = file("akka-docs"), - dependencies = Seq(actor, testkit % "test->test", stm, remote, slf4j), + dependencies = Seq(actor, testkit % "test->test", stm, remote, slf4j, fileMailbox, mongoMailbox, redisMailbox, beanstalkMailbox, zookeeperMailbox), settings = defaultSettings ++ Seq( unmanagedSourceDirectories in Test <<= baseDirectory { _ ** "code" get }, libraryDependencies ++= Dependencies.docs, From f07768d94e04e290632a73ad84154e1463b22041 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 12 Dec 2011 12:08:35 +0100 Subject: [PATCH 18/27] DOC: Disabled spring, camel and microkernel. See #1455 --- akka-docs/disabled/camel.rst | 2903 ++++++++++++++++++++++++++++ akka-docs/disabled/microkernel.rst | 40 + akka-docs/disabled/spring.rst | 335 ++++ akka-docs/modules/camel.rst | 2896 +-------------------------- akka-docs/modules/microkernel.rst | 34 +- akka-docs/modules/spring.rst | 329 +--- 6 files changed, 3281 insertions(+), 3256 deletions(-) create mode 100644 akka-docs/disabled/camel.rst create mode 100644 akka-docs/disabled/microkernel.rst create mode 100644 akka-docs/disabled/spring.rst diff --git a/akka-docs/disabled/camel.rst b/akka-docs/disabled/camel.rst new file mode 100644 index 0000000000..8b2b84c992 --- /dev/null +++ b/akka-docs/disabled/camel.rst @@ -0,0 +1,2903 @@ + +.. _camel-module: + +####### + Camel +####### + +For an introduction to akka-camel, see also the `Appendix E - Akka and Camel`_ +(pdf) of the book `Camel in Action`_. + +.. _Appendix E - Akka and Camel: http://www.manning.com/ibsen/appEsample.pdf +.. _Camel in Action: http://www.manning.com/ibsen/ + +Contents: + +.. contents:: :local: + +Other, more advanced external articles are: + +* `Akka Consumer Actors: New Features and Best Practices `_ +* `Akka Producer Actors: New Features and Best Practices `_ + + +Introduction +============ + +The akka-camel module allows actors, untyped actors, and typed actors to receive +and send messages over a great variety of protocols and APIs. This section gives +a brief overview of the general ideas behind the akka-camel module, the +remaining sections go into the details. In addition to the native Scala and Java +actor API, actors can now exchange messages with other systems over large number +of protocols and APIs such as HTTP, SOAP, TCP, FTP, SMTP or JMS, to mention a +few. At the moment, approximately 80 protocols and APIs are supported. + +The akka-camel module is based on `Apache Camel`_, a powerful and leight-weight +integration framework for the JVM. 
For an introduction to Apache Camel you may +want to read this `Apache Camel article`_. Camel comes with a +large number of `components`_ that provide bindings to different protocols and +APIs. The `camel-extra`_ project provides further components. + +.. _Apache Camel: http://camel.apache.org/ +.. _Apache Camel article: http://architects.dzone.com/articles/apache-camel-integration +.. _components: http://camel.apache.org/components.html +.. _camel-extra: http://code.google.com/p/camel-extra/ + +Usage of Camel's integration components in Akka is essentially a +one-liner. Here's an example. + +.. code-block:: scala + + import akka.actor.Actor + import akka.actor.Actor._ + import akka.camel.{Message, Consumer} + + class MyActor extends Actor with Consumer { + def endpointUri = "mina:tcp://localhost:6200?textline=true" + + def receive = { + case msg: Message => { /* ... */} + case _ => { /* ... */} + } + } + + // start and expose actor via tcp + val myActor = actorOf[MyActor] + +The above example exposes an actor over a tcp endpoint on port 6200 via Apache +Camel's `Mina component`_. The actor implements the endpointUri method to define +an endpoint from which it can receive messages. After starting the actor, tcp +clients can immediately send messages to and receive responses from that +actor. If the message exchange should go over HTTP (via Camel's `Jetty +component`_), only the actor's endpointUri method must be changed. + +.. _Mina component: http://camel.apache.org/mina.html +.. _Jetty component: http://camel.apache.org/jetty.html + +.. code-block:: scala + + class MyActor extends Actor with Consumer { + def endpointUri = "jetty:http://localhost:8877/example" + + def receive = { + case msg: Message => { /* ... */} + case _ => { /* ... */} + } + } + +Actors can also trigger message exchanges with external systems i.e. produce to +Camel endpoints. + +.. code-block:: scala + + import akka.actor.Actor + import akka.camel.{Producer, Oneway} + + class MyActor extends Actor with Producer with Oneway { + def endpointUri = "jms:queue:example" + } + +In the above example, any message sent to this actor will be added (produced) to +the example JMS queue. Producer actors may choose from the same set of Camel +components as Consumer actors do. + +The number of Camel components is constantly increasing. The akka-camel module +can support these in a plug-and-play manner. Just add them to your application's +classpath, define a component-specific endpoint URI and use it to exchange +messages over the component-specific protocols or APIs. This is possible because +Camel components bind protocol-specific message formats to a Camel-specific +`normalized message format`__. The normalized message format hides +protocol-specific details from Akka and makes it therefore very easy to support +a large number of protocols through a uniform Camel component interface. The +akka-camel module further converts mutable Camel messages into `immutable +representations`__ which are used by Consumer and Producer actors for pattern +matching, transformation, serialization or storage, for example. 
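+
+As a small illustration of working with these immutable messages, the sketch
+below shows a consumer that pattern matches on ``Message`` and replies with a
+transformed copy of the body. The endpoint URI and actor name are made up for
+the example, and it assumes the ``transformBody`` helper on ``Message``:
+
+.. code-block:: scala
+
+  import akka.actor.Actor
+  import akka.camel.{Message, Consumer}
+
+  class TransformingConsumer extends Actor with Consumer {
+    def endpointUri = "direct:transform"
+
+    def receive = {
+      // match on the immutable Message and reply with an upper-cased body
+      case msg: Message =>
+        self.reply(msg.transformBody((body: String) => body.toUpperCase))
+    }
+  }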
+ +__ https://svn.apache.org/repos/asf/camel/trunk/camel-core/src/main/java/org/apache/camel/Message.java +__ http://github.com/jboner/akka/blob/v0.8/akka-camel/src/main/scala/akka/Message.scala#L17 + + +Dependencies +============ + +Akka's Camel Integration consists of two modules + +* akka-camel - this module depends on akka-actor and camel-core (+ transitive + dependencies) and implements the Camel integration for (untyped) actors + +* akka-camel-typed - this module depends on akka-typed-actor and akka-camel (+ + transitive dependencies) and implements the Camel integration for typed actors + +The akka-camel-typed module is optional. To have both untyped and typed actors +working with Camel, add the following dependencies to your SBT project +definition. + +.. code-block:: scala + + import sbt._ + + class Project(info: ProjectInfo) extends DefaultProject(info) with AkkaProject { + // ... + val akkaCamel = akkaModule("camel") + val akkaCamelTyped = akkaModule("camel-typed") // optional typed actor support + // ... + } + + +.. _camel-consume-messages: + +Consume messages +================ + +Actors (untyped) +---------------- + +For actors (Scala) to receive messages, they must mixin the `Consumer`_ +trait. For example, the following actor class (Consumer1) implements the +endpointUri method, which is declared in the Consumer trait, in order to receive +messages from the ``file:data/input/actor`` Camel endpoint. Untyped actors +(Java) need to extend the abstract UntypedConsumerActor class and implement the +getEndpointUri() and onReceive(Object) methods. + +.. _Consumer: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Consumer.scala + +**Scala** + +.. code-block:: scala + + import akka.actor.Actor + import akka.camel.{Message, Consumer} + + class Consumer1 extends Actor with Consumer { + def endpointUri = "file:data/input/actor" + + def receive = { + case msg: Message => println("received %s" format msg.bodyAs[String]) + } + } + +**Java** + +.. code-block:: java + + import akka.camel.Message; + import akka.camel.UntypedConsumerActor; + + public class Consumer1 extends UntypedConsumerActor { + public String getEndpointUri() { + return "file:data/input/actor"; + } + + public void onReceive(Object message) { + Message msg = (Message)message; + String body = msg.getBodyAs(String.class); + System.out.println(String.format("received %s", body)) + } + } + +Whenever a file is put into the data/input/actor directory, its content is +picked up by the Camel `file component`_ and sent as message to the +actor. Messages consumed by actors from Camel endpoints are of type +`Message`_. These are immutable representations of Camel messages. + +.. _file component: http://camel.apache.org/file2.html +.. _Message: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Message.scala + +For Message usage examples refer to the unit tests: + +* Message unit tests - `Scala API `_ +* Message unit tests - `Java API `_ + +Here's another example that sets the endpointUri to +``jetty:http://localhost:8877/camel/default``. It causes Camel's `Jetty +component`_ to start an embedded `Jetty`_ server, accepting HTTP connections +from localhost on port 8877. + +.. _Jetty component: http://camel.apache.org/jetty.html +.. _Jetty: http://www.eclipse.org/jetty/ + +**Scala** + +.. 
code-block:: scala + + import akka.actor.Actor + import akka.camel.{Message, Consumer} + + class Consumer2 extends Actor with Consumer { + def endpointUri = "jetty:http://localhost:8877/camel/default" + + def receive = { + case msg: Message => self.reply("Hello %s" format msg.bodyAs[String]) + } + } + +**Java** + +.. code-block:: java + + import akka.camel.Message; + import akka.camel.UntypedConsumerActor; + + public class Consumer2 extends UntypedConsumerActor { + public String getEndpointUri() { + return "jetty:http://localhost:8877/camel/default"; + } + + public void onReceive(Object message) { + Message msg = (Message)message; + String body = msg.getBodyAs(String.class); + getContext().tryReply(String.format("Hello %s", body)); + } + } + +After starting the actor, clients can send messages to that actor by POSTing to +``http://localhost:8877/camel/default``. The actor sends a response by using the +self.reply method (Scala). For returning a message body and headers to the HTTP +client the response type should be `Message`_. For any other response type, a +new Message object is created by akka-camel with the actor response as message +body. + +.. _Message: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Message.scala + + +Typed actors +------------ + +Typed actors can also receive messages from Camel endpoints. In contrast to +(untyped) actors, which only implement a single receive or onReceive method, a +typed actor may define several (message processing) methods, each of which can +receive messages from a different Camel endpoint. For a typed actor method to be +exposed as Camel endpoint it must be annotated with the `@consume +annotation`_. For example, the following typed consumer actor defines two +methods, foo and bar. + +.. _@consume annotation: http://github.com/jboner/akka/blob/master/akka-camel/src/main/java/akka/camel/consume.java + +**Scala** + +.. code-block:: scala + + import org.apache.camel.{Body, Header} + import akka.actor.TypedActor + import akka.camel.consume + + trait TypedConsumer1 { + @consume("file:data/input/foo") + def foo(body: String): Unit + + @consume("jetty:http://localhost:8877/camel/bar") + def bar(@Body body: String, @Header("X-Whatever") header: String): String + } + + class TypedConsumer1Impl extends TypedActor with TypedConsumer1 { + def foo(body: String) = println("Received message: %s" format body) + def bar(body: String, header: String) = "body=%s header=%s" format (body, header) + } + +**Java** + +.. code-block:: java + + import org.apache.camel.Body; + import org.apache.camel.Header; + import akka.actor.TypedActor; + import akka.camel.consume; + + public interface TypedConsumer1 { + @consume("file:data/input/foo") + public void foo(String body); + + @consume("jetty:http://localhost:8877/camel/bar") + public String bar(@Body String body, @Header("X-Whatever") String header); + } + + public class TypedConsumer1Impl extends TypedActor implements TypedConsumer1 { + public void foo(String body) { + System.out.println(String.format("Received message: ", body)); + } + + public String bar(String body, String header) { + return String.format("body=%s header=%s", body, header); + } + } + +The foo method can be invoked by placing a file in the data/input/foo +directory. Camel picks up the file from this directory and akka-camel invokes +foo with the file content as argument (converted to a String). Camel +automatically tries to convert messages to appropriate types as defined by the +method parameter(s). 
The conversion rules are described in detail on the +following pages: + +* `Bean integration `_ +* `Bean binding `_ +* `Parameter binding `_ + +The bar method can be invoked by POSTing a message to +http://localhost:8877/camel/bar. Here, parameter binding annotations are used to +tell Camel how to extract data from the HTTP message. The @Body annotation binds +the HTTP request body to the first parameter, the @Header annotation binds the +X-Whatever header to the second parameter. The return value is sent as HTTP +response message body to the client. + +Parameter binding annotations must be placed on the interface, the @consume +annotation can also be placed on the methods in the implementation class. + + +.. _camel-publishing: + +Consumer publishing +------------------- + +Actors (untyped) +^^^^^^^^^^^^^^^^ + +Publishing a consumer actor at its Camel endpoint occurs when the actor is +started. Publication is done asynchronously; setting up an endpoint (more +precisely, the route from that endpoint to the actor) may still be in progress +after the ActorRef method returned. + +**Scala** + +.. code-block:: scala + + import akka.actor.Actor._ + + val actor = actorOf[Consumer1] // create Consumer actor and activate endpoint in background + +**Java** + +.. code-block:: java + + import static akka.actor.Actors.*; + import akka.actor.ActorRef; + + ActorRef actor = actorOf(Consumer1.class); // create Consumer actor and activate endpoint in background + + +Typed actors +^^^^^^^^^^^^ + +Publishing of typed actor methods is done when the typed actor is created with +one of the TypedActor.newInstance(..) methods. Publication is done in the +background here as well i.e. it may still be in progress when +TypedActor.newInstance(..) returns. + +**Scala** + +.. code-block:: scala + + import akka.actor.TypedActor + + // create TypedConsumer1 object and activate endpoint(s) in background + val consumer = TypedActor.newInstance(classOf[TypedConsumer1], classOf[TypedConumer1Impl]) + +**Java** + +.. code-block:: java + + import akka.actor.TypedActor; + + // create TypedConsumer1 object and activate endpoint(s) in background + TypedConsumer1 consumer = TypedActor.newInstance(TypedConsumer1.class, TypedConumer1Impl.class); + + +.. _camel-consumers-and-camel-service: + +Consumers and the CamelService +------------------------------ + +Publishing of consumer actors or typed actor methods requires a running +CamelService. The Akka :ref:`microkernel` can start a CamelService automatically +(see :ref:`camel-configuration`). When using Akka in other environments, a +CamelService must be started manually. Applications can do that by calling the +CamelServiceManager.startCamelService method. + +**Scala** + +.. code-block:: scala + + import akka.camel.CamelServiceManager._ + + startCamelService + +**Java** + +.. code-block:: java + + import static akka.camel.CamelServiceManager.*; + + startCamelService(); + +If applications need to wait for a certain number of consumer actors or typed +actor methods to be published they can do so with the +``CamelServiceManager.mandatoryService.awaitEndpointActivation`` method, where +``CamelServiceManager.mandatoryService`` is the current CamelService instance +(or throws an IllegalStateException there's no current CamelService). + +**Scala** + +.. 
code-block:: scala + + import akka.camel.CamelServiceManager._ + + startCamelService + + // Wait for three conumer endpoints to be activated + mandatoryService.awaitEndpointActivation(3) { + // Start three consumer actors (for example) + // ... + } + + // Communicate with consumer actors via their activated endpoints + // ... + +**Java** + +.. code-block:: java + + import akka.japi.SideEffect; + import static akka.camel.CamelServiceManager.*; + + startCamelService(); + + // Wait for three conumer endpoints to be activated + getMandatoryService().awaitEndpointActivation(3, new SideEffect() { + public void apply() { + // Start three consumer actors (for example) + // ... + } + }); + + // Communicate with consumer actors via their activated endpoints + // ... + +Alternatively, one can also use ``Option[CamelService]`` returned by +``CamelServiceManager.service``. + +**Scala** + +.. code-block:: scala + + import akka.camel.CamelServiceManager._ + + startCamelService + + for(s <- service) s.awaitEndpointActivation(3) { + // ... + } + +**Java** + +.. code-block:: java + + import java.util.concurrent.CountDownLatch; + + import akka.camel.CamelService; + import static akka.camel.CamelServiceManager.*; + + startCamelService(); + + for (CamelService s : getService()) s.awaitEndpointActivation(3, new SideEffect() { + public void apply() { + // ... + } + }); + +:ref:`camel-configuration` additionally describes how a CamelContext, that is +managed by a CamelService, can be cutomized before starting the service. When +the CamelService is no longer needed, it should be stopped. + +**Scala** + +.. code-block:: scala + + import akka.camel.CamelServiceManager._ + + stopCamelService + +**Java** + +.. code-block:: java + + import static akka.camel.CamelServiceManager.*; + + stopCamelService(); + + +.. _camel-unpublishing: + +Consumer un-publishing +---------------------- + +Actors (untyped) +^^^^^^^^^^^^^^^^ + +When an actor is stopped, the route from the endpoint to that actor is stopped +as well. For example, stopping an actor that has been previously published at +``http://localhost:8877/camel/test`` will cause a connection failure when trying +to access that endpoint. Stopping the route is done asynchronously; it may be +still in progress after the ``ActorRef.stop`` method returned. + +**Scala** + +.. code-block:: scala + + import akka.actor.Actor._ + + val actor = actorOf[Consumer1] // create Consumer actor + actor // activate endpoint in background + // ... + actor.stop // deactivate endpoint in background + +**Java** + +.. code-block:: java + + import static akka.actor.Actors.*; + import akka.actor.ActorRef; + + ActorRef actor = actorOf(Consumer1.class); // create Consumer actor and activate endpoint in background + // ... + actor.stop(); // deactivate endpoint in background + + +Typed actors +^^^^^^^^^^^^ + +When a typed actor is stopped, routes to @consume annotated methods of this +typed actors are stopped as well. Stopping the routes is done asynchronously; it +may be still in progress after the TypedActor.stop method returned. + +**Scala** + +.. code-block:: scala + + import akka.actor.TypedActor + + // create TypedConsumer1 object and activate endpoint(s) in background + val consumer = TypedActor.newInstance(classOf[TypedConsumer1], classOf[TypedConumer1Impl]) + + // deactivate endpoints in background + TypedActor.stop(consumer) + +**Java** + +.. 
code-block:: java + + import akka.actor.TypedActor; + + // Create typed consumer actor and activate endpoints in background + TypedConsumer1 consumer = TypedActor.newInstance(TypedConsumer1.class, TypedConumer1Impl.class); + + // Deactivate endpoints in background + TypedActor.stop(consumer); + + +.. _camel-acknowledgements: + +Acknowledgements +---------------- + +Actors (untyped) +^^^^^^^^^^^^^^^^ + +With in-out message exchanges, clients usually know that a message exchange is +done when they receive a reply from a consumer actor. The reply message can be a +Message (or any object which is then internally converted to a Message) on +success, and a Failure message on failure. + +With in-only message exchanges, by default, an exchange is done when a message +is added to the consumer actor's mailbox. Any failure or exception that occurs +during processing of that message by the consumer actor cannot be reported back +to the endpoint in this case. To allow consumer actors to positively or +negatively acknowledge the receipt of a message from an in-only message +exchange, they need to override the ``autoack`` (Scala) or ``isAutoack`` (Java) +method to return false. In this case, consumer actors must reply either with a +special Ack message (positive acknowledgement) or a Failure (negative +acknowledgement). + +**Scala** + +.. code-block:: scala + + import akka.camel.{Ack, Failure} + // ... other imports omitted + + class Consumer3 extends Actor with Consumer { + override def autoack = false + + def endpointUri = "jms:queue:test" + + def receive = { + // ... + self.reply(Ack) // on success + // ... + self.reply(Failure(...)) // on failure + } + } + +**Java** + +.. code-block:: java + + import akka.camel.Failure + import static akka.camel.Ack.ack; + // ... other imports omitted + + public class Consumer3 extends UntypedConsumerActor { + + public String getEndpointUri() { + return "jms:queue:test"; + } + + public boolean isAutoack() { + return false; + } + + public void onReceive(Object message) { + // ... + getContext().reply(ack()) // on success + // ... + val e: Exception = ... + getContext().reply(new Failure(e)) // on failure + } + } + + +.. _camel-blocking-exchanges: + +Blocking exchanges +------------------ + +By default, message exchanges between a Camel endpoint and a consumer actor are +non-blocking because, internally, the ! (bang) operator is used to commicate +with the actor. The route to the actor does not block waiting for a reply. The +reply is sent asynchronously (see also :ref:`camel-asynchronous-routing`). +Consumer actors however can be configured to make this interaction blocking. + +**Scala** + +.. code-block:: scala + + class ExampleConsumer extends Actor with Consumer { + override def blocking = true + + def endpointUri = ... + def receive = { + // ... + } + } + +**Java** + +.. code-block:: java + + public class ExampleConsumer extends UntypedConsumerActor { + + public boolean isBlocking() { + return true; + } + + public String getEndpointUri() { + // ... + } + + public void onReceive(Object message) { + // ... + } + } + +In this case, the ``!!`` (bangbang) operator is used internally to communicate +with the actor which blocks a thread until the consumer sends a response or +throws an exception within receive. Although it may decrease scalability, this +setting can simplify error handling (see `this article`_) or allows timeout +configurations on actor-level (see :ref:`camel-timeout`). + +.. 
_this article: http://krasserm.blogspot.com/2011/02/akka-consumer-actors-new-features-and.html + + +.. _camel-timeout: + +Consumer timeout +---------------- + +Endpoints that support two-way communications need to wait for a response from +an (untyped) actor or typed actor before returning it to the initiating client. +For some endpoint types, timeout values can be defined in an endpoint-specific +way which is described in the documentation of the individual `Camel +components`_. Another option is to configure timeouts on the level of consumer +actors and typed consumer actors. + +.. _Camel components: http://camel.apache.org/components.html + + +Typed actors +^^^^^^^^^^^^ + +For typed actors, timeout values for method calls that return a result can be +set when the typed actor is created. In the following example, the timeout is +set to 20 seconds (default is 5 seconds). + +**Scala** + +.. code-block:: scala + + import akka.actor.TypedActor + + val consumer = TypedActor.newInstance(classOf[TypedConsumer1], classOf[TypedConumer1Impl], 20000 /* 20 seconds */) + +**Java** + +.. code-block:: java + + import akka.actor.TypedActor; + + TypedConsumer1 consumer = TypedActor.newInstance(TypedConsumer1.class, TypedConumer1Impl.class, 20000 /* 20 seconds */); + + +Actors (untyped) +^^^^^^^^^^^^^^^^ + +Two-way communications between a Camel endpoint and an (untyped) actor are +initiated by sending the request message to the actor with the ``!`` (bang) +operator and the actor replies to the endpoint when the response is ready. In +order to support timeouts on actor-level, endpoints need to send the request +message with the ``!!`` (bangbang) operator for which a timeout value is +applicable. This can be achieved by overriding the Consumer.blocking method to +return true. + +**Scala** + +.. code-block:: scala + + class Consumer2 extends Actor with Consumer { + self.timeout = 20000 // timeout set to 20 seconds + + override def blocking = true + + def endpointUri = "direct:example" + + def receive = { + // ... + } + } + +**Java** + +.. code-block:: java + + public class Consumer2 extends UntypedConsumerActor { + + public Consumer2() { + getContext().setTimeout(20000); // timeout set to 20 seconds + } + + public String getEndpointUri() { + return "direct:example"; + } + + public boolean isBlocking() { + return true; + } + + public void onReceive(Object message) { + // ... + } + } + +This is a valid approach for all endpoint types that do not "natively" support +asynchronous two-way message exchanges. For all other endpoint types (like +`Jetty`_ endpoints) is it not recommended to switch to blocking mode but rather +to configure timeouts in an endpoint-specific way (see +also :ref:`camel-asynchronous-routing`). + + +Remote consumers +---------------- + +Actors (untyped) +^^^^^^^^^^^^^^^^ + +Publishing of remote consumer actors is always done on the server side, local +proxies are never published. Hence the CamelService must be started on the +remote node. For example, to publish an (untyped) actor on a remote node at +endpoint URI ``jetty:http://localhost:6644/remote-actor-1``, define the +following consumer actor class. + +**Scala** + +.. code-block:: scala + + import akka.actor.Actor + import akka.annotation.consume + import akka.camel.Consumer + + class RemoteActor1 extends Actor with Consumer { + def endpointUri = "jetty:http://localhost:6644/remote-actor-1" + + protected def receive = { + case msg => self.reply("response from remote actor 1") + } + } + +**Java** + +.. 
code-block:: java + + import akka.camel.UntypedConsumerActor; + + public class RemoteActor1 extends UntypedConsumerActor { + public String getEndpointUri() { + return "jetty:http://localhost:6644/remote-actor-1"; + } + + public void onReceive(Object message) { + getContext().tryReply("response from remote actor 1"); + } + } + +On the remote node, start a `CamelService`_, start a remote server, create the +actor and register it at the remote server. + +.. _CamelService: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/CamelService.scala + +**Scala** + +.. code-block:: scala + + import akka.camel.CamelServiceManager._ + import akka.actor.Actor._ + import akka.actor.ActorRef + + // ... + startCamelService + + val consumer = val consumer = actorOf[RemoteActor1] + + remote.start("localhost", 7777) + remote.register(consumer) // register and start remote consumer + // ... + +**Java** + +.. code-block:: java + + import akka.camel.CamelServiceManager; + import static akka.actor.Actors.*; + + // ... + CamelServiceManager.startCamelService(); + + ActorRef actor = actorOf(RemoteActor1.class); + + remote().start("localhost", 7777); + remote().register(actor); // register and start remote consumer + // ... + +Explicitly starting a CamelService can be omitted when Akka is running in Kernel +mode, for example (see also :ref:`camel-configuration`). + + +Typed actors +^^^^^^^^^^^^ + +Remote typed consumer actors can be registered with one of the +``registerTyped*`` methods on the remote server. The following example registers +the actor with the custom id "123". + +**Scala** + +.. code-block:: scala + + import akka.actor.TypedActor + + // ... + val obj = TypedActor.newRemoteInstance( + classOf[SampleRemoteTypedConsumer], + classOf[SampleRemoteTypedConsumerImpl]) + + remote.registerTypedActor("123", obj) + // ... + +**Java** + +.. code-block:: java + + import akka.actor.TypedActor; + + SampleRemoteTypedConsumer obj = (SampleRemoteTypedConsumer)TypedActor.newInstance( + SampleRemoteTypedConsumer.class, + SampleRemoteTypedConsumerImpl.class); + + remote.registerTypedActor("123", obj) + // ... + + +Produce messages +================ + +A minimum pre-requisite for producing messages to Camel endpoints with producer +actors (see below) is an initialized and started CamelContextManager. + +**Scala** + +.. code-block:: scala + + import akka.camel.CamelContextManager + + CamelContextManager.init // optionally takes a CamelContext as argument + CamelContextManager.start // starts the managed CamelContext + +**Java** + +.. code-block:: java + + import akka.camel.CamelContextManager; + + CamelContextManager.init(); // optionally takes a CamelContext as argument + CamelContextManager; // starts the managed CamelContext + +For using producer actors, application may also start a CamelService. This will +not only setup a CamelContextManager behind the scenes but also register +listeners at the actor registry (needed to publish consumer actors). If your +application uses producer actors only and you don't want to have the (very +small) overhead generated by the registry listeners then setting up a +CamelContextManager without starting CamelService is recommended. Otherwise, +just start a CamelService as described for consumer +actors: :ref:`camel-consumers-and-camel-service`. 
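+
+Putting these pieces together, a minimal producer-only setup could look like
+the following sketch. The ``Producer`` trait used here is described in the next
+section, and the actor class name is made up for the example:
+
+.. code-block:: scala
+
+  import akka.actor.Actor
+  import akka.actor.Actor._
+  import akka.camel.{CamelContextManager, Producer}
+
+  class NewsProducer extends Actor with Producer {
+    def endpointUri = "http://localhost:8080/news"
+  }
+
+  // set up and start the managed CamelContext; no CamelService is needed
+  // when only producer actors are used
+  CamelContextManager.init
+  CamelContextManager.start
+
+  // create the producer actor and produce a message to its endpoint
+  val producer = actorOf[NewsProducer]
+  producer ! "akka rocks"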
+ + +Producer trait +-------------- + +Actors (untyped) +^^^^^^^^^^^^^^^^ + +For sending messages to Camel endpoints, actors + +* written in Scala need to mixin the `Producer`_ trait and implement the + endpointUri method. + +* written in Java need to extend the abstract UntypedProducerActor class and + implement the getEndpointUri() method. By extending the UntypedProducerActor + class, untyped actors (Java) inherit the behaviour of the Producer trait. + +.. _Producer: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Producer.scala + +**Scala** + +.. code-block:: scala + + import akka.actor.Actor + import akka.camel.Producer + + class Producer1 extends Actor with Producer { + def endpointUri = "http://localhost:8080/news" + } + +**Java** + +.. code-block:: java + + import akka.camel.UntypedProducerActor; + + public class Producer1 extends UntypedProducerActor { + public String getEndpointUri() { + return "http://localhost:8080/news"; + } + } + +Producer1 inherits a default implementation of the receive method from the +Producer trait. To customize a producer actor's default behavior it is +recommended to override the Producer.receiveBeforeProduce and +Producer.receiveAfterProduce methods. This is explained later in more detail. +Actors should not override the default Producer.receive method. + +Any message sent to a Producer actor (or UntypedProducerActor) will be sent to +the associated Camel endpoint, in the above example to +``http://localhost:8080/news``. Response messages (if supported by the +configured endpoint) will, by default, be returned to the original sender. The +following example uses the ``?`` operator (Scala) to send a message to a +Producer actor and waits for a response. In Java, the sendRequestReply method is +used. + +**Scala** + +.. code-block:: scala + + import akka.actor.Actor._ + import akka.actor.ActorRef + + val producer = actorOf[Producer1] + val response = (producer ? "akka rocks").get + val body = response.bodyAs[String] + +**Java** + +.. code-block:: java + + import akka.actor.ActorRef; + import static akka.actor.Actors.*; + import akka.camel.Message; + + ActorRef producer = actorOf(Producer1.class); + Message response = (Message)producer.sendRequestReply("akka rocks"); + String body = response.getBodyAs(String.class) + +If the message is sent using the ! operator (or the tell method in Java) +then the response message is sent back asynchronously to the original sender. In +the following example, a Sender actor sends a message (a String) to a producer +actor using the ! operator and asynchronously receives a response (of type +Message). + +**Scala** + +.. code-block:: scala + + import akka.actor.{Actor, ActorRef} + import akka.camel.Message + + class Sender(producer: ActorRef) extends Actor { + def receive = { + case request: String => producer ! request + case response: Message => { + /* process response ... */ + } + // ... + } + } + +**Java** + +.. code-block:: java + + // TODO + + +.. _camel-custom-processing: + +Custom Processing +^^^^^^^^^^^^^^^^^ + +Instead of replying to the initial sender, producer actors can implement custom +reponse processing by overriding the receiveAfterProduce method (Scala) or +onReceiveAfterProduce method (Java). In the following example, the reponse +message is forwarded to a target actor instead of being replied to the original +sender. + +**Scala** + +.. 
code-block:: scala + + import akka.actor.{Actor, ActorRef} + import akka.camel.Producer + + class Producer1(target: ActorRef) extends Actor with Producer { + def endpointUri = "http://localhost:8080/news" + + override protected def receiveAfterProduce = { + // do not reply but forward result to target + case msg => target forward msg + } + } + +**Java** + +.. code-block:: java + + import akka.actor.ActorRef; + import akka.camel.UntypedProducerActor; + + public class Producer1 extends UntypedProducerActor { + private ActorRef target; + + public Producer1(ActorRef target) { + this.target = target; + } + + public String getEndpointUri() { + return "http://localhost:8080/news"; + } + + @Override + public void onReceiveAfterProduce(Object message) { + target.forward((Message)message, getContext()); + } + } + +To create an untyped actor instance with a constructor argument, a factory is +needed (this should be doable without a factory in upcoming Akka versions). + +.. code-block:: java + + import akka.actor.ActorRef; + import akka.actor.UntypedActorFactory; + import akka.actor.UntypedActor; + + public class Producer1Factory implements UntypedActorFactory { + + private ActorRef target; + + public Producer1Factory(ActorRef target) { + this.target = target; + } + + public UntypedActor create() { + return new Producer1(target); + } + } + +The instanitation is done with the Actors.actorOf method and the factory as +argument. + +.. code-block:: java + + import static akka.actor.Actors.*; + import akka.actor.ActorRef; + + ActorRef target = ... + ActorRef producer = actorOf(new Producer1Factory(target)); + producer; + +Before producing messages to endpoints, producer actors can pre-process them by +overriding the receiveBeforeProduce method (Scala) or onReceiveBeforeProduce +method (Java). + +**Scala** + +.. code-block:: scala + + import akka.actor.{Actor, ActorRef} + import akka.camel.{Message, Producer} + + class Producer1(target: ActorRef) extends Actor with Producer { + def endpointUri = "http://localhost:8080/news" + + override protected def receiveBeforeProduce = { + case msg: Message => { + // do some pre-processing (e.g. add endpoint-specific message headers) + // ... + + // and return the modified message + msg + } + } + } + +**Java** + +.. code-block:: java + + import akka.actor.ActorRef; + import akka.camel.Message + import akka.camel.UntypedProducerActor; + + public class Producer1 extends UntypedProducerActor { + private ActorRef target; + + public Producer1(ActorRef target) { + this.target = target; + } + + public String getEndpointUri() { + return "http://localhost:8080/news"; + } + + @Override + public Object onReceiveBeforeProduce(Object message) { + Message msg = (Message)message; + // do some pre-processing (e.g. add endpoint-specific message headers) + // ... + + // and return the modified message + return msg + } + } + + +Producer configuration options +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The interaction of producer actors with Camel endpoints can be configured to be +one-way or two-way (by initiating in-only or in-out message exchanges, +respectively). By default, the producer initiates an in-out message exchange +with the endpoint. For initiating an in-only exchange, producer actors + +* written in Scala either have to override the oneway method to return true +* written in Java have to override the isOneway method to return true. + +**Scala** + +.. 
code-block:: scala + + import akka.camel.Producer + + class Producer2 extends Actor with Producer { + def endpointUri = "jms:queue:test" + override def oneway = true + } + +**Java** + +.. code-block:: java + + import akka.camel.UntypedProducerActor; + + public class SampleUntypedReplyingProducer extends UntypedProducerActor { + public String getEndpointUri() { + return "jms:queue:test"; + } + + @Override + public boolean isOneway() { + return true; + } + } + +Message correlation +^^^^^^^^^^^^^^^^^^^ + +To correlate request with response messages, applications can set the +Message.MessageExchangeId message header. + +**Scala** + +.. code-block:: scala + + import akka.camel.Message + + producer ! Message("bar", Map(Message.MessageExchangeId -> "123")) + +**Java** + +.. code-block:: java + + // TODO + +Responses of type Message or Failure will contain that header as well. When +receiving messages from Camel endpoints this message header is already set (see +:ref:`camel-consume-messages`). + + +Matching responses +^^^^^^^^^^^^^^^^^^ + +The following code snippet shows how to best match responses when sending +messages with the ``?`` operator (Scala) or with the ``ask`` method +(Java). + +**Scala** + +.. code-block:: scala + + val response = (producer ? message).get + + response match { + case Some(Message(body, headers)) => ... + case Some(Failure(exception, headers)) => ... + case _ => ... + } + +**Java** + +.. code-block:: java + + // TODO + + +ProducerTemplate +---------------- + +The `Producer`_ trait (and the abstract UntypedProducerActor class) is a very +convenient way for actors to produce messages to Camel endpoints. (Untyped) +actors and typed actors may also use a Camel `ProducerTemplate`_ for producing +messages to endpoints. For typed actors it's the only way to produce messages to +Camel endpoints. + +At the moment, only the Producer trait fully supports asynchronous in-out +message exchanges with Camel endpoints without allocating a thread for the full +duration of the exchange. For example, when using endpoints that support +asynchronous message exchanges (such as Jetty endpoints that internally use +`Jetty's asynchronous HTTP client`_) then usage of the Producer trait is highly +recommended (see also :ref:`camel-asynchronous-routing`). + +.. _Producer: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Producer.scala +.. _ProducerTemplate: http://camel.apache.org/maven/camel-2.2.0/camel-core/apidocs/index.html +.. _Jetty's asynchronous HTTP client: http://wiki.eclipse.org/Jetty/Tutorial/HttpClient + + +Actors (untyped) +^^^^^^^^^^^^^^^^ + +A managed ProducerTemplate instance can be obtained via +CamelContextManager.mandatoryTemplate. In the following example, an actor uses a +ProducerTemplate to send a one-way message to a ``direct:news`` endpoint. + +**Scala** + +.. code-block:: scala + + import akka.actor.Actor + import akka.camel.CamelContextManager + + class ProducerActor extends Actor { + protected def receive = { + // one-way message exchange with direct:news endpoint + case msg => CamelContextManager.mandatoryTemplate.sendBody("direct:news", msg) + } + } + +**Java** + +.. 
code-block:: java + + import akka.actor.UntypedActor; + import akka.camel.CamelContextManager; + + public class SampleUntypedActor extends UntypedActor { + public void onReceive(Object msg) { + CamelContextManager.getMandatoryTemplate().sendBody("direct:news", msg); + } + } + +Alternatively, one can also use ``Option[ProducerTemplate]`` returned by +``CamelContextManager.template``. + +**Scala** + +.. code-block:: scala + + import akka.actor.Actor + import akka.camel.CamelContextManager + + class ProducerActor extends Actor { + protected def receive = { + // one-way message exchange with direct:news endpoint + case msg => for(t <- CamelContextManager.template) t.sendBody("direct:news", msg) + } + } + +**Java** + +.. code-block:: java + + import org.apache.camel.ProducerTemplate + + import akka.actor.UntypedActor; + import akka.camel.CamelContextManager; + + public class SampleUntypedActor extends UntypedActor { + public void onReceive(Object msg) { + for (ProducerTemplate t : CamelContextManager.getTemplate()) { + t.sendBody("direct:news", msg); + } + } + } + +For initiating a a two-way message exchange, one of the +``ProducerTemplate.request*`` methods must be used. + +**Scala** + +.. code-block:: scala + + import akka.actor.Actor + import akka.camel.CamelContextManager + + class ProducerActor extends Actor { + protected def receive = { + // two-way message exchange with direct:news endpoint + case msg => self.reply(CamelContextManager.mandatoryTemplate.requestBody("direct:news", msg)) + } + } + +**Java** + +.. code-block:: java + + import akka.actor.UntypedActor; + import akka.camel.CamelContextManager; + + public class SampleUntypedActor extends UntypedActor { + public void onReceive(Object msg) { + getContext().tryReply(CamelContextManager.getMandatoryTemplate().requestBody("direct:news", msg)); + } + } + + +Typed actors +^^^^^^^^^^^^ + +Typed Actors get access to a managed ProducerTemplate in the same way, as shown +in the next example. + +**Scala** + +.. code-block:: scala + + // TODO + +**Java** + +.. code-block:: java + + import akka.actor.TypedActor; + import akka.camel.CamelContextManager; + + public class SampleProducerImpl extends TypedActor implements SampleProducer { + public void foo(String msg) { + ProducerTemplate template = CamelContextManager.getMandatoryTemplate(); + template.sendBody("direct:news", msg); + } + } + + +.. _camel-asynchronous-routing: + +Asynchronous routing +==================== + +Since Akka 0.10, in-out message exchanges between endpoints and actors are +designed to be asynchronous. This is the case for both, consumer and producer +actors. + +* A consumer endpoint sends request messages to its consumer actor using the ``!`` + (bang) operator and the actor returns responses with self.reply once they are + ready. The sender reference used for reply is an adapter to Camel's asynchronous + routing engine that implements the ActorRef trait. + +* A producer actor sends request messages to its endpoint using Camel's + asynchronous routing engine. Asynchronous responses are wrapped and added to the + producer actor's mailbox for later processing. By default, response messages are + returned to the initial sender but this can be overridden by Producer + implementations (see also description of the ``receiveAfterProcessing`` method + in :ref:`camel-custom-processing`). + +However, asynchronous two-way message exchanges, without allocating a thread for +the full duration of exchange, cannot be generically supported by Camel's +asynchronous routing engine alone. 
This must be supported by the individual +`Camel components`_ (from which endpoints are created) as well. They must be +able to suspend any work started for request processing (thereby freeing threads +to do other work) and resume processing when the response is ready. This is +currently the case for a `subset of components`_ such as the `Jetty component`_. +All other Camel components can still be used, of course, but they will cause +allocation of a thread for the duration of an in-out message exchange. There's +also a :ref:`camel-async-example` that implements both, an asynchronous +consumer and an asynchronous producer, with the jetty component. + +.. _Camel components: http://camel.apache.org/components.html +.. _subset of components: http://camel.apache.org/asynchronous-routing-engine.html +.. _Jetty component: http://camel.apache.org/jetty.html + + +Fault tolerance +=============== + +Consumer actors and typed actors can be also managed by supervisors. If a +consumer is configured to be restarted upon failure the associated Camel +endpoint is not restarted. It's behaviour during restart is as follows. + +* A one-way (in-only) message exchange will be queued by the consumer and + processed once restart completes. + +* A two-way (in-out) message exchange will wait and either succeed after restart + completes or time-out when the restart duration exceeds + the :ref:`camel-timeout`. + +If a consumer is configured to be shut down upon failure, the associated +endpoint is shut down as well. For details refer to :ref:`camel-unpublishing`. + +For examples, tips and trick how to implement fault-tolerant consumer and +producer actors, take a look at these two articles. + +* `Akka Consumer Actors: New Features and Best Practices `_ +* `Akka Producer Actors: New Features and Best Practices `_ + + +.. _camel-configuration: + +CamelService configuration +========================== + +For publishing consumer actors and typed actor methods +(:ref:`camel-publishing`), applications must start a CamelService. When starting +Akka in :ref:`microkernel` mode then a CamelService can be started automatically +when camel is added to the enabled-modules list in :ref:`configuration`, for example: + +.. code-block:: none + + akka { + ... + enabled-modules = ["camel"] # Options: ["remote", "camel", "http"] + ... + } + +Applications that do not use the Akka Kernel, such as standalone applications +for example, need to start a CamelService manually, as explained in the +following subsections.When starting a CamelService manually, settings in +:ref:`configuration` are ignored. + + +Standalone applications +----------------------- + +Standalone application should create and start a CamelService in the following way. + +**Scala** + +.. code-block:: scala + + import akka.camel.CamelServiceManager._ + + startCamelService + +**Java** + +.. code-block:: java + + import static akka.camel.CamelServiceManager.*; + + startCamelService(); + +Internally, a CamelService uses the CamelContextManager singleton to manage a +CamelContext. A CamelContext manages the routes from endpoints to consumer +actors and typed actors. These routes are added and removed at runtime (when +(untyped) consumer actors and typed consumer actors are started and stopped). +Applications may additionally want to add their own custom routes or modify the +CamelContext in some other way. This can be done by initializing the +CamelContextManager manually and making modifications to CamelContext **before** +the CamelService is started. + +**Scala** + +.. 
code-block:: scala + + import org.apache.camel.builder.RouteBuilder + + import akka.camel.CamelContextManager + import akka.camel.CamelServiceManager._ + + CamelContextManager.init + + // add a custom route to the managed CamelContext + CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder) + + startCamelService + + // an application-specific route builder + class CustomRouteBuilder extends RouteBuilder { + def configure { + // ... + } + } + +**Java** + +.. code-block:: java + + import org.apache.camel.builder.RouteBuilder; + + import akka.camel.CamelContextManager; + import static akka.camel.CamelServiceManager.*; + + CamelContextManager.init(); + + // add a custom route to the managed CamelContext + CamelContextManager.getMandatoryContext().addRoutes(new CustomRouteBuilder()); + + startCamelService(); + + // an application-specific route builder + private static class CustomRouteBuilder extends RouteBuilder { + public void configure() { + // ... + } + } + + +Applications may even provide their own CamelContext instance as argument to the +init method call as shown in the following snippet. Here, a DefaultCamelContext +is created using a Spring application context as `registry`_. + +.. _registry: http://camel.apache.org/registry.html + + +**Scala** + +.. code-block:: scala + + import org.apache.camel.impl.DefaultCamelContext + import org.apache.camel.spring.spi.ApplicationContextRegistry + import org.springframework.context.support.ClassPathXmlApplicationContext + + import akka.camel.CamelContextManager + import akka.camel.CamelServiceManager._ + + // create a custom Camel registry backed up by a Spring application context + val context = new ClassPathXmlApplicationContext("/context.xml") + val registry = new ApplicationContextRegistry(context) + + // initialize CamelContextManager with a DefaultCamelContext using the custom registry + CamelContextManager.init(new DefaultCamelContext(registry)) + + // ... + + startCamelService + +**Java** + +.. code-block:: java + + import org.apache.camel.impl.DefaultCamelContext + import org.apache.camel.spi.Registry; + import org.apache.camel.spring.spi.ApplicationContextRegistry; + + import org.springframework.context.ApplicationContext; + import org.springframework.context.support.ClassPathXmlApplicationContext; + + import akka.camel.CamelContextManager; + import static akka.camel.CamelServiceManager.*; + + // create a custom Camel registry backed up by a Spring application context + ApplicationContext context = new ClassPathXmlApplicationContext("/context.xml"); + Registry registry = new ApplicationContextRegistry(context); + + // initialize CamelContextManager with a DefaultCamelContext using the custom registry + CamelContextManager.init(new DefaultCamelContext(registry)); + + // ... + + startCamelService(); + + +.. _camel-spring-applications: + +Standalone Spring applications +------------------------------ + +A better approach to configure a Spring application context as registry for the +CamelContext is to use `Camel's Spring support`_. Furthermore, +the :ref:`spring-module` module additionally supports a element +for creating and starting a CamelService. An optional reference to a custom +CamelContext can be defined for as well. Here's an example. + +.. _Camel's Spring support: http://camel.apache.org/spring.html + +.. code-block:: xml + + + + + + + + + + + + + + + + + +Creating a CamelContext this way automatically adds the defining Spring +application context as registry to that CamelContext. 
The CamelService is +started when the application context is started and stopped when the application +context is closed. A simple usage example is shown in the following snippet. + +**Scala** + +.. code-block:: scala + + import org.springframework.context.support.ClassPathXmlApplicationContext + import akka.camel.CamelContextManager + + // Create and start application context (start CamelService) + val appctx = new ClassPathXmlApplicationContext("/context.xml") + + // Access to CamelContext (SpringCamelContext) + val ctx = CamelContextManager.mandatoryContext + // Access to ProducerTemplate of that CamelContext + val tpl = CamelContextManager.mandatoryTemplate + + // use ctx and tpl ... + + // Close application context (stop CamelService) + appctx.close + +**Java** + +.. code-block:: java + + // TODO + + +If the CamelService doesn't reference a custom CamelContext then a +DefaultCamelContext is created (and accessible via the CamelContextManager). + +.. code-block:: xml + + + + + + + + + +Kernel mode +----------- + +For classes that are loaded by the Kernel or the Initializer, starting the +CamelService can be omitted, as discussed in the previous section. Since these +classes are loaded and instantiated before the CamelService is started (by +Akka), applications can make modifications to a CamelContext here as well (and +even provide their own CamelContext). Assuming there's a boot class +sample.camel.Boot configured in :ref:`configuration`. + +.. code-block:: none + + akka { + ... + boot = ["sample.camel.Boot"] + ... + } + +Modifications to the CamelContext can be done like in the following snippet. + +**Scala** + +.. code-block:: scala + + package sample.camel + + import org.apache.camel.builder.RouteBuilder + + import akka.camel.CamelContextManager + + class Boot { + CamelContextManager.init + + // Customize CamelContext with application-specific routes + CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder) + + // No need to start CamelService here. It will be started + // when this classes has been loaded and instantiated. + } + + class CustomRouteBuilder extends RouteBuilder { + def configure { + // ... + } + } + +**Java** + +.. code-block:: java + + // TODO + + +Custom Camel routes +=================== + +In all the examples so far, routes to consumer actors have been automatically +constructed by akka-camel, when the actor was started. Although the default +route construction templates, used by akka-camel internally, are sufficient for +most use cases, some applications may require more specialized routes to actors. +The akka-camel module provides two mechanisms for customizing routes to actors, +which will be explained in this section. These are + +* Usage of :ref:`camel-components` to access (untyped) actor and actors. + Any Camel route can use these components to access Akka actors. + +* :ref:`camel-intercepting-route-construction` to (untyped) actor and actors. + Default routes to consumer actors are extended using predefined extension + points. + + +.. _camel-components: + +Akka Camel components +--------------------- + +Akka actors can be access from Camel routes using the `actor`_ and +`typed-actor`_ Camel components, respectively. These components can be used to +access any Akka actor (not only consumer actors) from Camel routes, as described +in the following sections. + +.. _actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/ActorComponent.scala +.. 
_typed-actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/TypedActorComponent.scala + + +Access to actors +---------------- + +To access (untyped) actors from custom Camel routes, the `actor`_ Camel +component should be used. It fully supports Camel's `asynchronous routing +engine`_. + +.. _actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/ActorComponent.scala +.. _asynchronous routing engine: http://camel.apache.org/asynchronous-routing-engine.html + +This component accepts the following enpoint URI formats: + +* ``actor:[?]`` +* ``actor:id:[][?]`` +* ``actor:uuid:[][?]`` + +where ```` and ```` refer to ``actorRef.id`` and the +String-representation of ``actorRef.uuid``, respectively. The ```` are +name-value pairs separated by ``&`` (i.e. ``name1=value1&name2=value2&...``). + + +URI options +^^^^^^^^^^^ + +The following URI options are supported: + ++----------+---------+---------+-------------------------------------------+ +| Name | Type | Default | Description | ++==========+=========+=========+===========================================+ +| blocking | Boolean | false | If set to true, in-out message exchanges | +| | | | with the target actor will be made with | +| | | | the ``!!`` operator, otherwise with the | +| | | | ``!`` operator. | +| | | | | +| | | | See also :ref:`camel-timeout`. | ++----------+---------+---------+-------------------------------------------+ +| autoack | Boolean | true | If set to true, in-only message exchanges | +| | | | are auto-acknowledged when the message is | +| | | | added to the actor's mailbox. If set to | +| | | | false, actors must acknowledge the | +| | | | receipt of the message. | +| | | | | +| | | | See also :ref:`camel-acknowledgements`. | ++----------+---------+---------+-------------------------------------------+ + +Here's an actor endpoint URI example containing an actor uuid:: + + actor:uuid:12345678?blocking=true + +In actor endpoint URIs that contain id: or uuid:, an actor identifier (id or +uuid) is optional. In this case, the in-message of an exchange produced to an +actor endpoint must contain a message header with name CamelActorIdentifier +(which is defined by the ActorComponent.ActorIdentifier field) and a value that +is the target actor's identifier. On the other hand, if the URI contains an +actor identifier, it can be seen as a default actor identifier that can be +overridden by messages containing a CamelActorIdentifier header. + + +Message headers +^^^^^^^^^^^^^^^ + ++----------------------+--------+-------------------------------------------+ +| Name | Type | Description | ++======================+========+===========================================+ +| CamelActorIdentifier | String | Contains the identifier (id or uuid) of | +| | | the actor to route the message to. The | +| | | identifier is interpreted as actor id if | +| | | the URI contains id:, the identifier is | +| | | interpreted as uuid id the URI contains | +| | | uuid:. A uuid value may also be of type | +| | | Uuid (not only String). The header name | +| | | is defined by the | +| | | ActorComponent.ActorIdentifier field. | ++----------------------+--------+-------------------------------------------+ + +Here's another actor endpoint URI example that doesn't define an actor uuid. 
In +this case the target actor uuid must be defined by the CamelActorIdentifier +message header:: + + actor:uuid: + +In the following example, a custom route to an actor is created, using the +actor's uuid (i.e. actorRef.uuid). The route starts from a `Jetty`_ endpoint and +ends at the target actor. + + +**Scala** + +.. code-block:: scala + + import org.apache.camel.builder.RouteBuilder + + import akka.actor._ + import akka.actor.Actor + import akka.actor.Actor._ + import akka.camel.{Message, CamelContextManager, CamelServiceManager} + + object CustomRouteExample extends Application { + val target = actorOf[CustomRouteTarget] + + CamelServiceManager.startCamelService + CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder(target.uuid)) + } + + class CustomRouteTarget extends Actor { + def receive = { + case msg: Message => self.reply("Hello %s" format msg.bodyAs[String]) + } + } + + class CustomRouteBuilder(uuid: Uuid) extends RouteBuilder { + def configure { + val actorUri = "actor:uuid:%s" format uuid + from("jetty:http://localhost:8877/camel/custom").to(actorUri) + } + } + + +**Java** + +.. code-block:: java + + import com.eaio.uuid.UUID; + + import org.apache.camel.builder.RouteBuilder; + import static akka.actor.Actors.*; + import akka.actor.ActorRef; + import akka.actor.UntypedActor; + import akka.camel.CamelServiceManager; + import akka.camel.CamelContextManager; + import akka.camel.Message; + + public class CustomRouteExample { + public static void main(String... args) throws Exception { + ActorRef target = actorOf(CustomRouteTarget.class); + CamelServiceManager.startCamelService(); + CamelContextManager.getMandatoryContext().addRoutes(new CustomRouteBuilder(target.getUuid())); + } + } + + public class CustomRouteTarget extends UntypedActor { + public void onReceive(Object message) { + Message msg = (Message) message; + String body = msg.getBodyAs(String.class); + getContext().tryReply(String.format("Hello %s", body)); + } + } + + public class CustomRouteBuilder extends RouteBuilder { + private UUID uuid; + + public CustomRouteBuilder(UUID uuid) { + this.uuid = uuid; + } + + public void configure() { + String actorUri = String.format("actor:uuid:%s", uuid); + from("jetty:http://localhost:8877/camel/custom").to(actorUri); + } + } + +When the example is started, messages POSTed to +``http://localhost:8877/camel/custom`` are routed to the target actor. + + +Access to typed actors +---------------------- + +To access typed actor methods from custom Camel routes, the `typed-actor`_ Camel +component should be used. It is a specialization of the Camel `bean`_ component. +Applications should use the interface (endpoint URI syntax and options) as +described in the bean component documentation but with the typed-actor schema. +Typed Actors must be added to a `Camel registry`_ for being accessible by the +typed-actor component. + +.. _typed-actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/TypedActorComponent.scala +.. _bean: http://camel.apache.org/bean.html +.. _Camel registry: http://camel.apache.org/registry.html + + +.. _camel-typed-actors-using-spring: + +Using Spring +^^^^^^^^^^^^ + +The following example shows how to access typed actors in a Spring application +context. For adding typed actors to the application context and for starting +:ref:`camel-spring-applications` the :ref:`spring-module` module is used in the +following example. 
It offers custom namespace elements to define typed actor +factory beans and to create and start a +CamelService. + +.. code-block:: xml + + + + + + + + + + + + + + + + + +SampleTypedActor is the typed actor interface and SampleTypedActorImpl is the +typed actor implementation class. + +**Scala** + +.. code-block:: scala + + package sample + + import akka.actor.TypedActor + + trait SampleTypedActor { + def foo(s: String): String + } + + class SampleTypedActorImpl extends TypedActor with SampleTypedActor { + def foo(s: String) = "hello %s" format s + } + +**Java** + +.. code-block:: java + + package sample; + + import akka.actor.TypedActor; + + public interface SampleTypedActor { + public String foo(String s); + } + + public class SampleTypedActorImpl extends TypedActor implements SampleTypedActor { + + public String foo(String s) { + return "hello " + s; + } + } + +The SampleRouteBuilder defines a custom route from the direct:test endpoint to +the sample typed actor using a typed-actor endpoint URI. + +**Scala** + +.. code-block:: scala + + package sample + + import org.apache.camel.builder.RouteBuilder + + class SampleRouteBuilder extends RouteBuilder { + def configure = { + // route to typed actor + from("direct:test").to("typed-actor:sample?method=foo") + } + } + +**Java** + +.. code-block:: java + + package sample; + + import org.apache.camel.builder.RouteBuilder; + + public class SampleRouteBuilder extends RouteBuilder { + public void configure() { + // route to typed actor + from("direct:test").to("typed-actor:sample?method=foo"); + } + } + +The typed-actor endpoint URI syntax is:: + + typed-actor:<bean-id>?method=<method-name> + +where ``<bean-id>`` is the id of the bean in the Spring application context and +``<method-name>`` is the name of the typed actor method to invoke. + +Usage of the custom route for sending a message to the typed actor is shown in +the following snippet. + +**Scala** + +.. code-block:: scala + + package sample + + import org.springframework.context.support.ClassPathXmlApplicationContext + import akka.camel.CamelContextManager + + // load Spring application context (starts CamelService) + val appctx = new ClassPathXmlApplicationContext("/context-standalone.xml") + + // access 'sample' typed actor via custom route + assert("hello akka" == CamelContextManager.mandatoryTemplate.requestBody("direct:test", "akka")) + + // close Spring application context (stops CamelService) + appctx.close + +**Java** + +.. code-block:: java + + package sample; + + import org.springframework.context.support.ClassPathXmlApplicationContext; + import akka.camel.CamelContextManager; + + // load Spring application context + ClassPathXmlApplicationContext appctx = new ClassPathXmlApplicationContext("/context-standalone.xml"); + + // access 'externally' registered typed actors with typed-actor component + assert("hello akka".equals(CamelContextManager.getMandatoryTemplate().requestBody("direct:test", "akka"))); + + // close Spring application context (stops CamelService) + appctx.close(); + +The application uses a Camel `producer template`_ to access the typed actor via +the ``direct:test`` endpoint. + +.. _producer template: http://camel.apache.org/producertemplate.html + + +Without Spring +^^^^^^^^^^^^^^ + +Usage of :ref:`spring-module` for adding typed actors to the Camel registry and +starting a CamelService is optional. Setting up a Spring-less application for +accessing typed actors is shown in the next example. + +**Scala** + +.. 
code-block:: scala + + package sample + + import org.apache.camel.impl.{DefaultCamelContext, SimpleRegistry} + import akka.actor.TypedActor + import akka.camel.CamelContextManager + import akka.camel.CamelServiceManager._ + + // register typed actor + val registry = new SimpleRegistry + registry.put("sample", TypedActor.newInstance(classOf[SampleTypedActor], classOf[SampleTypedActorImpl])) + + // customize CamelContext + CamelContextManager.init(new DefaultCamelContext(registry)) + CamelContextManager.mandatoryContext.addRoutes(new SampleRouteBuilder) + + startCamelService + + // access 'sample' typed actor via custom route + assert("hello akka" == CamelContextManager.mandatoryTemplate.requestBody("direct:test", "akka")) + + stopCamelService + +**Java** + +.. code-block:: java + + package sample; + + // register typed actor + SimpleRegistry registry = new SimpleRegistry(); + registry.put("sample", TypedActor.newInstance(SampleTypedActor.class, SampleTypedActorImpl.class)); + + // customize CamelContext + CamelContextManager.init(new DefaultCamelContext(registry)); + CamelContextManager.getMandatoryContext().addRoutes(new SampleRouteBuilder()); + + startCamelService(); + + // access 'sample' typed actor via custom route + assert("hello akka" == CamelContextManager.getMandatoryTemplate().requestBody("direct:test", "akka")); + + stopCamelService(); + +Here, `SimpleRegistry`_, a java.util.Map based registry, is used to register +typed actors. The CamelService is started and stopped programmatically. + +.. _SimpleRegistry: https://svn.apache.org/repos/asf/camel/trunk/camel-core/src/main/java/org/apache/camel/impl/SimpleRegistry.java + + +.. _camel-intercepting-route-construction: + +Intercepting route construction +------------------------------- + +The previous section, :ref:`camel-components`, explained how to setup a route to +an (untyped) actor or typed actor manually. It was the application's +responsibility to define the route and add it to the current CamelContext. This +section explains a more conventient way to define custom routes: akka-camel is +still setting up the routes to consumer actors (and adds these routes to the +current CamelContext) but applications can define extensions to these routes. +Extensions can be defined with Camel's `Java DSL`_ or `Scala DSL`_. For example, +an extension could be a custom error handler that redelivers messages from an +endpoint to an actor's bounded mailbox when the mailbox was full. + +.. _Java DSL: http://camel.apache.org/dsl.html +.. _Scala DSL: http://camel.apache.org/scala-dsl.html + +The following examples demonstrate how to extend a route to a consumer actor for +handling exceptions thrown by that actor. To simplify the example, we configure +:ref:`camel-blocking-exchanges` which reports any exception, that is thrown by +receive, directly back to the Camel route. One could also report exceptions +asynchronously using a Failure reply (see also `this article`__) but we'll do it +differently here. + +__ http://krasserm.blogspot.com/2011/02/akka-consumer-actors-new-features-and.html + + +Actors (untyped) +^^^^^^^^^^^^^^^^ + +**Scala** + +.. 
code-block:: scala + + import akka.actor.Actor + import akka.camel.Consumer + + import org.apache.camel.builder.Builder + import org.apache.camel.model.RouteDefinition + + class ErrorHandlingConsumer extends Actor with Consumer { + def endpointUri = "direct:error-handler-test" + + // Needed to propagate exception back to caller + override def blocking = true + + onRouteDefinition {rd: RouteDefinition => + // Catch any exception and handle it by returning the exception message as response + rd.onException(classOf[Exception]).handled(true).transform(Builder.exceptionMessage).end + } + + protected def receive = { + case msg: Message => throw new Exception("error: %s" format msg.body) + } + } + +**Java** + +.. code-block:: java + + import akka.camel.UntypedConsumerActor; + + import org.apache.camel.builder.Builder; + import org.apache.camel.model.ProcessorDefinition; + import org.apache.camel.model.RouteDefinition; + + public class SampleErrorHandlingConsumer extends UntypedConsumerActor { + + public String getEndpointUri() { + return "direct:error-handler-test"; + } + + // Needed to propagate exception back to caller + public boolean isBlocking() { + return true; + } + + public void preStart() { + onRouteDefinition(new RouteDefinitionHandler() { + public ProcessorDefinition onRouteDefinition(RouteDefinition rd) { + // Catch any exception and handle it by returning the exception message as response + return rd.onException(Exception.class).handled(true).transform(Builder.exceptionMessage()).end(); + } + }); + } + + public void onReceive(Object message) throws Exception { + Message msg = (Message)message; + String body = msg.getBodyAs(String.class); + throw new Exception(String.format("error: %s", body)); + } + + } + + + +For (untyped) actors, consumer route extensions are defined by calling the +onRouteDefinition method with a route definition handler. In Scala, this is a +function of type ``RouteDefinition => ProcessorDefinition[_]``, in Java it is an +instance of ``RouteDefinitionHandler`` which is defined as follows. + +.. code-block:: scala + + package akka.camel + + import org.apache.camel.model.RouteDefinition + import org.apache.camel.model.ProcessorDefinition + + trait RouteDefinitionHandler { + def onRouteDefinition(rd: RouteDefinition): ProcessorDefinition[_] + } + +The akka-camel module creates a RouteDefinition instance by calling +from(endpointUri) on a Camel RouteBuilder (where endpointUri is the endpoint URI +of the consumer actor) and passes that instance as argument to the route +definition handler \*). The route definition handler then extends the route and +returns a ProcessorDefinition (in the above example, the ProcessorDefinition +returned by the end method. See the `org.apache.camel.model`__ package for +details). After executing the route definition handler, akka-camel finally calls +a to(actor:uuid:actorUuid) on the returned ProcessorDefinition to complete the +route to the comsumer actor (where actorUuid is the uuid of the consumer actor). + +\*) Before passing the RouteDefinition instance to the route definition handler, +akka-camel may make some further modifications to it. + +__ https://svn.apache.org/repos/asf/camel/trunk/camel-core/src/main/java/org/apache/camel/model/ + + +Typed actors +^^^^^^^^^^^^ + +For typed consumer actors to define a route definition handler, they must +provide a RouteDefinitionHandler implementation class with the @consume +annotation. The implementation class must have a no-arg constructor. Here's an +example (in Java). + +.. 
code-block:: java + + import org.apache.camel.builder.Builder; + import org.apache.camel.model.ProcessorDefinition; + import org.apache.camel.model.RouteDefinition; + + public class SampleRouteDefinitionHandler implements RouteDefinitionHandler { + public ProcessorDefinition onRouteDefinition(RouteDefinition rd) { + return rd.onException(Exception.class).handled(true).transform(Builder.exceptionMessage()).end(); + } + } + +It can be used as follows. + +**Scala** + +.. code-block:: scala + + trait TestTypedConsumer { + @consume(value="direct:error-handler-test", routeDefinitionHandler=classOf[SampleRouteDefinitionHandler]) + def foo(s: String): String + } + + // implementation class omitted + +**Java** + +.. code-block:: java + + public interface SampleErrorHandlingTypedConsumer { + + @consume(value="direct:error-handler-test", routeDefinitionHandler=SampleRouteDefinitionHandler.class) + String foo(String s); + + } + + // implementation class omitted + + +.. _camel-examples: + +Examples +======== + +For all features described so far, there's running sample code in +`akka-sample-camel`_. The examples in `sample.camel.Boot`_ are started during +Kernel startup because this class has been added to the boot :ref:`configuration`. + +.. _akka-sample-camel: http://github.com/jboner/akka/tree/master/akka-samples/akka-sample-camel/ +.. _sample.camel.Boot: http://github.com/jboner/akka/blob/master/akka-samples/akka-sample-camel/src/main/scala/sample/camel/Boot.scala + +.. code-block:: none + + akka { + ... + boot = ["sample.camel.Boot", ...] + ... + } + +If you don't want to have these examples started during Kernel startup, delete +it from the :ref:`configuration`. Other examples are standalone applications (i.e. classes with a +main method) that can be started from `sbt`_. + +.. _sbt: http://code.google.com/p/simple-build-tool/ + +.. code-block:: none + + $ sbt + [info] Building project akka 2.0-SNAPSHOT against Scala 2.9.0 + [info] using AkkaModulesParentProject with sbt 0.7.7 and Scala 2.7.7 + > project akka-sample-camel + Set current project to akka-sample-camel 2.0-SNAPSHOT + > run + ... + Multiple main classes detected, select one to run: + + [1] sample.camel.ClientApplication + [2] sample.camel.ServerApplication + [3] sample.camel.StandaloneSpringApplication + [4] sample.camel.StandaloneApplication + [5] sample.camel.StandaloneFileApplication + [6] sample.camel.StandaloneJmsApplication + + +Some of the examples in `akka-sample-camel`_ are described in more detail in the +following subsections. + + +.. _camel-async-example: + +Asynchronous routing and transformation example +----------------------------------------------- + +This example demonstrates how to implement consumer and producer actors that +support :ref:`camel-asynchronous-routing` with their Camel endpoints. The sample +application transforms the content of the Akka homepage, http://akka.io, by +replacing every occurrence of *Akka* with *AKKA*. After starting +the :ref:`microkernel`, direct the browser to http://localhost:8875 and the +transformed Akka homepage should be displayed. Please note that this example +will probably not work if you're behind an HTTP proxy. + +The following figure gives an overview how the example actors interact with +external systems and with each other. A browser sends a GET request to +http://localhost:8875 which is the published endpoint of the ``HttpConsumer`` +actor. The ``HttpConsumer`` actor forwards the requests to the ``HttpProducer`` +actor which retrieves the Akka homepage from http://akka.io. 
The retrieved HTML +is then forwarded to the ``HttpTransformer`` actor which replaces all occurences +of *Akka* with *AKKA*. The transformation result is sent back the HttpConsumer +which finally returns it to the browser. + +.. image:: camel-async-interact.png + +Implementing the example actor classes and wiring them together is rather easy +as shown in the following snippet (see also `sample.camel.Boot`_). + +.. code-block:: scala + + import org.apache.camel.Exchange + import akka.actor.Actor._ + import akka.actor.{Actor, ActorRef} + import akka.camel.{Producer, Message, Consumer} + + class HttpConsumer(producer: ActorRef) extends Actor with Consumer { + def endpointUri = "jetty:http://0.0.0.0:8875/" + + protected def receive = { + case msg => producer forward msg + } + } + + class HttpProducer(transformer: ActorRef) extends Actor with Producer { + def endpointUri = "jetty://http://akka.io/?bridgeEndpoint=true" + + override protected def receiveBeforeProduce = { + // only keep Exchange.HTTP_PATH message header (which needed by bridge endpoint) + case msg: Message => msg.setHeaders(msg.headers(Set(Exchange.HTTP_PATH))) + } + + override protected def receiveAfterProduce = { + // do not reply but forward result to transformer + case msg => transformer forward msg + } + } + + class HttpTransformer extends Actor { + protected def receive = { + case msg: Message => self.reply(msg.transformBody {body: String => body replaceAll ("Akka ", "AKKA ")}) + case msg: Failure => self.reply(msg) + } + } + + // Wire and start the example actors + val httpTransformer = actorOf(new HttpTransformer) + val httpProducer = actorOf(new HttpProducer(httpTransformer)) + val httpConsumer = actorOf(new HttpConsumer(httpProducer)) + +The `jetty endpoints`_ of HttpConsumer and HttpProducer support asynchronous +in-out message exchanges and do not allocate threads for the full duration of +the exchange. This is achieved by using `Jetty continuations`_ on the +consumer-side and by using `Jetty's asynchronous HTTP client`_ on the producer +side. The following high-level sequence diagram illustrates that. + +.. _jetty endpoints: http://camel.apache.org/jetty.html +.. _Jetty continuations: http://wiki.eclipse.org/Jetty/Feature/Continuations +.. _Jetty's asynchronous HTTP client: http://wiki.eclipse.org/Jetty/Tutorial/HttpClient + +.. image:: camel-async-sequence.png + + +Custom Camel route example +-------------------------- + +This section also demonstrates the combined usage of a ``Producer`` and a +``Consumer`` actor as well as the inclusion of a custom Camel route. The +following figure gives an overview. + +.. image:: camel-custom-route.png + +* A consumer actor receives a message from an HTTP client + +* It forwards the message to another actor that transforms the message (encloses + the original message into hyphens) + +* The transformer actor forwards the transformed message to a producer actor + +* The producer actor sends the message to a custom Camel route beginning at the + ``direct:welcome`` endpoint + +* A processor (transformer) in the custom Camel route prepends "Welcome" to the + original message and creates a result message + +* The producer actor sends the result back to the consumer actor which returns + it to the HTTP client + + +The example is part of `sample.camel.Boot`_. The consumer, transformer and +producer actor implementations are as follows. + +.. 
code-block:: scala + + package sample.camel + + import akka.actor.{Actor, ActorRef} + import akka.camel.{Message, Consumer} + + class Consumer3(transformer: ActorRef) extends Actor with Consumer { + def endpointUri = "jetty:http://0.0.0.0:8877/camel/welcome" + + def receive = { + // Forward a string representation of the message body to transformer + case msg: Message => transformer.forward(msg.setBodyAs[String]) + } + } + + class Transformer(producer: ActorRef) extends Actor { + protected def receive = { + // example: transform message body "foo" to "- foo -" and forward result to producer + case msg: Message => producer.forward(msg.transformBody((body: String) => "- %s -" format body)) + } + } + + class Producer1 extends Actor with Producer { + def endpointUri = "direct:welcome" + } + +The producer actor knows where to reply the message to because the consumer and +transformer actors have forwarded the original sender reference as well. The +application configuration and the route starting from direct:welcome are as +follows. + +.. code-block:: scala + + package sample.camel + + import org.apache.camel.builder.RouteBuilder + import org.apache.camel.{Exchange, Processor} + + import akka.actor.Actor._ + import akka.camel.CamelContextManager + + class Boot { + CamelContextManager.init() + CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder) + + val producer = actorOf[Producer1] + val mediator = actorOf(new Transformer(producer)) + val consumer = actorOf(new Consumer3(mediator)) + } + + class CustomRouteBuilder extends RouteBuilder { + def configure { + from("direct:welcome").process(new Processor() { + def process(exchange: Exchange) { + // Create a 'welcome' message from the input message + exchange.getOut.setBody("Welcome %s" format exchange.getIn.getBody) + } + }) + } + } + +To run the example, start the :ref:`microkernel` and POST a message to +``http://localhost:8877/camel/welcome``. + +.. code-block:: none + + curl -H "Content-Type: text/plain" -d "Anke" http://localhost:8877/camel/welcome + +The response should be: + +.. code-block:: none + + Welcome - Anke - + + +Publish-subcribe example +------------------------ + +JMS +^^^ + +This section demonstrates how akka-camel can be used to implement +publish/subscribe for actors. The following figure sketches an example for +JMS-based publish/subscribe. + +.. image:: camel-pubsub.png + +A consumer actor receives a message from an HTTP client. It sends the message to +a JMS producer actor (publisher). The JMS producer actor publishes the message +to a JMS topic. Two other actors that subscribed to that topic both receive the +message. The actor classes used in this example are shown in the following +snippet. + +.. code-block:: scala + + package sample.camel + + import akka.actor.{Actor, ActorRef} + import akka.camel.{Producer, Message, Consumer} + + class Subscriber(name:String, uri: String) extends Actor with Consumer { + def endpointUri = uri + + protected def receive = { + case msg: Message => println("%s received: %s" format (name, msg.body)) + } + } + + class Publisher(name: String, uri: String) extends Actor with Producer { + self.id = name + + def endpointUri = uri + + // one-way communication with JMS + override def oneway = true + } + + class PublisherBridge(uri: String, publisher: ActorRef) extends Actor with Consumer { + def endpointUri = uri + + protected def receive = { + case msg: Message => { + publisher ! 
msg.bodyAs[String] + self.reply("message published") + } + } + } + +Wiring these actors to implement the above example is as simple as + +.. code-block:: scala + + package sample.camel + + import org.apache.camel.impl.DefaultCamelContext + import org.apache.camel.spring.spi.ApplicationContextRegistry + import org.springframework.context.support.ClassPathXmlApplicationContext + + import akka.actor.Actor._ + import akka.camel.CamelContextManager + + class Boot { + // Create CamelContext with Spring-based registry and custom route builder + val context = new ClassPathXmlApplicationContext("/context-jms.xml", getClass) + val registry = new ApplicationContextRegistry(context) + CamelContextManager.init(new DefaultCamelContext(registry)) + + // Setup publish/subscribe example + val jmsUri = "jms:topic:test" + val jmsSubscriber1 = actorOf(new Subscriber("jms-subscriber-1", jmsUri)) + val jmsSubscriber2 = actorOf(new Subscriber("jms-subscriber-2", jmsUri)) + val jmsPublisher = actorOf(new Publisher("jms-publisher", jmsUri)) + + val jmsPublisherBridge = actorOf(new PublisherBridge("jetty:http://0.0.0.0:8877/camel/pub/jms", jmsPublisher)) + } + +To publish messages to subscribers one could of course also use the JMS API +directly; there's no need to do that over a JMS producer actor as in this +example. For the example to work, Camel's `jms`_ component needs to be +configured with a JMS connection factory which is done in a Spring application +context XML file (context-jms.xml). + +.. _jms: http://camel.apache.org/jms.html + +.. code-block:: xml + + + + + + + + + + + + + + + + + + + + + + + + + +To run the example, start the :ref:`microkernel` and POST a +message to ``http://localhost:8877/camel/pub/jms``. + +.. code-block:: none + + curl -H "Content-Type: text/plain" -d "Happy hAkking" http://localhost:8877/camel/pub/jms + +The HTTP response body should be + +.. code-block:: none + + message published + +On the console, where you started the Akka Kernel, you should see something like + +.. code-block:: none + + ... + INF [20100622-11:49:57.688] camel: jms-subscriber-2 received: Happy hAkking + INF [20100622-11:49:57.688] camel: jms-subscriber-1 received: Happy hAkking + + +Cometd +^^^^^^ + +Publish/subscribe with `CometD`_ is equally easy using `Camel's cometd +component`_. + +.. _CometD: http://cometd.org/ +.. _Camel's cometd component: http://camel.apache.org/cometd.html + +.. image:: camel-pubsub2.png + +All actor classes from the JMS example can re-used, only the endpoint URIs need +to be changed. + +.. code-block:: scala + + package sample.camel + + import org.apache.camel.impl.DefaultCamelContext + import org.apache.camel.spring.spi.ApplicationContextRegistry + import org.springframework.context.support.ClassPathXmlApplicationContext + + import akka.actor.Actor._ + import akka.camel.CamelContextManager + + class Boot { + // ... + + // Setup publish/subscribe example + val cometdUri = "cometd://localhost:8111/test/abc?resourceBase=target" + val cometdSubscriber = actorOf(new Subscriber("cometd-subscriber", cometdUri)) + val cometdPublisher = actorOf(new Publisher("cometd-publisher", cometdUri)) + + val cometdPublisherBridge = actorOf(new PublisherBridge("jetty:http://0.0.0.0:8877/camel/pub/cometd", cometdPublisher)) + } + + +Quartz Scheduler Example +------------------------ + +Here is an example showing how simple is to implement a cron-style scheduler by +using the Camel Quartz component in Akka. + +The following example creates a "timer" actor which fires a message every 2 +seconds: + +.. 
code-block:: scala + + package com.dimingo.akka + + import akka.actor.Actor + import akka.actor.Actor.actorOf + + import akka.camel.{Consumer, Message} + import akka.camel.CamelServiceManager._ + + class MyQuartzActor extends Actor with Consumer { + + def endpointUri = "quartz://example?cron=0/2+*+*+*+*+?" + + def receive = { + + case msg => println("==============> received %s " format msg) + + } // end receive + + } // end MyQuartzActor + + object MyQuartzActor { + + def main(str: Array[String]) { + + // start the Camel service + startCamelService + + // create and start a quartz actor + val myActor = actorOf[MyQuartzActor] + + } // end main + + } // end MyQuartzActor + +The full working example is available for download here: +http://www.dimingo.com/akka/examples/example-akka-quartz.tar.gz + +You can launch it using the maven command: + +.. code-block:: none + + $ mvn scala:run -DmainClass=com.dimingo.akka.MyQuartzActor + +For more information about the Camel Quartz component, see here: +http://camel.apache.org/quartz.html diff --git a/akka-docs/disabled/microkernel.rst b/akka-docs/disabled/microkernel.rst new file mode 100644 index 0000000000..cbf9ba96ba --- /dev/null +++ b/akka-docs/disabled/microkernel.rst @@ -0,0 +1,40 @@ + +.. _microkernel: + +############# + Microkernel +############# + + +Run the microkernel +=================== + +To start the kernel use the scripts in the ``bin`` directory. + +All services are configured in the :ref:`configuration` file in the ``config`` directory. +Services you want to be started up automatically should be listed in the list of ``boot`` classes in +the :ref:`configuration`. + +Put your application in the ``deploy`` directory. + + +Akka Home +--------- + +Note that the microkernel needs to know where the Akka home is (the base +directory of the microkernel). The above scripts do this for you. Otherwise, you +can set Akka home by: + +* Specifying the ``AKKA_HOME`` environment variable + +* Specifying the ``-Dakka.home`` java option + + +.. _hello-microkernel: + +Hello Microkernel +================= + +There is a very simple Akka Mist sample project included in the microkernel +``deploy`` directory. Start the microkernel with the start script and then go to +http://localhost:9998 to say Hello to the microkernel. diff --git a/akka-docs/disabled/spring.rst b/akka-docs/disabled/spring.rst new file mode 100644 index 0000000000..29bf4632cf --- /dev/null +++ b/akka-docs/disabled/spring.rst @@ -0,0 +1,335 @@ + +.. _spring-module: + +#################### + Spring Integration +#################### + +Module stability: **STABLE** + +Akkas integration with the `Spring Framework `_ supplies the Spring way of using the Typed Actor Java API and for CamelService configuration for :ref:`camel-spring-applications`. It uses Spring's custom namespaces to create Typed Actors, supervisor hierarchies and a CamelService in a Spring environment. + +Contents: + +.. contents:: :local: + +To use the custom name space tags for Akka you have to add the XML schema definition to your spring configuration. It is available at `http://akka.io/akka-1.0.xsd `_. The namespace for Akka is: + +.. code-block:: xml + + xmlns:akka="http://akka.io/schema/akka" + +Example header for Akka Spring configuration: + +.. code-block:: xml + + + + +- + +Actors +------ + +Actors in Java are created by extending the 'UntypedActor' class and implementing the 'onReceive' method. + +Example how to create Actors with the Spring framework: + +.. 
code-block:: xml + + + + + + +Supported scopes are singleton and prototype. Dependencies and properties are set with Springs ```` element. +A dependency can be either a ```` or a regular ````. + +Get the Actor from the Spring context: + +.. code-block:: java + + ApplicationContext context = new ClassPathXmlApplicationContext("akka-spring-config.xml"); + ActorRef actorRef = (ActorRef) context.getBean("myActor"); + +Typed Actors +------------ + +Here are some examples how to create Typed Actors with the Spring framework: + +Creating a Typed Actor: +^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: xml + + + + + + + + +Supported scopes are singleton and prototype. Dependencies and properties are set with Springs ```` element. +A dependency can be either a ```` or a regular ````. + +Get the Typed Actor from the Spring context: + +.. code-block:: java + + ApplicationContext context = new ClassPathXmlApplicationContext("akka-spring-config.xml"); + MyPojo myPojo = (MyPojo) context.getBean("myActor"); + +Remote Actors +------------- + +For details on server managed and client managed remote actors see Remote Actor documentation. + +Configuration for a client managed remote Actor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:: + + + + + +The default for 'managed-by' is "client", so in the above example it could be left out. + +Configuration for a server managed remote Actor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Server side +*********** + +:: + + + + + + + + + + +If the server specified by 'host' and 'port' does not exist it will not be registered. + +Client side +*********** + +:: + + + + + +Configuration for a client managed remote Typed Actor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: xml + + + + + +Configuration for a server managed remote Typed Actor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Sever side setup +**************** + +:: + + + + + +Client side setup +***************** + +:: + + + + +Dispatchers +----------- + +Configuration for a Typed Actor or Untyped Actor with a custom dispatcher +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you don't want to use the default dispatcher you can define your own dispatcher in the spring configuration. For more information on dispatchers have a look at Dispatchers documentation. + +.. code-block:: xml + + + + + + + + + + + +If you want to or have to share the dispatcher between Actors you can define a dispatcher and reference it from the Typed Actor configuration: + +.. code-block:: xml + + + + + + + + + +The following dispatcher types are available in spring configuration: + +* executor-based-event-driven +* executor-based-event-driven-work-stealing +* thread-based + +The following queue types are configurable for dispatchers using thread pools: + +* bounded-linked-blocking-queue +* unbounded-linked-blocking-queue +* synchronous-queue +* bounded-array-blocking-queue + +If you have set up your IDE to be XSD-aware you can easily write your configuration through auto-completion. + +Stopping Typed Actors and Untyped Actors +---------------------------------------- + +Actors with scope singleton are stopped when the application context is closed. Actors with scope prototype must be stopped by the application. + +Supervisor Hierarchies +---------------------- + +The supervisor configuration in Spring follows the declarative configuration for the Java API. Have a look at Akka's approach to fault tolerance. 
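+
+The next example defines such a supervisor declaratively. As a complement, here is a
+minimal usage sketch of how an application would typically work with the configured
+supervisor at runtime; the bean id ``my-supervisor``, ``TypedActorConfigurator`` and
+``MyPojo`` refer to the example below, and ``doSomething`` is a hypothetical method
+used only for illustration.
+
+.. code-block:: java
+
+  // Load the application context containing the supervisor configuration
+  ApplicationContext context = new ClassPathXmlApplicationContext("akka-spring-config.xml");
+
+  // The supervisor is looked up like any other Spring bean
+  TypedActorConfigurator configurator = (TypedActorConfigurator) context.getBean("my-supervisor");
+
+  // Supervised typed actors are obtained from the configurator,
+  // not directly from the application context
+  MyPojo myPojo = (MyPojo) configurator.getInstance(MyPojo.class);
+
+  // If a call fails with one of the configured trap-exit exceptions, the supervisor
+  // restarts the typed actor according to the configured restart strategy
+  myPojo.doSomething(); // hypothetical method, for illustration only
+
+Note that, as in the example below, the supervised typed actor is obtained through the
+configurator's ``getInstance`` method rather than directly from the application context.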
+ +Example spring supervisor configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: xml + + + + + + + java.io.IOException + + + + + + + + + + + + + + java.io.IOException + java.lang.NullPointerException + + + + + + + + + + +Get the TypedActorConfigurator from the Spring context +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: java + + TypedActorConfigurator myConfigurator = (TypedActorConfigurator) context.getBean("my-supervisor"); + MyPojo myPojo = (MyPOJO) myConfigurator.getInstance(MyPojo.class); + +Property Placeholders +--------------------- + +The Akka configuration can be made available as property placeholders by using a custom property placeholder configurer for Configgy: + +:: + + + + + + + +Camel configuration +------------------- + +For details refer to the :ref:`camel-module` documentation: + +* CamelService configuration for :ref:`camel-spring-applications` +* Access to Typed Actors :ref:`camel-typed-actors-using-spring` diff --git a/akka-docs/modules/camel.rst b/akka-docs/modules/camel.rst index 8b2b84c992..ecdd956724 100644 --- a/akka-docs/modules/camel.rst +++ b/akka-docs/modules/camel.rst @@ -5,2899 +5,5 @@ Camel ####### -For an introduction to akka-camel, see also the `Appendix E - Akka and Camel`_ -(pdf) of the book `Camel in Action`_. +The Akka Camel module has not been migrated to Akka 2.0-SNAPSHOT yet. -.. _Appendix E - Akka and Camel: http://www.manning.com/ibsen/appEsample.pdf -.. _Camel in Action: http://www.manning.com/ibsen/ - -Contents: - -.. contents:: :local: - -Other, more advanced external articles are: - -* `Akka Consumer Actors: New Features and Best Practices `_ -* `Akka Producer Actors: New Features and Best Practices `_ - - -Introduction -============ - -The akka-camel module allows actors, untyped actors, and typed actors to receive -and send messages over a great variety of protocols and APIs. This section gives -a brief overview of the general ideas behind the akka-camel module, the -remaining sections go into the details. In addition to the native Scala and Java -actor API, actors can now exchange messages with other systems over large number -of protocols and APIs such as HTTP, SOAP, TCP, FTP, SMTP or JMS, to mention a -few. At the moment, approximately 80 protocols and APIs are supported. - -The akka-camel module is based on `Apache Camel`_, a powerful and leight-weight -integration framework for the JVM. For an introduction to Apache Camel you may -want to read this `Apache Camel article`_. Camel comes with a -large number of `components`_ that provide bindings to different protocols and -APIs. The `camel-extra`_ project provides further components. - -.. _Apache Camel: http://camel.apache.org/ -.. _Apache Camel article: http://architects.dzone.com/articles/apache-camel-integration -.. _components: http://camel.apache.org/components.html -.. _camel-extra: http://code.google.com/p/camel-extra/ - -Usage of Camel's integration components in Akka is essentially a -one-liner. Here's an example. - -.. code-block:: scala - - import akka.actor.Actor - import akka.actor.Actor._ - import akka.camel.{Message, Consumer} - - class MyActor extends Actor with Consumer { - def endpointUri = "mina:tcp://localhost:6200?textline=true" - - def receive = { - case msg: Message => { /* ... */} - case _ => { /* ... */} - } - } - - // start and expose actor via tcp - val myActor = actorOf[MyActor] - -The above example exposes an actor over a tcp endpoint on port 6200 via Apache -Camel's `Mina component`_. 
The actor implements the endpointUri method to define -an endpoint from which it can receive messages. After starting the actor, tcp -clients can immediately send messages to and receive responses from that -actor. If the message exchange should go over HTTP (via Camel's `Jetty -component`_), only the actor's endpointUri method must be changed. - -.. _Mina component: http://camel.apache.org/mina.html -.. _Jetty component: http://camel.apache.org/jetty.html - -.. code-block:: scala - - class MyActor extends Actor with Consumer { - def endpointUri = "jetty:http://localhost:8877/example" - - def receive = { - case msg: Message => { /* ... */} - case _ => { /* ... */} - } - } - -Actors can also trigger message exchanges with external systems i.e. produce to -Camel endpoints. - -.. code-block:: scala - - import akka.actor.Actor - import akka.camel.{Producer, Oneway} - - class MyActor extends Actor with Producer with Oneway { - def endpointUri = "jms:queue:example" - } - -In the above example, any message sent to this actor will be added (produced) to -the example JMS queue. Producer actors may choose from the same set of Camel -components as Consumer actors do. - -The number of Camel components is constantly increasing. The akka-camel module -can support these in a plug-and-play manner. Just add them to your application's -classpath, define a component-specific endpoint URI and use it to exchange -messages over the component-specific protocols or APIs. This is possible because -Camel components bind protocol-specific message formats to a Camel-specific -`normalized message format`__. The normalized message format hides -protocol-specific details from Akka and makes it therefore very easy to support -a large number of protocols through a uniform Camel component interface. The -akka-camel module further converts mutable Camel messages into `immutable -representations`__ which are used by Consumer and Producer actors for pattern -matching, transformation, serialization or storage, for example. - -__ https://svn.apache.org/repos/asf/camel/trunk/camel-core/src/main/java/org/apache/camel/Message.java -__ http://github.com/jboner/akka/blob/v0.8/akka-camel/src/main/scala/akka/Message.scala#L17 - - -Dependencies -============ - -Akka's Camel Integration consists of two modules - -* akka-camel - this module depends on akka-actor and camel-core (+ transitive - dependencies) and implements the Camel integration for (untyped) actors - -* akka-camel-typed - this module depends on akka-typed-actor and akka-camel (+ - transitive dependencies) and implements the Camel integration for typed actors - -The akka-camel-typed module is optional. To have both untyped and typed actors -working with Camel, add the following dependencies to your SBT project -definition. - -.. code-block:: scala - - import sbt._ - - class Project(info: ProjectInfo) extends DefaultProject(info) with AkkaProject { - // ... - val akkaCamel = akkaModule("camel") - val akkaCamelTyped = akkaModule("camel-typed") // optional typed actor support - // ... - } - - -.. _camel-consume-messages: - -Consume messages -================ - -Actors (untyped) ----------------- - -For actors (Scala) to receive messages, they must mixin the `Consumer`_ -trait. For example, the following actor class (Consumer1) implements the -endpointUri method, which is declared in the Consumer trait, in order to receive -messages from the ``file:data/input/actor`` Camel endpoint. 
Untyped actors -(Java) need to extend the abstract UntypedConsumerActor class and implement the -getEndpointUri() and onReceive(Object) methods. - -.. _Consumer: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Consumer.scala - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor - import akka.camel.{Message, Consumer} - - class Consumer1 extends Actor with Consumer { - def endpointUri = "file:data/input/actor" - - def receive = { - case msg: Message => println("received %s" format msg.bodyAs[String]) - } - } - -**Java** - -.. code-block:: java - - import akka.camel.Message; - import akka.camel.UntypedConsumerActor; - - public class Consumer1 extends UntypedConsumerActor { - public String getEndpointUri() { - return "file:data/input/actor"; - } - - public void onReceive(Object message) { - Message msg = (Message)message; - String body = msg.getBodyAs(String.class); - System.out.println(String.format("received %s", body)) - } - } - -Whenever a file is put into the data/input/actor directory, its content is -picked up by the Camel `file component`_ and sent as message to the -actor. Messages consumed by actors from Camel endpoints are of type -`Message`_. These are immutable representations of Camel messages. - -.. _file component: http://camel.apache.org/file2.html -.. _Message: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Message.scala - -For Message usage examples refer to the unit tests: - -* Message unit tests - `Scala API `_ -* Message unit tests - `Java API `_ - -Here's another example that sets the endpointUri to -``jetty:http://localhost:8877/camel/default``. It causes Camel's `Jetty -component`_ to start an embedded `Jetty`_ server, accepting HTTP connections -from localhost on port 8877. - -.. _Jetty component: http://camel.apache.org/jetty.html -.. _Jetty: http://www.eclipse.org/jetty/ - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor - import akka.camel.{Message, Consumer} - - class Consumer2 extends Actor with Consumer { - def endpointUri = "jetty:http://localhost:8877/camel/default" - - def receive = { - case msg: Message => self.reply("Hello %s" format msg.bodyAs[String]) - } - } - -**Java** - -.. code-block:: java - - import akka.camel.Message; - import akka.camel.UntypedConsumerActor; - - public class Consumer2 extends UntypedConsumerActor { - public String getEndpointUri() { - return "jetty:http://localhost:8877/camel/default"; - } - - public void onReceive(Object message) { - Message msg = (Message)message; - String body = msg.getBodyAs(String.class); - getContext().tryReply(String.format("Hello %s", body)); - } - } - -After starting the actor, clients can send messages to that actor by POSTing to -``http://localhost:8877/camel/default``. The actor sends a response by using the -self.reply method (Scala). For returning a message body and headers to the HTTP -client the response type should be `Message`_. For any other response type, a -new Message object is created by akka-camel with the actor response as message -body. - -.. _Message: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Message.scala - - -Typed actors ------------- - -Typed actors can also receive messages from Camel endpoints. In contrast to -(untyped) actors, which only implement a single receive or onReceive method, a -typed actor may define several (message processing) methods, each of which can -receive messages from a different Camel endpoint. 
For a typed actor method to be -exposed as Camel endpoint it must be annotated with the `@consume -annotation`_. For example, the following typed consumer actor defines two -methods, foo and bar. - -.. _@consume annotation: http://github.com/jboner/akka/blob/master/akka-camel/src/main/java/akka/camel/consume.java - -**Scala** - -.. code-block:: scala - - import org.apache.camel.{Body, Header} - import akka.actor.TypedActor - import akka.camel.consume - - trait TypedConsumer1 { - @consume("file:data/input/foo") - def foo(body: String): Unit - - @consume("jetty:http://localhost:8877/camel/bar") - def bar(@Body body: String, @Header("X-Whatever") header: String): String - } - - class TypedConsumer1Impl extends TypedActor with TypedConsumer1 { - def foo(body: String) = println("Received message: %s" format body) - def bar(body: String, header: String) = "body=%s header=%s" format (body, header) - } - -**Java** - -.. code-block:: java - - import org.apache.camel.Body; - import org.apache.camel.Header; - import akka.actor.TypedActor; - import akka.camel.consume; - - public interface TypedConsumer1 { - @consume("file:data/input/foo") - public void foo(String body); - - @consume("jetty:http://localhost:8877/camel/bar") - public String bar(@Body String body, @Header("X-Whatever") String header); - } - - public class TypedConsumer1Impl extends TypedActor implements TypedConsumer1 { - public void foo(String body) { - System.out.println(String.format("Received message: ", body)); - } - - public String bar(String body, String header) { - return String.format("body=%s header=%s", body, header); - } - } - -The foo method can be invoked by placing a file in the data/input/foo -directory. Camel picks up the file from this directory and akka-camel invokes -foo with the file content as argument (converted to a String). Camel -automatically tries to convert messages to appropriate types as defined by the -method parameter(s). The conversion rules are described in detail on the -following pages: - -* `Bean integration `_ -* `Bean binding `_ -* `Parameter binding `_ - -The bar method can be invoked by POSTing a message to -http://localhost:8877/camel/bar. Here, parameter binding annotations are used to -tell Camel how to extract data from the HTTP message. The @Body annotation binds -the HTTP request body to the first parameter, the @Header annotation binds the -X-Whatever header to the second parameter. The return value is sent as HTTP -response message body to the client. - -Parameter binding annotations must be placed on the interface, the @consume -annotation can also be placed on the methods in the implementation class. - - -.. _camel-publishing: - -Consumer publishing -------------------- - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -Publishing a consumer actor at its Camel endpoint occurs when the actor is -started. Publication is done asynchronously; setting up an endpoint (more -precisely, the route from that endpoint to the actor) may still be in progress -after the ActorRef method returned. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor._ - - val actor = actorOf[Consumer1] // create Consumer actor and activate endpoint in background - -**Java** - -.. 
code-block:: java - - import static akka.actor.Actors.*; - import akka.actor.ActorRef; - - ActorRef actor = actorOf(Consumer1.class); // create Consumer actor and activate endpoint in background - - -Typed actors -^^^^^^^^^^^^ - -Publishing of typed actor methods is done when the typed actor is created with -one of the TypedActor.newInstance(..) methods. Publication is done in the -background here as well i.e. it may still be in progress when -TypedActor.newInstance(..) returns. - -**Scala** - -.. code-block:: scala - - import akka.actor.TypedActor - - // create TypedConsumer1 object and activate endpoint(s) in background - val consumer = TypedActor.newInstance(classOf[TypedConsumer1], classOf[TypedConumer1Impl]) - -**Java** - -.. code-block:: java - - import akka.actor.TypedActor; - - // create TypedConsumer1 object and activate endpoint(s) in background - TypedConsumer1 consumer = TypedActor.newInstance(TypedConsumer1.class, TypedConumer1Impl.class); - - -.. _camel-consumers-and-camel-service: - -Consumers and the CamelService ------------------------------- - -Publishing of consumer actors or typed actor methods requires a running -CamelService. The Akka :ref:`microkernel` can start a CamelService automatically -(see :ref:`camel-configuration`). When using Akka in other environments, a -CamelService must be started manually. Applications can do that by calling the -CamelServiceManager.startCamelService method. - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelServiceManager._ - - startCamelService - -**Java** - -.. code-block:: java - - import static akka.camel.CamelServiceManager.*; - - startCamelService(); - -If applications need to wait for a certain number of consumer actors or typed -actor methods to be published they can do so with the -``CamelServiceManager.mandatoryService.awaitEndpointActivation`` method, where -``CamelServiceManager.mandatoryService`` is the current CamelService instance -(or throws an IllegalStateException there's no current CamelService). - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelServiceManager._ - - startCamelService - - // Wait for three conumer endpoints to be activated - mandatoryService.awaitEndpointActivation(3) { - // Start three consumer actors (for example) - // ... - } - - // Communicate with consumer actors via their activated endpoints - // ... - -**Java** - -.. code-block:: java - - import akka.japi.SideEffect; - import static akka.camel.CamelServiceManager.*; - - startCamelService(); - - // Wait for three conumer endpoints to be activated - getMandatoryService().awaitEndpointActivation(3, new SideEffect() { - public void apply() { - // Start three consumer actors (for example) - // ... - } - }); - - // Communicate with consumer actors via their activated endpoints - // ... - -Alternatively, one can also use ``Option[CamelService]`` returned by -``CamelServiceManager.service``. - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelServiceManager._ - - startCamelService - - for(s <- service) s.awaitEndpointActivation(3) { - // ... - } - -**Java** - -.. code-block:: java - - import java.util.concurrent.CountDownLatch; - - import akka.camel.CamelService; - import static akka.camel.CamelServiceManager.*; - - startCamelService(); - - for (CamelService s : getService()) s.awaitEndpointActivation(3, new SideEffect() { - public void apply() { - // ... 
- } - }); - -:ref:`camel-configuration` additionally describes how a CamelContext, that is -managed by a CamelService, can be cutomized before starting the service. When -the CamelService is no longer needed, it should be stopped. - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelServiceManager._ - - stopCamelService - -**Java** - -.. code-block:: java - - import static akka.camel.CamelServiceManager.*; - - stopCamelService(); - - -.. _camel-unpublishing: - -Consumer un-publishing ----------------------- - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -When an actor is stopped, the route from the endpoint to that actor is stopped -as well. For example, stopping an actor that has been previously published at -``http://localhost:8877/camel/test`` will cause a connection failure when trying -to access that endpoint. Stopping the route is done asynchronously; it may be -still in progress after the ``ActorRef.stop`` method returned. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor._ - - val actor = actorOf[Consumer1] // create Consumer actor - actor // activate endpoint in background - // ... - actor.stop // deactivate endpoint in background - -**Java** - -.. code-block:: java - - import static akka.actor.Actors.*; - import akka.actor.ActorRef; - - ActorRef actor = actorOf(Consumer1.class); // create Consumer actor and activate endpoint in background - // ... - actor.stop(); // deactivate endpoint in background - - -Typed actors -^^^^^^^^^^^^ - -When a typed actor is stopped, routes to @consume annotated methods of this -typed actors are stopped as well. Stopping the routes is done asynchronously; it -may be still in progress after the TypedActor.stop method returned. - -**Scala** - -.. code-block:: scala - - import akka.actor.TypedActor - - // create TypedConsumer1 object and activate endpoint(s) in background - val consumer = TypedActor.newInstance(classOf[TypedConsumer1], classOf[TypedConumer1Impl]) - - // deactivate endpoints in background - TypedActor.stop(consumer) - -**Java** - -.. code-block:: java - - import akka.actor.TypedActor; - - // Create typed consumer actor and activate endpoints in background - TypedConsumer1 consumer = TypedActor.newInstance(TypedConsumer1.class, TypedConumer1Impl.class); - - // Deactivate endpoints in background - TypedActor.stop(consumer); - - -.. _camel-acknowledgements: - -Acknowledgements ----------------- - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -With in-out message exchanges, clients usually know that a message exchange is -done when they receive a reply from a consumer actor. The reply message can be a -Message (or any object which is then internally converted to a Message) on -success, and a Failure message on failure. - -With in-only message exchanges, by default, an exchange is done when a message -is added to the consumer actor's mailbox. Any failure or exception that occurs -during processing of that message by the consumer actor cannot be reported back -to the endpoint in this case. To allow consumer actors to positively or -negatively acknowledge the receipt of a message from an in-only message -exchange, they need to override the ``autoack`` (Scala) or ``isAutoack`` (Java) -method to return false. In this case, consumer actors must reply either with a -special Ack message (positive acknowledgement) or a Failure (negative -acknowledgement). - -**Scala** - -.. code-block:: scala - - import akka.camel.{Ack, Failure} - // ... 
other imports omitted - - class Consumer3 extends Actor with Consumer { - override def autoack = false - - def endpointUri = "jms:queue:test" - - def receive = { - // ... - self.reply(Ack) // on success - // ... - self.reply(Failure(...)) // on failure - } - } - -**Java** - -.. code-block:: java - - import akka.camel.Failure - import static akka.camel.Ack.ack; - // ... other imports omitted - - public class Consumer3 extends UntypedConsumerActor { - - public String getEndpointUri() { - return "jms:queue:test"; - } - - public boolean isAutoack() { - return false; - } - - public void onReceive(Object message) { - // ... - getContext().reply(ack()) // on success - // ... - val e: Exception = ... - getContext().reply(new Failure(e)) // on failure - } - } - - -.. _camel-blocking-exchanges: - -Blocking exchanges ------------------- - -By default, message exchanges between a Camel endpoint and a consumer actor are -non-blocking because, internally, the ! (bang) operator is used to commicate -with the actor. The route to the actor does not block waiting for a reply. The -reply is sent asynchronously (see also :ref:`camel-asynchronous-routing`). -Consumer actors however can be configured to make this interaction blocking. - -**Scala** - -.. code-block:: scala - - class ExampleConsumer extends Actor with Consumer { - override def blocking = true - - def endpointUri = ... - def receive = { - // ... - } - } - -**Java** - -.. code-block:: java - - public class ExampleConsumer extends UntypedConsumerActor { - - public boolean isBlocking() { - return true; - } - - public String getEndpointUri() { - // ... - } - - public void onReceive(Object message) { - // ... - } - } - -In this case, the ``!!`` (bangbang) operator is used internally to communicate -with the actor which blocks a thread until the consumer sends a response or -throws an exception within receive. Although it may decrease scalability, this -setting can simplify error handling (see `this article`_) or allows timeout -configurations on actor-level (see :ref:`camel-timeout`). - -.. _this article: http://krasserm.blogspot.com/2011/02/akka-consumer-actors-new-features-and.html - - -.. _camel-timeout: - -Consumer timeout ----------------- - -Endpoints that support two-way communications need to wait for a response from -an (untyped) actor or typed actor before returning it to the initiating client. -For some endpoint types, timeout values can be defined in an endpoint-specific -way which is described in the documentation of the individual `Camel -components`_. Another option is to configure timeouts on the level of consumer -actors and typed consumer actors. - -.. _Camel components: http://camel.apache.org/components.html - - -Typed actors -^^^^^^^^^^^^ - -For typed actors, timeout values for method calls that return a result can be -set when the typed actor is created. In the following example, the timeout is -set to 20 seconds (default is 5 seconds). - -**Scala** - -.. code-block:: scala - - import akka.actor.TypedActor - - val consumer = TypedActor.newInstance(classOf[TypedConsumer1], classOf[TypedConumer1Impl], 20000 /* 20 seconds */) - -**Java** - -.. 
code-block:: java - - import akka.actor.TypedActor; - - TypedConsumer1 consumer = TypedActor.newInstance(TypedConsumer1.class, TypedConumer1Impl.class, 20000 /* 20 seconds */); - - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -Two-way communications between a Camel endpoint and an (untyped) actor are -initiated by sending the request message to the actor with the ``!`` (bang) -operator and the actor replies to the endpoint when the response is ready. In -order to support timeouts on actor-level, endpoints need to send the request -message with the ``!!`` (bangbang) operator for which a timeout value is -applicable. This can be achieved by overriding the Consumer.blocking method to -return true. - -**Scala** - -.. code-block:: scala - - class Consumer2 extends Actor with Consumer { - self.timeout = 20000 // timeout set to 20 seconds - - override def blocking = true - - def endpointUri = "direct:example" - - def receive = { - // ... - } - } - -**Java** - -.. code-block:: java - - public class Consumer2 extends UntypedConsumerActor { - - public Consumer2() { - getContext().setTimeout(20000); // timeout set to 20 seconds - } - - public String getEndpointUri() { - return "direct:example"; - } - - public boolean isBlocking() { - return true; - } - - public void onReceive(Object message) { - // ... - } - } - -This is a valid approach for all endpoint types that do not "natively" support -asynchronous two-way message exchanges. For all other endpoint types (like -`Jetty`_ endpoints) is it not recommended to switch to blocking mode but rather -to configure timeouts in an endpoint-specific way (see -also :ref:`camel-asynchronous-routing`). - - -Remote consumers ----------------- - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -Publishing of remote consumer actors is always done on the server side, local -proxies are never published. Hence the CamelService must be started on the -remote node. For example, to publish an (untyped) actor on a remote node at -endpoint URI ``jetty:http://localhost:6644/remote-actor-1``, define the -following consumer actor class. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor - import akka.annotation.consume - import akka.camel.Consumer - - class RemoteActor1 extends Actor with Consumer { - def endpointUri = "jetty:http://localhost:6644/remote-actor-1" - - protected def receive = { - case msg => self.reply("response from remote actor 1") - } - } - -**Java** - -.. code-block:: java - - import akka.camel.UntypedConsumerActor; - - public class RemoteActor1 extends UntypedConsumerActor { - public String getEndpointUri() { - return "jetty:http://localhost:6644/remote-actor-1"; - } - - public void onReceive(Object message) { - getContext().tryReply("response from remote actor 1"); - } - } - -On the remote node, start a `CamelService`_, start a remote server, create the -actor and register it at the remote server. - -.. _CamelService: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/CamelService.scala - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelServiceManager._ - import akka.actor.Actor._ - import akka.actor.ActorRef - - // ... - startCamelService - - val consumer = val consumer = actorOf[RemoteActor1] - - remote.start("localhost", 7777) - remote.register(consumer) // register and start remote consumer - // ... - -**Java** - -.. code-block:: java - - import akka.camel.CamelServiceManager; - import static akka.actor.Actors.*; - - // ... 
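-  // Note: this code runs on the remote node itself. As stated above, remote
-  // consumer actors are published on the server side only, never via local
-  // proxies, so the CamelService below must be started on that node.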
-  CamelServiceManager.startCamelService();
-
-  ActorRef actor = actorOf(RemoteActor1.class);
-
-  remote().start("localhost", 7777);
-  remote().register(actor); // register and start remote consumer
-  // ...
-
-Explicitly starting a CamelService can be omitted when Akka is running in
-Kernel mode (see also :ref:`camel-configuration`).
-
-
-Typed actors
-^^^^^^^^^^^^
-
-Remote typed consumer actors can be registered with one of the
-``registerTyped*`` methods on the remote server. The following example registers
-the actor with the custom id "123".
-
-**Scala**
-
-.. code-block:: scala
-
-  import akka.actor.TypedActor
-
-  // ...
-  val obj = TypedActor.newRemoteInstance(
-    classOf[SampleRemoteTypedConsumer],
-    classOf[SampleRemoteTypedConsumerImpl])
-
-  remote.registerTypedActor("123", obj)
-  // ...
-
-**Java**
-
-.. code-block:: java
-
-  import akka.actor.TypedActor;
-  import static akka.actor.Actors.*;
-
-  SampleRemoteTypedConsumer obj = (SampleRemoteTypedConsumer)TypedActor.newInstance(
-    SampleRemoteTypedConsumer.class,
-    SampleRemoteTypedConsumerImpl.class);
-
-  remote().registerTypedActor("123", obj);
-  // ...
-
-
-Produce messages
-================
-
-A minimal prerequisite for producing messages to Camel endpoints with producer
-actors (see below) is an initialized and started CamelContextManager.
-
-**Scala**
-
-.. code-block:: scala
-
-  import akka.camel.CamelContextManager
-
-  CamelContextManager.init  // optionally takes a CamelContext as argument
-  CamelContextManager.start // starts the managed CamelContext
-
-**Java**
-
-.. code-block:: java
-
-  import akka.camel.CamelContextManager;
-
-  CamelContextManager.init();  // optionally takes a CamelContext as argument
-  CamelContextManager.start(); // starts the managed CamelContext
-
-For using producer actors, applications may also start a CamelService. This will
-not only set up a CamelContextManager behind the scenes but also register
-listeners at the actor registry (needed to publish consumer actors). If your
-application uses producer actors only and you don't want to have the (very
-small) overhead generated by the registry listeners, then setting up a
-CamelContextManager without starting a CamelService is recommended. Otherwise,
-just start a CamelService as described for consumer
-actors: :ref:`camel-consumers-and-camel-service`.
-
-
-Producer trait
---------------
-
-Actors (untyped)
-^^^^^^^^^^^^^^^^
-
-For sending messages to Camel endpoints, actors
-
-* written in Scala need to mix in the `Producer`_ trait and implement the
-  endpointUri method.
-
-* written in Java need to extend the abstract UntypedProducerActor class and
-  implement the getEndpointUri() method. By extending the UntypedProducerActor
-  class, untyped actors (Java) inherit the behaviour of the Producer trait.
-
-.. _Producer: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Producer.scala
-
-**Scala**
-
-.. code-block:: scala
-
-  import akka.actor.Actor
-  import akka.camel.Producer
-
-  class Producer1 extends Actor with Producer {
-    def endpointUri = "http://localhost:8080/news"
-  }
-
-**Java**
-
-.. code-block:: java
-
-  import akka.camel.UntypedProducerActor;
-
-  public class Producer1 extends UntypedProducerActor {
-    public String getEndpointUri() {
-      return "http://localhost:8080/news";
-    }
-  }
-
-Producer1 inherits a default implementation of the receive method from the
-Producer trait. To customize a producer actor's default behavior, it is
-recommended to override the Producer.receiveBeforeProduce and
-Producer.receiveAfterProduce methods.
This is explained later in more detail. -Actors should not override the default Producer.receive method. - -Any message sent to a Producer actor (or UntypedProducerActor) will be sent to -the associated Camel endpoint, in the above example to -``http://localhost:8080/news``. Response messages (if supported by the -configured endpoint) will, by default, be returned to the original sender. The -following example uses the ``?`` operator (Scala) to send a message to a -Producer actor and waits for a response. In Java, the sendRequestReply method is -used. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor._ - import akka.actor.ActorRef - - val producer = actorOf[Producer1] - val response = (producer ? "akka rocks").get - val body = response.bodyAs[String] - -**Java** - -.. code-block:: java - - import akka.actor.ActorRef; - import static akka.actor.Actors.*; - import akka.camel.Message; - - ActorRef producer = actorOf(Producer1.class); - Message response = (Message)producer.sendRequestReply("akka rocks"); - String body = response.getBodyAs(String.class) - -If the message is sent using the ! operator (or the tell method in Java) -then the response message is sent back asynchronously to the original sender. In -the following example, a Sender actor sends a message (a String) to a producer -actor using the ! operator and asynchronously receives a response (of type -Message). - -**Scala** - -.. code-block:: scala - - import akka.actor.{Actor, ActorRef} - import akka.camel.Message - - class Sender(producer: ActorRef) extends Actor { - def receive = { - case request: String => producer ! request - case response: Message => { - /* process response ... */ - } - // ... - } - } - -**Java** - -.. code-block:: java - - // TODO - - -.. _camel-custom-processing: - -Custom Processing -^^^^^^^^^^^^^^^^^ - -Instead of replying to the initial sender, producer actors can implement custom -reponse processing by overriding the receiveAfterProduce method (Scala) or -onReceiveAfterProduce method (Java). In the following example, the reponse -message is forwarded to a target actor instead of being replied to the original -sender. - -**Scala** - -.. code-block:: scala - - import akka.actor.{Actor, ActorRef} - import akka.camel.Producer - - class Producer1(target: ActorRef) extends Actor with Producer { - def endpointUri = "http://localhost:8080/news" - - override protected def receiveAfterProduce = { - // do not reply but forward result to target - case msg => target forward msg - } - } - -**Java** - -.. code-block:: java - - import akka.actor.ActorRef; - import akka.camel.UntypedProducerActor; - - public class Producer1 extends UntypedProducerActor { - private ActorRef target; - - public Producer1(ActorRef target) { - this.target = target; - } - - public String getEndpointUri() { - return "http://localhost:8080/news"; - } - - @Override - public void onReceiveAfterProduce(Object message) { - target.forward((Message)message, getContext()); - } - } - -To create an untyped actor instance with a constructor argument, a factory is -needed (this should be doable without a factory in upcoming Akka versions). - -.. 
code-block:: java
-
-  import akka.actor.ActorRef;
-  import akka.actor.UntypedActorFactory;
-  import akka.actor.UntypedActor;
-
-  public class Producer1Factory implements UntypedActorFactory {
-
-    private ActorRef target;
-
-    public Producer1Factory(ActorRef target) {
-      this.target = target;
-    }
-
-    public UntypedActor create() {
-      return new Producer1(target);
-    }
-  }
-
-The instantiation is done with the Actors.actorOf method, passing the factory as
-argument.
-
-.. code-block:: java
-
-  import static akka.actor.Actors.*;
-  import akka.actor.ActorRef;
-
-  ActorRef target = ...
-  ActorRef producer = actorOf(new Producer1Factory(target));
-  producer.start();
-
-Before producing messages to endpoints, producer actors can pre-process them by
-overriding the receiveBeforeProduce method (Scala) or onReceiveBeforeProduce
-method (Java).
-
-**Scala**
-
-.. code-block:: scala
-
-  import akka.actor.{Actor, ActorRef}
-  import akka.camel.{Message, Producer}
-
-  class Producer1(target: ActorRef) extends Actor with Producer {
-    def endpointUri = "http://localhost:8080/news"
-
-    override protected def receiveBeforeProduce = {
-      case msg: Message => {
-        // do some pre-processing (e.g. add endpoint-specific message headers)
-        // ...
-
-        // and return the modified message
-        msg
-      }
-    }
-  }
-
-**Java**
-
-.. code-block:: java
-
-  import akka.actor.ActorRef;
-  import akka.camel.Message;
-  import akka.camel.UntypedProducerActor;
-
-  public class Producer1 extends UntypedProducerActor {
-    private ActorRef target;
-
-    public Producer1(ActorRef target) {
-      this.target = target;
-    }
-
-    public String getEndpointUri() {
-      return "http://localhost:8080/news";
-    }
-
-    @Override
-    public Object onReceiveBeforeProduce(Object message) {
-      Message msg = (Message)message;
-      // do some pre-processing (e.g. add endpoint-specific message headers)
-      // ...
-
-      // and return the modified message
-      return msg;
-    }
-  }
-
-
-Producer configuration options
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The interaction of producer actors with Camel endpoints can be configured to be
-one-way or two-way (by initiating in-only or in-out message exchanges,
-respectively). By default, the producer initiates an in-out message exchange
-with the endpoint. For initiating an in-only exchange, producer actors
-
-* written in Scala have to override the oneway method to return true
-* written in Java have to override the isOneway method to return true.
-
-**Scala**
-
-.. code-block:: scala
-
-  import akka.camel.Producer
-
-  class Producer2 extends Actor with Producer {
-    def endpointUri = "jms:queue:test"
-    override def oneway = true
-  }
-
-**Java**
-
-.. code-block:: java
-
-  import akka.camel.UntypedProducerActor;
-
-  public class SampleUntypedReplyingProducer extends UntypedProducerActor {
-    public String getEndpointUri() {
-      return "jms:queue:test";
-    }
-
-    @Override
-    public boolean isOneway() {
-      return true;
-    }
-  }
-
-Message correlation
-^^^^^^^^^^^^^^^^^^^
-
-To correlate request and response messages, applications can set the
-Message.MessageExchangeId message header.
-
-**Scala**
-
-.. code-block:: scala
-
-  import akka.camel.Message
-
-  producer ! Message("bar", Map(Message.MessageExchangeId -> "123"))
-
-**Java**
-
-.. code-block:: java
-
-  // TODO
-
-Responses of type Message or Failure will contain that header as well. When
-receiving messages from Camel endpoints this message header is already set (see
-:ref:`camel-consume-messages`).
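-
-The Java variant above is still marked TODO. As an illustration only, a minimal
-sketch could look like the following. It assumes that ``Message`` can be
-constructed from Java with a body and a ``java.util.Map`` of headers, and that
-the ``MessageExchangeId`` constant is reachable from Java as written; neither
-is confirmed by this document.
-
-.. code-block:: java
-
-  import java.util.HashMap;
-  import java.util.Map;
-
-  import static akka.actor.Actors.*;
-  import akka.actor.ActorRef;
-  import akka.camel.Message;
-
-  ActorRef producer = actorOf(Producer1.class);
-
-  // assumption: headers can be passed as a java.util.Map when constructing a Message
-  Map<String, Object> headers = new HashMap<String, Object>();
-  // assumption: the Scala constant Message.MessageExchangeId is accessible like this
-  headers.put(Message.MessageExchangeId(), "123");
-
-  // send the correlated message to the producer actor (one-way)
-  producer.tell(new Message("bar", headers));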
- - -Matching responses -^^^^^^^^^^^^^^^^^^ - -The following code snippet shows how to best match responses when sending -messages with the ``?`` operator (Scala) or with the ``ask`` method -(Java). - -**Scala** - -.. code-block:: scala - - val response = (producer ? message).get - - response match { - case Some(Message(body, headers)) => ... - case Some(Failure(exception, headers)) => ... - case _ => ... - } - -**Java** - -.. code-block:: java - - // TODO - - -ProducerTemplate ----------------- - -The `Producer`_ trait (and the abstract UntypedProducerActor class) is a very -convenient way for actors to produce messages to Camel endpoints. (Untyped) -actors and typed actors may also use a Camel `ProducerTemplate`_ for producing -messages to endpoints. For typed actors it's the only way to produce messages to -Camel endpoints. - -At the moment, only the Producer trait fully supports asynchronous in-out -message exchanges with Camel endpoints without allocating a thread for the full -duration of the exchange. For example, when using endpoints that support -asynchronous message exchanges (such as Jetty endpoints that internally use -`Jetty's asynchronous HTTP client`_) then usage of the Producer trait is highly -recommended (see also :ref:`camel-asynchronous-routing`). - -.. _Producer: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/Producer.scala -.. _ProducerTemplate: http://camel.apache.org/maven/camel-2.2.0/camel-core/apidocs/index.html -.. _Jetty's asynchronous HTTP client: http://wiki.eclipse.org/Jetty/Tutorial/HttpClient - - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -A managed ProducerTemplate instance can be obtained via -CamelContextManager.mandatoryTemplate. In the following example, an actor uses a -ProducerTemplate to send a one-way message to a ``direct:news`` endpoint. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor - import akka.camel.CamelContextManager - - class ProducerActor extends Actor { - protected def receive = { - // one-way message exchange with direct:news endpoint - case msg => CamelContextManager.mandatoryTemplate.sendBody("direct:news", msg) - } - } - -**Java** - -.. code-block:: java - - import akka.actor.UntypedActor; - import akka.camel.CamelContextManager; - - public class SampleUntypedActor extends UntypedActor { - public void onReceive(Object msg) { - CamelContextManager.getMandatoryTemplate().sendBody("direct:news", msg); - } - } - -Alternatively, one can also use ``Option[ProducerTemplate]`` returned by -``CamelContextManager.template``. - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor - import akka.camel.CamelContextManager - - class ProducerActor extends Actor { - protected def receive = { - // one-way message exchange with direct:news endpoint - case msg => for(t <- CamelContextManager.template) t.sendBody("direct:news", msg) - } - } - -**Java** - -.. code-block:: java - - import org.apache.camel.ProducerTemplate - - import akka.actor.UntypedActor; - import akka.camel.CamelContextManager; - - public class SampleUntypedActor extends UntypedActor { - public void onReceive(Object msg) { - for (ProducerTemplate t : CamelContextManager.getTemplate()) { - t.sendBody("direct:news", msg); - } - } - } - -For initiating a a two-way message exchange, one of the -``ProducerTemplate.request*`` methods must be used. - -**Scala** - -.. 
code-block:: scala
-
-  import akka.actor.Actor
-  import akka.camel.CamelContextManager
-
-  class ProducerActor extends Actor {
-    protected def receive = {
-      // two-way message exchange with direct:news endpoint
-      case msg => self.reply(CamelContextManager.mandatoryTemplate.requestBody("direct:news", msg))
-    }
-  }
-
-**Java**
-
-.. code-block:: java
-
-  import akka.actor.UntypedActor;
-  import akka.camel.CamelContextManager;
-
-  public class SampleUntypedActor extends UntypedActor {
-    public void onReceive(Object msg) {
-      getContext().tryReply(CamelContextManager.getMandatoryTemplate().requestBody("direct:news", msg));
-    }
-  }
-
-
-Typed actors
-^^^^^^^^^^^^
-
-Typed actors get access to a managed ProducerTemplate in the same way, as shown
-in the next example.
-
-**Scala**
-
-.. code-block:: scala
-
-  // TODO
-
-**Java**
-
-.. code-block:: java
-
-  import org.apache.camel.ProducerTemplate;
-
-  import akka.actor.TypedActor;
-  import akka.camel.CamelContextManager;
-
-  public class SampleProducerImpl extends TypedActor implements SampleProducer {
-    public void foo(String msg) {
-      ProducerTemplate template = CamelContextManager.getMandatoryTemplate();
-      template.sendBody("direct:news", msg);
-    }
-  }
-
-
-.. _camel-asynchronous-routing:
-
-Asynchronous routing
-====================
-
-Since Akka 0.10, in-out message exchanges between endpoints and actors are
-designed to be asynchronous. This is the case for both consumer and producer
-actors.
-
-* A consumer endpoint sends request messages to its consumer actor using the ``!``
-  (bang) operator and the actor returns responses with self.reply once they are
-  ready. The sender reference used for the reply is an adapter to Camel's
-  asynchronous routing engine that implements the ActorRef trait.
-
-* A producer actor sends request messages to its endpoint using Camel's
-  asynchronous routing engine. Asynchronous responses are wrapped and added to the
-  producer actor's mailbox for later processing. By default, response messages are
-  returned to the initial sender but this can be overridden by Producer
-  implementations (see also the description of the ``receiveAfterProduce`` method
-  in :ref:`camel-custom-processing`).
-
-However, asynchronous two-way message exchanges, without allocating a thread for
-the full duration of the exchange, cannot be generically supported by Camel's
-asynchronous routing engine alone. This must be supported by the individual
-`Camel components`_ (from which endpoints are created) as well. They must be
-able to suspend any work started for request processing (thereby freeing threads
-to do other work) and resume processing when the response is ready. This is
-currently the case for a `subset of components`_ such as the `Jetty component`_.
-All other Camel components can still be used, of course, but they will cause
-allocation of a thread for the duration of an in-out message exchange. There's
-also a :ref:`camel-async-example` that implements both an asynchronous
-consumer and an asynchronous producer with the Jetty component.
-
-.. _Camel components: http://camel.apache.org/components.html
-.. _subset of components: http://camel.apache.org/asynchronous-routing-engine.html
-.. _Jetty component: http://camel.apache.org/jetty.html
-
-
-Fault tolerance
-===============
-
-Consumer actors and typed actors can also be managed by supervisors. If a
-consumer is configured to be restarted upon failure, the associated Camel
-endpoint is not restarted. Its behaviour during restart is as follows.
- -* A one-way (in-only) message exchange will be queued by the consumer and - processed once restart completes. - -* A two-way (in-out) message exchange will wait and either succeed after restart - completes or time-out when the restart duration exceeds - the :ref:`camel-timeout`. - -If a consumer is configured to be shut down upon failure, the associated -endpoint is shut down as well. For details refer to :ref:`camel-unpublishing`. - -For examples, tips and trick how to implement fault-tolerant consumer and -producer actors, take a look at these two articles. - -* `Akka Consumer Actors: New Features and Best Practices `_ -* `Akka Producer Actors: New Features and Best Practices `_ - - -.. _camel-configuration: - -CamelService configuration -========================== - -For publishing consumer actors and typed actor methods -(:ref:`camel-publishing`), applications must start a CamelService. When starting -Akka in :ref:`microkernel` mode then a CamelService can be started automatically -when camel is added to the enabled-modules list in :ref:`configuration`, for example: - -.. code-block:: none - - akka { - ... - enabled-modules = ["camel"] # Options: ["remote", "camel", "http"] - ... - } - -Applications that do not use the Akka Kernel, such as standalone applications -for example, need to start a CamelService manually, as explained in the -following subsections.When starting a CamelService manually, settings in -:ref:`configuration` are ignored. - - -Standalone applications ------------------------ - -Standalone application should create and start a CamelService in the following way. - -**Scala** - -.. code-block:: scala - - import akka.camel.CamelServiceManager._ - - startCamelService - -**Java** - -.. code-block:: java - - import static akka.camel.CamelServiceManager.*; - - startCamelService(); - -Internally, a CamelService uses the CamelContextManager singleton to manage a -CamelContext. A CamelContext manages the routes from endpoints to consumer -actors and typed actors. These routes are added and removed at runtime (when -(untyped) consumer actors and typed consumer actors are started and stopped). -Applications may additionally want to add their own custom routes or modify the -CamelContext in some other way. This can be done by initializing the -CamelContextManager manually and making modifications to CamelContext **before** -the CamelService is started. - -**Scala** - -.. code-block:: scala - - import org.apache.camel.builder.RouteBuilder - - import akka.camel.CamelContextManager - import akka.camel.CamelServiceManager._ - - CamelContextManager.init - - // add a custom route to the managed CamelContext - CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder) - - startCamelService - - // an application-specific route builder - class CustomRouteBuilder extends RouteBuilder { - def configure { - // ... - } - } - -**Java** - -.. code-block:: java - - import org.apache.camel.builder.RouteBuilder; - - import akka.camel.CamelContextManager; - import static akka.camel.CamelServiceManager.*; - - CamelContextManager.init(); - - // add a custom route to the managed CamelContext - CamelContextManager.getMandatoryContext().addRoutes(new CustomRouteBuilder()); - - startCamelService(); - - // an application-specific route builder - private static class CustomRouteBuilder extends RouteBuilder { - public void configure() { - // ... - } - } - - -Applications may even provide their own CamelContext instance as argument to the -init method call as shown in the following snippet. 
Here, a DefaultCamelContext -is created using a Spring application context as `registry`_. - -.. _registry: http://camel.apache.org/registry.html - - -**Scala** - -.. code-block:: scala - - import org.apache.camel.impl.DefaultCamelContext - import org.apache.camel.spring.spi.ApplicationContextRegistry - import org.springframework.context.support.ClassPathXmlApplicationContext - - import akka.camel.CamelContextManager - import akka.camel.CamelServiceManager._ - - // create a custom Camel registry backed up by a Spring application context - val context = new ClassPathXmlApplicationContext("/context.xml") - val registry = new ApplicationContextRegistry(context) - - // initialize CamelContextManager with a DefaultCamelContext using the custom registry - CamelContextManager.init(new DefaultCamelContext(registry)) - - // ... - - startCamelService - -**Java** - -.. code-block:: java - - import org.apache.camel.impl.DefaultCamelContext - import org.apache.camel.spi.Registry; - import org.apache.camel.spring.spi.ApplicationContextRegistry; - - import org.springframework.context.ApplicationContext; - import org.springframework.context.support.ClassPathXmlApplicationContext; - - import akka.camel.CamelContextManager; - import static akka.camel.CamelServiceManager.*; - - // create a custom Camel registry backed up by a Spring application context - ApplicationContext context = new ClassPathXmlApplicationContext("/context.xml"); - Registry registry = new ApplicationContextRegistry(context); - - // initialize CamelContextManager with a DefaultCamelContext using the custom registry - CamelContextManager.init(new DefaultCamelContext(registry)); - - // ... - - startCamelService(); - - -.. _camel-spring-applications: - -Standalone Spring applications ------------------------------- - -A better approach to configure a Spring application context as registry for the -CamelContext is to use `Camel's Spring support`_. Furthermore, -the :ref:`spring-module` module additionally supports a element -for creating and starting a CamelService. An optional reference to a custom -CamelContext can be defined for as well. Here's an example. - -.. _Camel's Spring support: http://camel.apache.org/spring.html - -.. code-block:: xml - - - - - - - - - - - - - - - - - -Creating a CamelContext this way automatically adds the defining Spring -application context as registry to that CamelContext. The CamelService is -started when the application context is started and stopped when the application -context is closed. A simple usage example is shown in the following snippet. - -**Scala** - -.. code-block:: scala - - import org.springframework.context.support.ClassPathXmlApplicationContext - import akka.camel.CamelContextManager - - // Create and start application context (start CamelService) - val appctx = new ClassPathXmlApplicationContext("/context.xml") - - // Access to CamelContext (SpringCamelContext) - val ctx = CamelContextManager.mandatoryContext - // Access to ProducerTemplate of that CamelContext - val tpl = CamelContextManager.mandatoryTemplate - - // use ctx and tpl ... - - // Close application context (stop CamelService) - appctx.close - -**Java** - -.. code-block:: java - - // TODO - - -If the CamelService doesn't reference a custom CamelContext then a -DefaultCamelContext is created (and accessible via the CamelContextManager). - -.. 
code-block:: xml - - - - - - - - - -Kernel mode ------------ - -For classes that are loaded by the Kernel or the Initializer, starting the -CamelService can be omitted, as discussed in the previous section. Since these -classes are loaded and instantiated before the CamelService is started (by -Akka), applications can make modifications to a CamelContext here as well (and -even provide their own CamelContext). Assuming there's a boot class -sample.camel.Boot configured in :ref:`configuration`. - -.. code-block:: none - - akka { - ... - boot = ["sample.camel.Boot"] - ... - } - -Modifications to the CamelContext can be done like in the following snippet. - -**Scala** - -.. code-block:: scala - - package sample.camel - - import org.apache.camel.builder.RouteBuilder - - import akka.camel.CamelContextManager - - class Boot { - CamelContextManager.init - - // Customize CamelContext with application-specific routes - CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder) - - // No need to start CamelService here. It will be started - // when this classes has been loaded and instantiated. - } - - class CustomRouteBuilder extends RouteBuilder { - def configure { - // ... - } - } - -**Java** - -.. code-block:: java - - // TODO - - -Custom Camel routes -=================== - -In all the examples so far, routes to consumer actors have been automatically -constructed by akka-camel, when the actor was started. Although the default -route construction templates, used by akka-camel internally, are sufficient for -most use cases, some applications may require more specialized routes to actors. -The akka-camel module provides two mechanisms for customizing routes to actors, -which will be explained in this section. These are - -* Usage of :ref:`camel-components` to access (untyped) actor and actors. - Any Camel route can use these components to access Akka actors. - -* :ref:`camel-intercepting-route-construction` to (untyped) actor and actors. - Default routes to consumer actors are extended using predefined extension - points. - - -.. _camel-components: - -Akka Camel components ---------------------- - -Akka actors can be access from Camel routes using the `actor`_ and -`typed-actor`_ Camel components, respectively. These components can be used to -access any Akka actor (not only consumer actors) from Camel routes, as described -in the following sections. - -.. _actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/ActorComponent.scala -.. _typed-actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/TypedActorComponent.scala - - -Access to actors ----------------- - -To access (untyped) actors from custom Camel routes, the `actor`_ Camel -component should be used. It fully supports Camel's `asynchronous routing -engine`_. - -.. _actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/ActorComponent.scala -.. _asynchronous routing engine: http://camel.apache.org/asynchronous-routing-engine.html - -This component accepts the following enpoint URI formats: - -* ``actor:[?]`` -* ``actor:id:[][?]`` -* ``actor:uuid:[][?]`` - -where ```` and ```` refer to ``actorRef.id`` and the -String-representation of ``actorRef.uuid``, respectively. The ```` are -name-value pairs separated by ``&`` (i.e. ``name1=value1&name2=value2&...``). 
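-
-As an illustrative sketch only (the ``direct:ack-test`` endpoint and the actor
-id ``myConsumer`` are made-up names, not taken from this document), a custom
-Camel route that addresses an actor by id and disables auto-acknowledgement
-could be defined in a Java RouteBuilder like this, using the URI options
-described below:
-
-.. code-block:: java
-
-  import org.apache.camel.builder.RouteBuilder;
-
-  public class ActorIdRouteBuilder extends RouteBuilder {
-    public void configure() {
-      // hypothetical route: messages sent to direct:ack-test are delivered to the
-      // actor registered with id "myConsumer"; autoack=false means the actor must
-      // acknowledge receipt itself (see the autoack option in the table below)
-      from("direct:ack-test").to("actor:id:myConsumer?autoack=false");
-    }
-  }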
- - -URI options -^^^^^^^^^^^ - -The following URI options are supported: - -+----------+---------+---------+-------------------------------------------+ -| Name | Type | Default | Description | -+==========+=========+=========+===========================================+ -| blocking | Boolean | false | If set to true, in-out message exchanges | -| | | | with the target actor will be made with | -| | | | the ``!!`` operator, otherwise with the | -| | | | ``!`` operator. | -| | | | | -| | | | See also :ref:`camel-timeout`. | -+----------+---------+---------+-------------------------------------------+ -| autoack | Boolean | true | If set to true, in-only message exchanges | -| | | | are auto-acknowledged when the message is | -| | | | added to the actor's mailbox. If set to | -| | | | false, actors must acknowledge the | -| | | | receipt of the message. | -| | | | | -| | | | See also :ref:`camel-acknowledgements`. | -+----------+---------+---------+-------------------------------------------+ - -Here's an actor endpoint URI example containing an actor uuid:: - - actor:uuid:12345678?blocking=true - -In actor endpoint URIs that contain id: or uuid:, an actor identifier (id or -uuid) is optional. In this case, the in-message of an exchange produced to an -actor endpoint must contain a message header with name CamelActorIdentifier -(which is defined by the ActorComponent.ActorIdentifier field) and a value that -is the target actor's identifier. On the other hand, if the URI contains an -actor identifier, it can be seen as a default actor identifier that can be -overridden by messages containing a CamelActorIdentifier header. - - -Message headers -^^^^^^^^^^^^^^^ - -+----------------------+--------+-------------------------------------------+ -| Name | Type | Description | -+======================+========+===========================================+ -| CamelActorIdentifier | String | Contains the identifier (id or uuid) of | -| | | the actor to route the message to. The | -| | | identifier is interpreted as actor id if | -| | | the URI contains id:, the identifier is | -| | | interpreted as uuid id the URI contains | -| | | uuid:. A uuid value may also be of type | -| | | Uuid (not only String). The header name | -| | | is defined by the | -| | | ActorComponent.ActorIdentifier field. | -+----------------------+--------+-------------------------------------------+ - -Here's another actor endpoint URI example that doesn't define an actor uuid. In -this case the target actor uuid must be defined by the CamelActorIdentifier -message header:: - - actor:uuid: - -In the following example, a custom route to an actor is created, using the -actor's uuid (i.e. actorRef.uuid). The route starts from a `Jetty`_ endpoint and -ends at the target actor. - - -**Scala** - -.. 
code-block:: scala - - import org.apache.camel.builder.RouteBuilder - - import akka.actor._ - import akka.actor.Actor - import akka.actor.Actor._ - import akka.camel.{Message, CamelContextManager, CamelServiceManager} - - object CustomRouteExample extends Application { - val target = actorOf[CustomRouteTarget] - - CamelServiceManager.startCamelService - CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder(target.uuid)) - } - - class CustomRouteTarget extends Actor { - def receive = { - case msg: Message => self.reply("Hello %s" format msg.bodyAs[String]) - } - } - - class CustomRouteBuilder(uuid: Uuid) extends RouteBuilder { - def configure { - val actorUri = "actor:uuid:%s" format uuid - from("jetty:http://localhost:8877/camel/custom").to(actorUri) - } - } - - -**Java** - -.. code-block:: java - - import com.eaio.uuid.UUID; - - import org.apache.camel.builder.RouteBuilder; - import static akka.actor.Actors.*; - import akka.actor.ActorRef; - import akka.actor.UntypedActor; - import akka.camel.CamelServiceManager; - import akka.camel.CamelContextManager; - import akka.camel.Message; - - public class CustomRouteExample { - public static void main(String... args) throws Exception { - ActorRef target = actorOf(CustomRouteTarget.class); - CamelServiceManager.startCamelService(); - CamelContextManager.getMandatoryContext().addRoutes(new CustomRouteBuilder(target.getUuid())); - } - } - - public class CustomRouteTarget extends UntypedActor { - public void onReceive(Object message) { - Message msg = (Message) message; - String body = msg.getBodyAs(String.class); - getContext().tryReply(String.format("Hello %s", body)); - } - } - - public class CustomRouteBuilder extends RouteBuilder { - private UUID uuid; - - public CustomRouteBuilder(UUID uuid) { - this.uuid = uuid; - } - - public void configure() { - String actorUri = String.format("actor:uuid:%s", uuid); - from("jetty:http://localhost:8877/camel/custom").to(actorUri); - } - } - -When the example is started, messages POSTed to -``http://localhost:8877/camel/custom`` are routed to the target actor. - - -Access to typed actors ----------------------- - -To access typed actor methods from custom Camel routes, the `typed-actor`_ Camel -component should be used. It is a specialization of the Camel `bean`_ component. -Applications should use the interface (endpoint URI syntax and options) as -described in the bean component documentation but with the typed-actor schema. -Typed Actors must be added to a `Camel registry`_ for being accessible by the -typed-actor component. - -.. _typed-actor: http://github.com/jboner/akka/blob/master/akka-camel/src/main/scala/akka/camel/component/TypedActorComponent.scala -.. _bean: http://camel.apache.org/bean.html -.. _Camel registry: http://camel.apache.org/registry.html - - -.. _camel-typed-actors-using-spring: - -Using Spring -^^^^^^^^^^^^ - -The following example shows how to access typed actors in a Spring application -context. For adding typed actors to the application context and for starting -:ref:`camel-spring-applications` the :ref:`spring-module` module is used in the -following example. It offers a ```` element to define typed actor -factory beans and a ```` element to create and start a -CamelService. - -.. code-block:: xml - - - - - - - - - - - - - - - - - -SampleTypedActor is the typed actor interface and SampleTypedActorImpl in the -typed actor implementation class. - -**Scala** - -.. 
code-block:: scala - - package sample - - import akka.actor.TypedActor - - trait SampleTypedActor { - def foo(s: String): String - } - - class SampleTypedActorImpl extends TypedActor with SampleTypedActor { - def foo(s: String) = "hello %s" format s - } - -**Java** - -.. code-block:: java - - package sample; - - import akka.actor.TypedActor; - - public interface SampleTypedActor { - public String foo(String s); - } - - public class SampleTypedActorImpl extends TypedActor implements SampleTypedActor { - - public String foo(String s) { - return "hello " + s; - } - } - -The SampleRouteBuilder defines a custom route from the direct:test endpoint to -the sample typed actor using a typed-actor endpoint URI. - -**Scala** - -.. code-block:: scala - - package sample - - import org.apache.camel.builder.RouteBuilder - - class SampleRouteBuilder extends RouteBuilder { - def configure = { - // route to typed actor - from("direct:test").to("typed-actor:sample?method=foo") - } - } - -**Java** - -.. code-block:: java - - package sample; - - import org.apache.camel.builder.RouteBuilder; - - public class SampleRouteBuilder extends RouteBuilder { - public void configure() { - // route to typed actor - from("direct:test").to("typed-actor:sample?method=foo"); - } - } - -The typed-actor endpoint URI syntax is::: - - typed-actor:?method= - -where ```` is the id of the bean in the Spring application context and -```` is the name of the typed actor method to invoke. - -Usage of the custom route for sending a message to the typed actor is shown in -the following snippet. - -**Scala** - -.. code-block:: scala - - package sample - - import org.springframework.context.support.ClassPathXmlApplicationContext - import akka.camel.CamelContextManager - - // load Spring application context (starts CamelService) - val appctx = new ClassPathXmlApplicationContext("/context-standalone.xml") - - // access 'sample' typed actor via custom route - assert("hello akka" == CamelContextManager.mandatoryTemplate.requestBody("direct:test", "akka")) - - // close Spring application context (stops CamelService) - appctx.close - -**Java** - -.. code-block:: java - - package sample; - - import org.springframework.context.support.ClassPathXmlApplicationContext; - import akka.camel.CamelContextManager; - - // load Spring application context - ClassPathXmlApplicationContext appctx = new ClassPathXmlApplicationContext("/context-standalone.xml"); - - // access 'externally' registered typed actors with typed-actor component - assert("hello akka" == CamelContextManager.getMandatoryTemplate().requestBody("direct:test", "akka")); - - // close Spring application context (stops CamelService) - appctx.close(); - -The application uses a Camel `producer template`_ to access the typed actor via -the ``direct:test`` endpoint. - -.. _producer template: http://camel.apache.org/producertemplate.html - - -Without Spring -^^^^^^^^^^^^^^ - -Usage of :ref:`spring-module` for adding typed actors to the Camel registry and -starting a CamelService is optional. Setting up a Spring-less application for -accessing typed actors is shown in the next example. - -**Scala** - -.. 
code-block:: scala - - package sample - - import org.apache.camel.impl.{DefaultCamelContext, SimpleRegistry} - import akka.actor.TypedActor - import akka.camel.CamelContextManager - import akka.camel.CamelServiceManager._ - - // register typed actor - val registry = new SimpleRegistry - registry.put("sample", TypedActor.newInstance(classOf[SampleTypedActor], classOf[SampleTypedActorImpl])) - - // customize CamelContext - CamelContextManager.init(new DefaultCamelContext(registry)) - CamelContextManager.mandatoryContext.addRoutes(new SampleRouteBuilder) - - startCamelService - - // access 'sample' typed actor via custom route - assert("hello akka" == CamelContextManager.mandatoryTemplate.requestBody("direct:test", "akka")) - - stopCamelService - -**Java** - -.. code-block:: java - - package sample; - - // register typed actor - SimpleRegistry registry = new SimpleRegistry(); - registry.put("sample", TypedActor.newInstance(SampleTypedActor.class, SampleTypedActorImpl.class)); - - // customize CamelContext - CamelContextManager.init(new DefaultCamelContext(registry)); - CamelContextManager.getMandatoryContext().addRoutes(new SampleRouteBuilder()); - - startCamelService(); - - // access 'sample' typed actor via custom route - assert("hello akka" == CamelContextManager.getMandatoryTemplate().requestBody("direct:test", "akka")); - - stopCamelService(); - -Here, `SimpleRegistry`_, a java.util.Map based registry, is used to register -typed actors. The CamelService is started and stopped programmatically. - -.. _SimpleRegistry: https://svn.apache.org/repos/asf/camel/trunk/camel-core/src/main/java/org/apache/camel/impl/SimpleRegistry.java - - -.. _camel-intercepting-route-construction: - -Intercepting route construction -------------------------------- - -The previous section, :ref:`camel-components`, explained how to setup a route to -an (untyped) actor or typed actor manually. It was the application's -responsibility to define the route and add it to the current CamelContext. This -section explains a more conventient way to define custom routes: akka-camel is -still setting up the routes to consumer actors (and adds these routes to the -current CamelContext) but applications can define extensions to these routes. -Extensions can be defined with Camel's `Java DSL`_ or `Scala DSL`_. For example, -an extension could be a custom error handler that redelivers messages from an -endpoint to an actor's bounded mailbox when the mailbox was full. - -.. _Java DSL: http://camel.apache.org/dsl.html -.. _Scala DSL: http://camel.apache.org/scala-dsl.html - -The following examples demonstrate how to extend a route to a consumer actor for -handling exceptions thrown by that actor. To simplify the example, we configure -:ref:`camel-blocking-exchanges` which reports any exception, that is thrown by -receive, directly back to the Camel route. One could also report exceptions -asynchronously using a Failure reply (see also `this article`__) but we'll do it -differently here. - -__ http://krasserm.blogspot.com/2011/02/akka-consumer-actors-new-features-and.html - - -Actors (untyped) -^^^^^^^^^^^^^^^^ - -**Scala** - -.. 
code-block:: scala - - import akka.actor.Actor - import akka.camel.Consumer - - import org.apache.camel.builder.Builder - import org.apache.camel.model.RouteDefinition - - class ErrorHandlingConsumer extends Actor with Consumer { - def endpointUri = "direct:error-handler-test" - - // Needed to propagate exception back to caller - override def blocking = true - - onRouteDefinition {rd: RouteDefinition => - // Catch any exception and handle it by returning the exception message as response - rd.onException(classOf[Exception]).handled(true).transform(Builder.exceptionMessage).end - } - - protected def receive = { - case msg: Message => throw new Exception("error: %s" format msg.body) - } - } - -**Java** - -.. code-block:: java - - import akka.camel.UntypedConsumerActor; - - import org.apache.camel.builder.Builder; - import org.apache.camel.model.ProcessorDefinition; - import org.apache.camel.model.RouteDefinition; - - public class SampleErrorHandlingConsumer extends UntypedConsumerActor { - - public String getEndpointUri() { - return "direct:error-handler-test"; - } - - // Needed to propagate exception back to caller - public boolean isBlocking() { - return true; - } - - public void preStart() { - onRouteDefinition(new RouteDefinitionHandler() { - public ProcessorDefinition onRouteDefinition(RouteDefinition rd) { - // Catch any exception and handle it by returning the exception message as response - return rd.onException(Exception.class).handled(true).transform(Builder.exceptionMessage()).end(); - } - }); - } - - public void onReceive(Object message) throws Exception { - Message msg = (Message)message; - String body = msg.getBodyAs(String.class); - throw new Exception(String.format("error: %s", body)); - } - - } - - - -For (untyped) actors, consumer route extensions are defined by calling the -onRouteDefinition method with a route definition handler. In Scala, this is a -function of type ``RouteDefinition => ProcessorDefinition[_]``, in Java it is an -instance of ``RouteDefinitionHandler`` which is defined as follows. - -.. code-block:: scala - - package akka.camel - - import org.apache.camel.model.RouteDefinition - import org.apache.camel.model.ProcessorDefinition - - trait RouteDefinitionHandler { - def onRouteDefinition(rd: RouteDefinition): ProcessorDefinition[_] - } - -The akka-camel module creates a RouteDefinition instance by calling -from(endpointUri) on a Camel RouteBuilder (where endpointUri is the endpoint URI -of the consumer actor) and passes that instance as argument to the route -definition handler \*). The route definition handler then extends the route and -returns a ProcessorDefinition (in the above example, the ProcessorDefinition -returned by the end method. See the `org.apache.camel.model`__ package for -details). After executing the route definition handler, akka-camel finally calls -a to(actor:uuid:actorUuid) on the returned ProcessorDefinition to complete the -route to the comsumer actor (where actorUuid is the uuid of the consumer actor). - -\*) Before passing the RouteDefinition instance to the route definition handler, -akka-camel may make some further modifications to it. - -__ https://svn.apache.org/repos/asf/camel/trunk/camel-core/src/main/java/org/apache/camel/model/ - - -Typed actors -^^^^^^^^^^^^ - -For typed consumer actors to define a route definition handler, they must -provide a RouteDefinitionHandler implementation class with the @consume -annotation. The implementation class must have a no-arg constructor. Here's an -example (in Java). - -.. 
code-block:: java - - import org.apache.camel.builder.Builder; - import org.apache.camel.model.ProcessorDefinition; - import org.apache.camel.model.RouteDefinition; - - public class SampleRouteDefinitionHandler implements RouteDefinitionHandler { - public ProcessorDefinition onRouteDefinition(RouteDefinition rd) { - return rd.onException(Exception.class).handled(true).transform(Builder.exceptionMessage()).end(); - } - } - -It can be used as follows. - -**Scala** - -.. code-block:: scala - - trait TestTypedConsumer { - @consume(value="direct:error-handler-test", routeDefinitionHandler=classOf[SampleRouteDefinitionHandler]) - def foo(s: String): String - } - - // implementation class omitted - -**Java** - -.. code-block:: java - - public interface SampleErrorHandlingTypedConsumer { - - @consume(value="direct:error-handler-test", routeDefinitionHandler=SampleRouteDefinitionHandler.class) - String foo(String s); - - } - - // implementation class omitted - - -.. _camel-examples: - -Examples -======== - -For all features described so far, there's running sample code in -`akka-sample-camel`_. The examples in `sample.camel.Boot`_ are started during -Kernel startup because this class has been added to the boot :ref:`configuration`. - -.. _akka-sample-camel: http://github.com/jboner/akka/tree/master/akka-samples/akka-sample-camel/ -.. _sample.camel.Boot: http://github.com/jboner/akka/blob/master/akka-samples/akka-sample-camel/src/main/scala/sample/camel/Boot.scala - -.. code-block:: none - - akka { - ... - boot = ["sample.camel.Boot", ...] - ... - } - -If you don't want to have these examples started during Kernel startup, delete -it from the :ref:`configuration`. Other examples are standalone applications (i.e. classes with a -main method) that can be started from `sbt`_. - -.. _sbt: http://code.google.com/p/simple-build-tool/ - -.. code-block:: none - - $ sbt - [info] Building project akka 2.0-SNAPSHOT against Scala 2.9.0 - [info] using AkkaModulesParentProject with sbt 0.7.7 and Scala 2.7.7 - > project akka-sample-camel - Set current project to akka-sample-camel 2.0-SNAPSHOT - > run - ... - Multiple main classes detected, select one to run: - - [1] sample.camel.ClientApplication - [2] sample.camel.ServerApplication - [3] sample.camel.StandaloneSpringApplication - [4] sample.camel.StandaloneApplication - [5] sample.camel.StandaloneFileApplication - [6] sample.camel.StandaloneJmsApplication - - -Some of the examples in `akka-sample-camel`_ are described in more detail in the -following subsections. - - -.. _camel-async-example: - -Asynchronous routing and transformation example ------------------------------------------------ - -This example demonstrates how to implement consumer and producer actors that -support :ref:`camel-asynchronous-routing` with their Camel endpoints. The sample -application transforms the content of the Akka homepage, http://akka.io, by -replacing every occurrence of *Akka* with *AKKA*. After starting -the :ref:`microkernel`, direct the browser to http://localhost:8875 and the -transformed Akka homepage should be displayed. Please note that this example -will probably not work if you're behind an HTTP proxy. - -The following figure gives an overview how the example actors interact with -external systems and with each other. A browser sends a GET request to -http://localhost:8875 which is the published endpoint of the ``HttpConsumer`` -actor. The ``HttpConsumer`` actor forwards the requests to the ``HttpProducer`` -actor which retrieves the Akka homepage from http://akka.io. 
The retrieved HTML -is then forwarded to the ``HttpTransformer`` actor which replaces all occurences -of *Akka* with *AKKA*. The transformation result is sent back the HttpConsumer -which finally returns it to the browser. - -.. image:: camel-async-interact.png - -Implementing the example actor classes and wiring them together is rather easy -as shown in the following snippet (see also `sample.camel.Boot`_). - -.. code-block:: scala - - import org.apache.camel.Exchange - import akka.actor.Actor._ - import akka.actor.{Actor, ActorRef} - import akka.camel.{Producer, Message, Consumer} - - class HttpConsumer(producer: ActorRef) extends Actor with Consumer { - def endpointUri = "jetty:http://0.0.0.0:8875/" - - protected def receive = { - case msg => producer forward msg - } - } - - class HttpProducer(transformer: ActorRef) extends Actor with Producer { - def endpointUri = "jetty://http://akka.io/?bridgeEndpoint=true" - - override protected def receiveBeforeProduce = { - // only keep Exchange.HTTP_PATH message header (which needed by bridge endpoint) - case msg: Message => msg.setHeaders(msg.headers(Set(Exchange.HTTP_PATH))) - } - - override protected def receiveAfterProduce = { - // do not reply but forward result to transformer - case msg => transformer forward msg - } - } - - class HttpTransformer extends Actor { - protected def receive = { - case msg: Message => self.reply(msg.transformBody {body: String => body replaceAll ("Akka ", "AKKA ")}) - case msg: Failure => self.reply(msg) - } - } - - // Wire and start the example actors - val httpTransformer = actorOf(new HttpTransformer) - val httpProducer = actorOf(new HttpProducer(httpTransformer)) - val httpConsumer = actorOf(new HttpConsumer(httpProducer)) - -The `jetty endpoints`_ of HttpConsumer and HttpProducer support asynchronous -in-out message exchanges and do not allocate threads for the full duration of -the exchange. This is achieved by using `Jetty continuations`_ on the -consumer-side and by using `Jetty's asynchronous HTTP client`_ on the producer -side. The following high-level sequence diagram illustrates that. - -.. _jetty endpoints: http://camel.apache.org/jetty.html -.. _Jetty continuations: http://wiki.eclipse.org/Jetty/Feature/Continuations -.. _Jetty's asynchronous HTTP client: http://wiki.eclipse.org/Jetty/Tutorial/HttpClient - -.. image:: camel-async-sequence.png - - -Custom Camel route example --------------------------- - -This section also demonstrates the combined usage of a ``Producer`` and a -``Consumer`` actor as well as the inclusion of a custom Camel route. The -following figure gives an overview. - -.. image:: camel-custom-route.png - -* A consumer actor receives a message from an HTTP client - -* It forwards the message to another actor that transforms the message (encloses - the original message into hyphens) - -* The transformer actor forwards the transformed message to a producer actor - -* The producer actor sends the message to a custom Camel route beginning at the - ``direct:welcome`` endpoint - -* A processor (transformer) in the custom Camel route prepends "Welcome" to the - original message and creates a result message - -* The producer actor sends the result back to the consumer actor which returns - it to the HTTP client - - -The example is part of `sample.camel.Boot`_. The consumer, transformer and -producer actor implementations are as follows. - -.. 
code-block:: scala - - package sample.camel - - import akka.actor.{Actor, ActorRef} - import akka.camel.{Message, Consumer} - - class Consumer3(transformer: ActorRef) extends Actor with Consumer { - def endpointUri = "jetty:http://0.0.0.0:8877/camel/welcome" - - def receive = { - // Forward a string representation of the message body to transformer - case msg: Message => transformer.forward(msg.setBodyAs[String]) - } - } - - class Transformer(producer: ActorRef) extends Actor { - protected def receive = { - // example: transform message body "foo" to "- foo -" and forward result to producer - case msg: Message => producer.forward(msg.transformBody((body: String) => "- %s -" format body)) - } - } - - class Producer1 extends Actor with Producer { - def endpointUri = "direct:welcome" - } - -The producer actor knows where to reply the message to because the consumer and -transformer actors have forwarded the original sender reference as well. The -application configuration and the route starting from direct:welcome are as -follows. - -.. code-block:: scala - - package sample.camel - - import org.apache.camel.builder.RouteBuilder - import org.apache.camel.{Exchange, Processor} - - import akka.actor.Actor._ - import akka.camel.CamelContextManager - - class Boot { - CamelContextManager.init() - CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder) - - val producer = actorOf[Producer1] - val mediator = actorOf(new Transformer(producer)) - val consumer = actorOf(new Consumer3(mediator)) - } - - class CustomRouteBuilder extends RouteBuilder { - def configure { - from("direct:welcome").process(new Processor() { - def process(exchange: Exchange) { - // Create a 'welcome' message from the input message - exchange.getOut.setBody("Welcome %s" format exchange.getIn.getBody) - } - }) - } - } - -To run the example, start the :ref:`microkernel` and POST a message to -``http://localhost:8877/camel/welcome``. - -.. code-block:: none - - curl -H "Content-Type: text/plain" -d "Anke" http://localhost:8877/camel/welcome - -The response should be: - -.. code-block:: none - - Welcome - Anke - - - -Publish-subcribe example ------------------------- - -JMS -^^^ - -This section demonstrates how akka-camel can be used to implement -publish/subscribe for actors. The following figure sketches an example for -JMS-based publish/subscribe. - -.. image:: camel-pubsub.png - -A consumer actor receives a message from an HTTP client. It sends the message to -a JMS producer actor (publisher). The JMS producer actor publishes the message -to a JMS topic. Two other actors that subscribed to that topic both receive the -message. The actor classes used in this example are shown in the following -snippet. - -.. code-block:: scala - - package sample.camel - - import akka.actor.{Actor, ActorRef} - import akka.camel.{Producer, Message, Consumer} - - class Subscriber(name:String, uri: String) extends Actor with Consumer { - def endpointUri = uri - - protected def receive = { - case msg: Message => println("%s received: %s" format (name, msg.body)) - } - } - - class Publisher(name: String, uri: String) extends Actor with Producer { - self.id = name - - def endpointUri = uri - - // one-way communication with JMS - override def oneway = true - } - - class PublisherBridge(uri: String, publisher: ActorRef) extends Actor with Consumer { - def endpointUri = uri - - protected def receive = { - case msg: Message => { - publisher ! 
msg.bodyAs[String] - self.reply("message published") - } - } - } - -Wiring these actors to implement the above example is as simple as - -.. code-block:: scala - - package sample.camel - - import org.apache.camel.impl.DefaultCamelContext - import org.apache.camel.spring.spi.ApplicationContextRegistry - import org.springframework.context.support.ClassPathXmlApplicationContext - - import akka.actor.Actor._ - import akka.camel.CamelContextManager - - class Boot { - // Create CamelContext with Spring-based registry and custom route builder - val context = new ClassPathXmlApplicationContext("/context-jms.xml", getClass) - val registry = new ApplicationContextRegistry(context) - CamelContextManager.init(new DefaultCamelContext(registry)) - - // Setup publish/subscribe example - val jmsUri = "jms:topic:test" - val jmsSubscriber1 = actorOf(new Subscriber("jms-subscriber-1", jmsUri)) - val jmsSubscriber2 = actorOf(new Subscriber("jms-subscriber-2", jmsUri)) - val jmsPublisher = actorOf(new Publisher("jms-publisher", jmsUri)) - - val jmsPublisherBridge = actorOf(new PublisherBridge("jetty:http://0.0.0.0:8877/camel/pub/jms", jmsPublisher)) - } - -To publish messages to subscribers one could of course also use the JMS API -directly; there's no need to do that over a JMS producer actor as in this -example. For the example to work, Camel's `jms`_ component needs to be -configured with a JMS connection factory which is done in a Spring application -context XML file (context-jms.xml). - -.. _jms: http://camel.apache.org/jms.html - -.. code-block:: xml - - - - - - - - - - - - - - - - - - - - - - - - - -To run the example, start the :ref:`microkernel` and POST a -message to ``http://localhost:8877/camel/pub/jms``. - -.. code-block:: none - - curl -H "Content-Type: text/plain" -d "Happy hAkking" http://localhost:8877/camel/pub/jms - -The HTTP response body should be - -.. code-block:: none - - message published - -On the console, where you started the Akka Kernel, you should see something like - -.. code-block:: none - - ... - INF [20100622-11:49:57.688] camel: jms-subscriber-2 received: Happy hAkking - INF [20100622-11:49:57.688] camel: jms-subscriber-1 received: Happy hAkking - - -Cometd -^^^^^^ - -Publish/subscribe with `CometD`_ is equally easy using `Camel's cometd -component`_. - -.. _CometD: http://cometd.org/ -.. _Camel's cometd component: http://camel.apache.org/cometd.html - -.. image:: camel-pubsub2.png - -All actor classes from the JMS example can re-used, only the endpoint URIs need -to be changed. - -.. code-block:: scala - - package sample.camel - - import org.apache.camel.impl.DefaultCamelContext - import org.apache.camel.spring.spi.ApplicationContextRegistry - import org.springframework.context.support.ClassPathXmlApplicationContext - - import akka.actor.Actor._ - import akka.camel.CamelContextManager - - class Boot { - // ... - - // Setup publish/subscribe example - val cometdUri = "cometd://localhost:8111/test/abc?resourceBase=target" - val cometdSubscriber = actorOf(new Subscriber("cometd-subscriber", cometdUri)) - val cometdPublisher = actorOf(new Publisher("cometd-publisher", cometdUri)) - - val cometdPublisherBridge = actorOf(new PublisherBridge("jetty:http://0.0.0.0:8877/camel/pub/cometd", cometdPublisher)) - } - - -Quartz Scheduler Example ------------------------- - -Here is an example showing how simple is to implement a cron-style scheduler by -using the Camel Quartz component in Akka. - -The following example creates a "timer" actor which fires a message every 2 -seconds: - -.. 
code-block:: scala - - package com.dimingo.akka - - import akka.actor.Actor - import akka.actor.Actor.actorOf - - import akka.camel.{Consumer, Message} - import akka.camel.CamelServiceManager._ - - class MyQuartzActor extends Actor with Consumer { - - def endpointUri = "quartz://example?cron=0/2+*+*+*+*+?" - - def receive = { - - case msg => println("==============> received %s " format msg) - - } // end receive - - } // end MyQuartzActor - - object MyQuartzActor { - - def main(str: Array[String]) { - - // start the Camel service - startCamelService - - // create and start a quartz actor - val myActor = actorOf[MyQuartzActor] - - } // end main - - } // end MyQuartzActor - -The full working example is available for download here: -http://www.dimingo.com/akka/examples/example-akka-quartz.tar.gz - -You can launch it using the maven command: - -.. code-block:: none - - $ mvn scala:run -DmainClass=com.dimingo.akka.MyQuartzActor - -For more information about the Camel Quartz component, see here: -http://camel.apache.org/quartz.html diff --git a/akka-docs/modules/microkernel.rst b/akka-docs/modules/microkernel.rst index cbf9ba96ba..266d888b6c 100644 --- a/akka-docs/modules/microkernel.rst +++ b/akka-docs/modules/microkernel.rst @@ -5,36 +5,4 @@ Microkernel ############# - -Run the microkernel -=================== - -To start the kernel use the scripts in the ``bin`` directory. - -All services are configured in the :ref:`configuration` file in the ``config`` directory. -Services you want to be started up automatically should be listed in the list of ``boot`` classes in -the :ref:`configuration`. - -Put your application in the ``deploy`` directory. - - -Akka Home ---------- - -Note that the microkernel needs to know where the Akka home is (the base -directory of the microkernel). The above scripts do this for you. Otherwise, you -can set Akka home by: - -* Specifying the ``AKKA_HOME`` environment variable - -* Specifying the ``-Dakka.home`` java option - - -.. _hello-microkernel: - -Hello Microkernel -================= - -There is a very simple Akka Mist sample project included in the microkernel -``deploy`` directory. Start the microkernel with the start script and then go to -http://localhost:9998 to say Hello to the microkernel. +The Akka Spring module has not been migrated to Akka 2.0-SNAPSHOT yet. diff --git a/akka-docs/modules/spring.rst b/akka-docs/modules/spring.rst index 29bf4632cf..adf226c642 100644 --- a/akka-docs/modules/spring.rst +++ b/akka-docs/modules/spring.rst @@ -5,331 +5,4 @@ Spring Integration #################### -Module stability: **STABLE** - -Akkas integration with the `Spring Framework `_ supplies the Spring way of using the Typed Actor Java API and for CamelService configuration for :ref:`camel-spring-applications`. It uses Spring's custom namespaces to create Typed Actors, supervisor hierarchies and a CamelService in a Spring environment. - -Contents: - -.. contents:: :local: - -To use the custom name space tags for Akka you have to add the XML schema definition to your spring configuration. It is available at `http://akka.io/akka-1.0.xsd `_. The namespace for Akka is: - -.. code-block:: xml - - xmlns:akka="http://akka.io/schema/akka" - -Example header for Akka Spring configuration: - -.. code-block:: xml - - - - -- - -Actors ------- - -Actors in Java are created by extending the 'UntypedActor' class and implementing the 'onReceive' method. - -Example how to create Actors with the Spring framework: - -.. 
code-block:: xml - - - - - - -Supported scopes are singleton and prototype. Dependencies and properties are set with Springs ```` element. -A dependency can be either a ```` or a regular ````. - -Get the Actor from the Spring context: - -.. code-block:: java - - ApplicationContext context = new ClassPathXmlApplicationContext("akka-spring-config.xml"); - ActorRef actorRef = (ActorRef) context.getBean("myActor"); - -Typed Actors ------------- - -Here are some examples how to create Typed Actors with the Spring framework: - -Creating a Typed Actor: -^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: xml - - - - - - - - -Supported scopes are singleton and prototype. Dependencies and properties are set with Springs ```` element. -A dependency can be either a ```` or a regular ````. - -Get the Typed Actor from the Spring context: - -.. code-block:: java - - ApplicationContext context = new ClassPathXmlApplicationContext("akka-spring-config.xml"); - MyPojo myPojo = (MyPojo) context.getBean("myActor"); - -Remote Actors -------------- - -For details on server managed and client managed remote actors see Remote Actor documentation. - -Configuration for a client managed remote Actor -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -:: - - - - - -The default for 'managed-by' is "client", so in the above example it could be left out. - -Configuration for a server managed remote Actor -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Server side -*********** - -:: - - - - - - - - - - -If the server specified by 'host' and 'port' does not exist it will not be registered. - -Client side -*********** - -:: - - - - - -Configuration for a client managed remote Typed Actor -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: xml - - - - - -Configuration for a server managed remote Typed Actor -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Sever side setup -**************** - -:: - - - - - -Client side setup -***************** - -:: - - - - -Dispatchers ------------ - -Configuration for a Typed Actor or Untyped Actor with a custom dispatcher -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you don't want to use the default dispatcher you can define your own dispatcher in the spring configuration. For more information on dispatchers have a look at Dispatchers documentation. - -.. code-block:: xml - - - - - - - - - - - -If you want to or have to share the dispatcher between Actors you can define a dispatcher and reference it from the Typed Actor configuration: - -.. code-block:: xml - - - - - - - - - -The following dispatcher types are available in spring configuration: - -* executor-based-event-driven -* executor-based-event-driven-work-stealing -* thread-based - -The following queue types are configurable for dispatchers using thread pools: - -* bounded-linked-blocking-queue -* unbounded-linked-blocking-queue -* synchronous-queue -* bounded-array-blocking-queue - -If you have set up your IDE to be XSD-aware you can easily write your configuration through auto-completion. - -Stopping Typed Actors and Untyped Actors ----------------------------------------- - -Actors with scope singleton are stopped when the application context is closed. Actors with scope prototype must be stopped by the application. - -Supervisor Hierarchies ----------------------- - -The supervisor configuration in Spring follows the declarative configuration for the Java API. Have a look at Akka's approach to fault tolerance. 
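For orientation, the Spring XML in the following example maps onto that declarative Java API configuration. A rough sketch of the programmatic form is shown here; ``TypedActorConfigurator``, ``SuperviseTypedActor``, ``OneForOneStrategy`` and ``permanent()`` all appear elsewhere in these docs, while ``MyPojoImpl`` and the exact constructor and method signatures are assumptions for illustration only:

.. code-block:: java

   import static akka.config.Supervision.*;
   import akka.config.TypedActorConfigurator;

   // Rough sketch (signatures assumed): supervise a typed actor with a
   // OneForOne strategy that restarts it on IOException, mirroring the
   // XML configuration shown below.
   TypedActorConfigurator configurator = new TypedActorConfigurator();
   configurator.configure(
       new OneForOneStrategy(new Class[] { java.io.IOException.class }, 3, 5000),
       new SuperviseTypedActor[] {
         new SuperviseTypedActor(MyPojo.class, MyPojoImpl.class, permanent(), 1000)
       }).supervise();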
- -Example spring supervisor configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: xml - - - - - - - java.io.IOException - - - - - - - - - - - - - - java.io.IOException - java.lang.NullPointerException - - - - - - - - - - -Get the TypedActorConfigurator from the Spring context -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: java - - TypedActorConfigurator myConfigurator = (TypedActorConfigurator) context.getBean("my-supervisor"); - MyPojo myPojo = (MyPOJO) myConfigurator.getInstance(MyPojo.class); - -Property Placeholders ---------------------- - -The Akka configuration can be made available as property placeholders by using a custom property placeholder configurer for Configgy: - -:: - - - - - - - -Camel configuration -------------------- - -For details refer to the :ref:`camel-module` documentation: - -* CamelService configuration for :ref:`camel-spring-applications` -* Access to Typed Actors :ref:`camel-typed-actors-using-spring` +The Akka Spring module has not been migrated to Akka 2.0-SNAPSHOT yet. \ No newline at end of file From 92a0fa7e8e5105d33d34ebb19b8b89081867dc50 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 12 Dec 2011 12:18:14 +0100 Subject: [PATCH 19/27] DOC: Removed tutorial chat server. See #1455 --- akka-docs/scala/index.rst | 1 - akka-docs/scala/tutorial-chat-server.rst | 8 -------- 2 files changed, 9 deletions(-) delete mode 100644 akka-docs/scala/tutorial-chat-server.rst diff --git a/akka-docs/scala/index.rst b/akka-docs/scala/index.rst index 20fa2cb887..683eaf2fa0 100644 --- a/akka-docs/scala/index.rst +++ b/akka-docs/scala/index.rst @@ -20,4 +20,3 @@ Scala API fsm http testing - tutorial-chat-server diff --git a/akka-docs/scala/tutorial-chat-server.rst b/akka-docs/scala/tutorial-chat-server.rst deleted file mode 100644 index 0df783627a..0000000000 --- a/akka-docs/scala/tutorial-chat-server.rst +++ /dev/null @@ -1,8 +0,0 @@ -Tutorial: write a scalable, fault-tolerant, network chat server and client (Scala) -============================================================================================= - -.. sidebar:: Contents - - .. contents:: :local: - -REWRITE ME \ No newline at end of file From ad0a67c77d0827af445aee92121800f4275a1ec7 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 12 Dec 2011 12:19:20 +0100 Subject: [PATCH 20/27] DOC: Removed actor registry. See #1455 --- akka-docs/java/actor-registry.rst | 98 -------------------------- akka-docs/java/index.rst | 1 - akka-docs/scala/actor-registry.rst | 106 ----------------------------- akka-docs/scala/index.rst | 1 - 4 files changed, 206 deletions(-) delete mode 100644 akka-docs/java/actor-registry.rst delete mode 100644 akka-docs/scala/actor-registry.rst diff --git a/akka-docs/java/actor-registry.rst b/akka-docs/java/actor-registry.rst deleted file mode 100644 index 32a5af42c6..0000000000 --- a/akka-docs/java/actor-registry.rst +++ /dev/null @@ -1,98 +0,0 @@ -ActorRegistry (Java) -==================== - -Module stability: **SOLID** - -ActorRegistry: Finding Actors ------------------------------ - -Actors can be looked up using the 'akka.actor.Actors.registry()' object. 
Through this registry you can look up actors by: - -* uuid com.eaio.uuid.UUID – this uses the ``uuid`` field in the Actor class, returns the actor reference for the actor with specified uuid, if one exists, otherwise None -* id string – this uses the ``id`` field in the Actor class, which can be set by the user (default is the class name), returns all actor references to actors with specified id -* parameterized type - returns a ``ActorRef[]`` with all actors that are a subtype of this specific type -* specific actor class - returns a ``ActorRef[]`` with all actors of this exact class - -Actors are automatically registered in the ActorRegistry when they are started and removed when they are stopped. But you can explicitly register and unregister ActorRef's if you need to using the ``register`` and ``unregister`` methods. - -Here is a summary of the API for finding actors: - -.. code-block:: java - - import static akka.actor.Actors.*; - Option actor = registry().actorFor(uuid); - ActorRef[] actors = registry().actors(); - ActorRef[] otherActors = registry().actorsFor(id); - ActorRef[] moreActors = registry().actorsFor(clazz); - -You can shut down all Actors in the system by invoking: - -.. code-block:: java - - registry().shutdownAll(); - -If you want to know when a new Actor is added to or removed from the registry, you can use the subscription API on the registry. You can register an Actor that should be notified when an event happens in the ActorRegistry: - -.. code-block:: java - - void addListener(ActorRef listener); - void removeListener(ActorRef listener); - -The messages sent to this Actor are: - -.. code-block:: java - - public class ActorRegistered { - ActorRef getActor(); - String getAddress(); - } - - public class ActorUnregistered { - ActorRef actor(); - String getAddress(); - } - - public class TypedActorRegistered { - ActorRef getActor(); - String getAddress(); - Object getProxy(); - } - - public class TypedActorUnregistered { - ActorRef actor(); - String getAddress(); - Object getProxy(); - } - -So your listener Actor needs to be able to handle these two messages. Example: - -.. code-block:: java - - import akka.actor.ActorRegistered; - import akka.actor.ActorUnregistered; - import akka.actor.TypedActorRegistered; - import akka.actor.TypedActorUnregistered; - import akka.actor.UntypedActor; - import akka.event.EventHandler; - - public class RegistryListener extends UntypedActor { - public void onReceive(Object message) throws Exception { - if (message instanceof ActorRegistered) { - ActorRegistered event = (ActorRegistered) message; - EventHandler.info(this, String.format("Actor registered: %s - %s", - event.actor().actorClassName(), event.actor().getUuid())); - event.actor().actorClassName(), event.actor().getUuid())); - } else if (message instanceof ActorUnregistered) { - // ... - } - } - } - -The above actor can be added as listener of registry events: - -.. 
code-block:: java - - import static akka.actor.Actors.*; - - ActorRef listener = actorOf(RegistryListener.class); - registry().addListener(listener); diff --git a/akka-docs/java/index.rst b/akka-docs/java/index.rst index e58c9a72f0..2ff3d0252e 100644 --- a/akka-docs/java/index.rst +++ b/akka-docs/java/index.rst @@ -8,7 +8,6 @@ Java API untyped-actors typed-actors - actor-registry futures dataflow stm diff --git a/akka-docs/scala/actor-registry.rst b/akka-docs/scala/actor-registry.rst deleted file mode 100644 index f5a6290cdf..0000000000 --- a/akka-docs/scala/actor-registry.rst +++ /dev/null @@ -1,106 +0,0 @@ -ActorRegistry (Scala) -===================== - -Module stability: **SOLID** - -ActorRegistry: Finding Actors ------------------------------ - -Actors can be looked up by using the ``akka.actor.Actor.registry: akka.actor.ActorRegistry``. Lookups for actors through this registry can be done by: - -* uuid akka.actor.Uuid – this uses the ``uuid`` field in the Actor class, returns the actor reference for the actor with specified uuid, if one exists, otherwise None -* id string – this uses the ``id`` field in the Actor class, which can be set by the user (default is the class name), returns all actor references to actors with specified id -* specific actor class - returns an ``Array[Actor]`` with all actors of this exact class -* parameterized type - returns an ``Array[Actor]`` with all actors that are a subtype of this specific type - -Actors are automatically registered in the ActorRegistry when they are started, removed or stopped. You can explicitly register and unregister ActorRef's by using the ``register`` and ``unregister`` methods. The ActorRegistry contains many convenience methods for looking up typed actors. - -Here is a summary of the API for finding actors: - -.. code-block:: scala - - def actors: Array[ActorRef] - def actorFor(uuid: akka.actor.Uuid): Option[ActorRef] - def actorsFor(id : String): Array[ActorRef] - def actorsFor[T <: Actor](implicit manifest: Manifest[T]): Array[ActorRef] - def actorsFor[T <: Actor](clazz: Class[T]): Array[ActorRef] - - // finding typed actors - def typedActors: Array[AnyRef] - def typedActorFor(uuid: akka.actor.Uuid): Option[AnyRef] - def typedActorsFor(id: String): Array[AnyRef] - def typedActorsFor[T <: AnyRef](implicit manifest: Manifest[T]): Array[AnyRef] - def typedActorsFor[T <: AnyRef](clazz: Class[T]): Array[AnyRef] - -Examples of how to use them: - -.. code-block:: scala - - val actor = Actor.registry.actorFor(uuid) - val pojo = Actor.registry.typedActorFor(uuid) - -.. code-block:: scala - - val actors = Actor.registry.actorsFor(classOf[...]) - val pojos = Actor.registry.typedActorsFor(classOf[...]) - -.. code-block:: scala - - val actors = Actor.registry.actorsFor(id) - val pojos = Actor.registry.typedActorsFor(id) - -.. code-block:: scala - - val actors = Actor.registry.actorsFor[MyActorType] - val pojos = Actor.registry.typedActorsFor[MyTypedActorImpl] - -The ActorRegistry also has a 'shutdownAll' and 'foreach' methods: - -.. code-block:: scala - - def foreach(f: (ActorRef) => Unit) - def foreachTypedActor(f: (AnyRef) => Unit) - def shutdownAll() - -If you need to know when a new Actor is added or removed from the registry, you can use the subscription API. You can register an Actor that should be notified when an event happens in the ActorRegistry: - -.. code-block:: scala - - def addListener(listener: ActorRef) - def removeListener(listener: ActorRef) - -The messages sent to this Actor are: - -.. 
code-block:: scala - - case class ActorRegistered(@BeanProperty address: String,@BeanProperty actor: ActorRef) extends ActorRegistryEvent - case class ActorUnregistered(@BeanProperty address: String, @BeanProperty actor: ActorRef) extends ActorRegistryEvent - case class TypedActorRegistered(@BeanProperty address: String, @BeanProperty actor: ActorRef, @BeanProperty proxy: AnyRef) extends ActorRegistryEvent - case class TypedActorUnregistered(@BeanProperty address: String, @BeanProperty actor: ActorRef, @BeanProperty proxy: AnyRef) extends ActorRegistryEvent - -So your listener Actor needs to be able to handle these messages. Example: - -.. code-block:: scala - - import akka.actor._ - import akka.event.EventHandler - - class RegistryListener extends Actor { - def receive = { - case event: ActorRegistered => - EventHandler.info(this, "Actor registered: %s - %s".format( - event.actor.actorClassName, event.actor.uuid)) - case event: ActorUnregistered => - // ... - } - } - -The above actor can be added as listener of registry events: - -.. code-block:: scala - - import akka.actor._ - import akka.actor.Actor._ - - val listener = actorOf[RegistryListener] - registry.addListener(listener) diff --git a/akka-docs/scala/index.rst b/akka-docs/scala/index.rst index 683eaf2fa0..cbcca36b6c 100644 --- a/akka-docs/scala/index.rst +++ b/akka-docs/scala/index.rst @@ -8,7 +8,6 @@ Scala API actors typed-actors - actor-registry futures dataflow agents From 4df0ec582368b13e45d5040a0fed1bbea2e70e60 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 12 Dec 2011 12:27:45 +0100 Subject: [PATCH 21/27] DOC: Removed most of http docs. See #1455 --- akka-docs/{scala => disabled}/http.rst | 0 akka-docs/modules/http.rst | 55 ++++++++++++++++++++++++++ akka-docs/modules/index.rst | 1 + 3 files changed, 56 insertions(+) rename akka-docs/{scala => disabled}/http.rst (100%) create mode 100644 akka-docs/modules/http.rst diff --git a/akka-docs/scala/http.rst b/akka-docs/disabled/http.rst similarity index 100% rename from akka-docs/scala/http.rst rename to akka-docs/disabled/http.rst diff --git a/akka-docs/modules/http.rst b/akka-docs/modules/http.rst new file mode 100644 index 0000000000..a18b182f0b --- /dev/null +++ b/akka-docs/modules/http.rst @@ -0,0 +1,55 @@ +.. _http-module: + +HTTP +==== + +.. sidebar:: Contents + + .. contents:: :local: + +When deploying in a servlet container: +-------------------------------------------- + +If you deploy Akka in a JEE container, don't forget to create an Akka initialization and cleanup hook: + +.. code-block:: scala + + package com.my //<--- your own package + import akka.util.AkkaLoader + import akka.cluster.BootableRemoteActorService + import akka.actor.BootableActorLoaderService + import javax.servlet.{ServletContextListener, ServletContextEvent} + + /** + * This class can be added to web.xml mappings as a listener to start and postStop Akka. + * + * ... + * + * com.my.Initializer + * + * ... 
+ * + */ + class Initializer extends ServletContextListener { + lazy val loader = new AkkaLoader + def contextDestroyed(e: ServletContextEvent): Unit = loader.shutdown + def contextInitialized(e: ServletContextEvent): Unit = + loader.boot(true, new BootableActorLoaderService with BootableRemoteActorService) //<--- Important + // loader.boot(true, new BootableActorLoaderService {}) // If you don't need akka-remote + } + +For Java users, it's currently only possible to use BootableActorLoaderService, but you'll need to use: akka.actor.DefaultBootableActorLoaderService + + +Then you just declare it in your web.xml: + +.. code-block:: xml + + + ... + + your.package.Initializer + + ... + + diff --git a/akka-docs/modules/index.rst b/akka-docs/modules/index.rst index 780d5b23df..603eeb2084 100644 --- a/akka-docs/modules/index.rst +++ b/akka-docs/modules/index.rst @@ -5,6 +5,7 @@ Modules :maxdepth: 2 durable-mailbox + http microkernel camel spring From d92c52b1b5edd7481d1a61a3168ab7d59561c5ef Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 12 Dec 2011 12:33:39 +0100 Subject: [PATCH 22/27] DOC: Removed old migration guides and release notes. See #1455 --- .../project/migration-guide-0.10.x-1.0.x.rst | 447 ------------ .../project/migration-guide-0.7.x-0.8.x.rst | 94 --- .../project/migration-guide-0.8.x-0.9.x.rst | 172 ----- .../project/migration-guide-0.9.x-0.10.x.rst | 47 -- .../project/migration-guide-1.0.x-1.1.x.rst | 78 --- .../project/migration-guide-1.1.x-1.2.x.rst | 6 - .../project/migration-guide-1.2.x-2.0.x.rst | 20 - .../project/migration-guide-1.3.x-2.0.x.rst | 14 + akka-docs/project/migration-guides.rst | 8 +- akka-docs/project/release-notes.rst | 644 +----------------- akka-docs/scala/index.rst | 1 - 11 files changed, 17 insertions(+), 1514 deletions(-) delete mode 100644 akka-docs/project/migration-guide-0.10.x-1.0.x.rst delete mode 100644 akka-docs/project/migration-guide-0.7.x-0.8.x.rst delete mode 100644 akka-docs/project/migration-guide-0.8.x-0.9.x.rst delete mode 100644 akka-docs/project/migration-guide-0.9.x-0.10.x.rst delete mode 100644 akka-docs/project/migration-guide-1.0.x-1.1.x.rst delete mode 100644 akka-docs/project/migration-guide-1.1.x-1.2.x.rst delete mode 100644 akka-docs/project/migration-guide-1.2.x-2.0.x.rst create mode 100644 akka-docs/project/migration-guide-1.3.x-2.0.x.rst diff --git a/akka-docs/project/migration-guide-0.10.x-1.0.x.rst b/akka-docs/project/migration-guide-0.10.x-1.0.x.rst deleted file mode 100644 index fbc951229b..0000000000 --- a/akka-docs/project/migration-guide-0.10.x-1.0.x.rst +++ /dev/null @@ -1,447 +0,0 @@ -Migration Guide 0.10.x to 1.0.x -==================================== - -Akka & Akka Modules separated into two different repositories and distributions -------------------------------------------------------------------------------- - -Akka is split up into two different parts: -* Akka - Reflects all the sections under 'Scala API' and 'Java API' in the navigation bar. -* Akka Modules - Reflects all the sections under 'Add-on modules' in the navigation bar. - -Download the release you need (Akka core or Akka Modules) from ``_ and unzip it. - ----- - -Changed Akka URI ----------------- - -http://akkasource.org changed to http://akka.io - -Reflects XSDs, Maven repositories, ScalaDoc etc. 
- ----- - -Removed 'se.scalablesolutions' prefix -------------------------------------- - -We have removed some boilerplate by shortening the Akka package from -**se.scalablesolutions.akka** to just **akka** so just do a search-replace in your project, -we apologize for the inconvenience, but we did it for our users. - ----- - -Akka-core is no more --------------------- - -Akka-core has been split into akka-actor, akka-stm, akka-typed-actor & akka-remote this means that you need to update any deps you have on akka-core. - ----- - -Config ------- - -Turning on/off modules -^^^^^^^^^^^^^^^^^^^^^^ - -All the 'service = on' elements for turning modules on and off have been replaced by a top-level list of the enabled services. - -Services available for turning on/off are: -* "remote" -* "http" -* "camel" - -**All** services are **OFF** by default. Enable the ones you are using. - -.. code-block:: ruby - - akka { - enabled-modules = [] # Comma separated list of the enabled modules. Options: ["remote", "camel", "http"] - } - -Renames -^^^^^^^ - -* 'rest' section - has been renamed to 'http' to align with the module name 'akka-http'. -* 'storage' section - has been renamed to 'persistence' to align with the module name 'akka-persistence'. - -.. code-block:: ruby - - akka { - http { - .. - } - - persistence { - .. - } - } - ----- - -Important changes from RC2-RC3 ------------------------------- - -**akka.config.SupervisionSupervise** - -**Scala** - -.. code-block:: scala - - def apply(actorRef: ActorRef, lifeCycle: LifeCycle, registerAsRemoteService: Boolean = false) - -- boolean instead of remoteAddress, registers that actor with it's id as service name on the local server - -**akka.actor.Actors now is the API for Java to interact with Actors, Remoting and ActorRegistry:** - -**Java** - -.. code-block:: java - - import static akka.actor.Actors.*; // <-- The important part - - actorOf(); - remote().actorOf(); - registry().actorsFor("foo"); - -***akka.actor.Actor now is the API for Scala to interact with Actors, Remoting and ActorRegistry:*** - -**Scala** - -.. code-block:: scala - - import akka.actor.Actor._ // <-- The important part - - actorOf().method - remote.actorOf() - registry.actorsFor("foo") - -**object UntypedActor has been deleted and replaced with akka.actor.Actors/akka.actor.Actor (Java/Scala)** - -- UntypedActor.actorOf -> Actors.actorOf (Java) or Actor.actorOf (Scala) - -**object ActorRegistry has been deleted and replaced with akka.actor.Actors.registry()/akka.actor.Actor.registry (Java/Scala)** - -- ActorRegistry. -> Actors.registry(). (Java) or Actor.registry. 
(Scala) - -**object RemoteClient has been deleted and replaced with akka.actor.Actors.remote()/akka.actor.Actor.remote (Java/Scala)** - -- RemoteClient -> Actors.remote() (Java) or Actor.remote (Scala) - -**object RemoteServer has been deleted and replaced with akka.actor.Actors.remote()/akka.actor.Actor.remote (Java/Scala)** - -- RemoteServer - deleted -> Actors.remote() (Java) or Actor.remote (Scala) - -**classes RemoteActor, RemoteUntypedActor and RemoteUntypedConsumerActors has been deleted and replaced with akka.actor.Actors.remote().actorOf(x, host port)/akka.actor.Actor.remote.actorOf(x, host, port)** - -- RemoteActor, RemoteUntypedActor - deleted, use: remote().actorOf(YourActor.class, host, port) (Java) or remote.actorOf[YourActor](host, port) - -**Remoted spring-actors now default to spring id as service-name, use "service-name" attribute on "remote"-tag to override** - -**Listeners for RemoteServer and RemoteClient** are now registered on Actors.remote().addListener (Java) or Actor.remote.addListener (Scala), this means that all listeners get all remote events, both remote server evens and remote client events, **so adjust your code accordingly.** - -**ActorRef.startLinkRemote has been removed since one specified on creation wether the actor is client-managed or not.** - -Important change from RC3 to RC4 --------------------------------- - -The Akka-Spring namespace has changed from akkasource.org and scalablesolutions.se to http://akka.io/schema and http://akka.io/akka-.xsd - -Module akka-actor ------------------ - -The Actor.init callback has been renamed to "preStart" to align with the general callback naming and is more clear about when it's called. - -The Actor.shutdown callback has been renamed to "postStop" to align with the general callback naming and is more clear about when it's called. - -The Actor.initTransactionalState callback has been removed, logic should be moved to preStart and be wrapped in an atomic block - -**se.scalablesolutions.akka.config.ScalaConfig** and **se.scalablesolutions.akka.config.JavaConfig** have been merged into **akka.config.Supervision** - -**RemoteAddress** has moved from **se.scalablesolutions.akka.config.ScalaConfig** to **akka.config** - -The ActorRef.lifeCycle has changed signature from Option[LifeCycle] to LifeCycle, this means you need to change code that looks like this: -**self.lifeCycle = Some(LifeCycle(Permanent))** to **self.lifeCycle = Permanent** - -The equivalent to **self.lifeCycle = None** is **self.lifeCycle = UndefinedLifeCycle** -**LifeCycle(Permanent)** becomes **Permanent** -**new LifeCycle(permanent())** becomes **permanent()** (need to do: import static se.scalablesolutions.akka.config.Supervision.*; first) - -**JavaConfig.Component** and **ScalaConfig.Component** have been consolidated and renamed as **Supervision.SuperviseTypedActor** - -**self.trapExit** has been moved into the FaultHandlingStrategy, and **ActorRef.faultHandler** has switched type from Option[FaultHandlingStrategy] -to FaultHandlingStrategy: - -**Scala** - -.. code-block:: scala - - import akka.config.Supervision._ - - self.faultHandler = OneForOneStrategy(List(classOf[Exception]), 3, 5000) - -**Java** - -.. code-block:: java - - import static akka.Supervision.*; - - getContext().setFaultHandler(new OneForOneStrategy(new Class[] { Exception.class },50,1000)) - -**RestartStrategy, AllForOne, OneForOne** have been replaced with **AllForOneStrategy** and **OneForOneStrategy** in **se.scalablesolutions.akka.config.Supervision** - -**Scala** - -.. 
code-block:: scala - - import akka.config.Supervision._ - SupervisorConfig( - OneForOneStrategy(List(classOf[Exception]), 3, 5000), - Supervise(pingpong1,Permanent) :: Nil - ) - -**Java** - -.. code-block:: java - - import static akka.Supervision.*; - - new SupervisorConfig( - new OneForOneStrategy(new Class[] { Exception.class },50,1000), - new Server[] { new Supervise(pingpong1, permanent()) } - ) - -***We have removed the following factory methods:*** - -**Actor.actor { case foo => bar }** -**Actor.transactor { case foo => bar }** -**Actor.temporaryActor { case foo => bar }** -**Actor.init {} receive { case foo => bar }** - -They started the actor and no config was possible, it was inconsistent and irreparable. - -replace with your own factories, or: - -**Scala** - -.. code-block:: scala - - actorOf( new Actor { def receive = { case foo => bar } } ).start - actorOf( new Actor { self.lifeCycle = Temporary; def receive = { case foo => bar } } ).start - -ReceiveTimeout is now rescheduled after every message, before there was only an initial timeout. -To stop rescheduling of ReceiveTimeout, set **receiveTimeout = None** - -HotSwap -------- - -HotSwap does no longer use behavior stacking by default, but that is an option to both "become" and HotSwap. - -HotSwap now takes for Scala a Function from ActorRef to a Receive, the ActorRef passed in is the reference to self, so you can do self.reply() etc. - ----- - -Module akka-stm ---------------- - -The STM stuff is now in its own module. This means that there is no support for transactions or transactors in akka-actor. - -Local and global -^^^^^^^^^^^^^^^^ - -The **local/global** distinction has been dropped. This means that if the following general import was being used: - -**Scala** - -.. code-block:: scala - - import akka.stm.local._ - -this is now just: - -**Scala** - -.. code-block:: scala - - import akka.stm._ - -Coordinated is the new global -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There is a new explicit mechanism for coordinated transactions. See the `Scala Transactors `_ and `Java Transactors `_ documentation for more information. Coordinated transactions and transactors are found in the ``akka.transactor`` package now. The usage of transactors has changed. - -Agents -^^^^^^ - -Agent is now in the akka-stm module and has moved to the ``akka.agent`` package. The implementation has been reworked and is now closer to Clojure agents. There is not much difference in general usage, the main changes involve interaction with the STM. - -While updates to Agents are asynchronous, the state of an Agent is always immediately available for reading by any thread. Agents are integrated with the STM - any dispatches made in a transaction are held until that transaction commits, and are discarded if it is retried or aborted. There is a new ``sendOff`` method for long-running or blocking update functions. - ----- - -Module akka-camel ------------------ - -Access to the CamelService managed by CamelServiceManager has changed: - -* Method service renamed to mandatoryService (Scala) -* Method service now returns Option[CamelService] (Scala) -* Introduced method getMandatoryService() (Java) -* Introduced method getService() (Java) - -**Scala** - -.. code-block:: scala - - import se.scalablesolutions.akka.camel.CamelServiceManager._ - import se.scalablesolutions.akka.camel.CamelService - - val o: Option[CamelService] = service - val s: CamelService = mandatoryService - -**Java** - -.. 
code-block:: java - - import se.scalablesolutions.akka.camel.CamelService; - import se.scalablesolutions.akka.japi.Option; - import static se.scalablesolutions.akka.camel.CamelServiceManager.*; - - Option o = getService(); - CamelService s = getMandatoryService(); - -Access to the CamelContext and ProducerTemplate managed by CamelContextManager has changed: - -* Method context renamed to mandatoryContext (Scala) -* Method template renamed to mandatoryTemplate (Scala) -* Method service now returns Option[CamelContext] (Scala) -* Method template now returns Option[ProducerTemplate] (Scala) -* Introduced method getMandatoryContext() (Java) -* Introduced method getContext() (Java) -* Introduced method getMandatoryTemplate() (Java) -* Introduced method getTemplate() (Java) - -**Scala** - -.. code-block:: scala - - import org.apache.camel.CamelContext - import org.apache.camel.ProducerTemplate - - import se.scalablesolutions.akka.camel.CamelContextManager._ - - val co: Option[CamelContext] = context - val to: Option[ProducerTemplate] = template - - val c: CamelContext = mandatoryContext - val t: ProducerTemplate = mandatoryTemplate - -**Java** - -.. code-block:: java - - import org.apache.camel.CamelContext; - import org.apache.camel.ProducerTemplate; - - import se.scalablesolutions.akka.japi.Option; - import static se.scalablesolutions.akka.camel.CamelContextManager.*; - - Option co = getContext(); - Option to = getTemplate(); - - CamelContext c = getMandatoryContext(); - ProducerTemplate t = getMandatoryTemplate(); - -The following methods have been renamed on class se.scalablesolutions.akka.camel.Message: - -* bodyAs(Class) has been renamed to getBodyAs(Class) -* headerAs(String, Class) has been renamed to getHeaderAs(String, Class) - -The API for waiting for consumer endpoint activation and de-activation has been changed - -* CamelService.expectEndpointActivationCount has been removed and replaced by CamelService.awaitEndpointActivation -* CamelService.expectEndpointDeactivationCount has been removed and replaced by CamelService.awaitEndpointDeactivation - -**Scala** - -.. code-block:: scala - - import se.scalablesolutions.akka.actor.Actor - import se.scalablesolutions.akka.camel.CamelServiceManager._ - - val s = startCamelService - val actor = Actor.actorOf[SampleConsumer] - - // wait for 1 consumer being activated - s.awaitEndpointActivation(1) { - actor.start - } - - // wait for 1 consumer being de-activated - s.awaitEndpointDeactivation(1) { - actor.stop - } - - s.stop - -**Java** - -.. code-block:: java - - import java.util.concurrent.TimeUnit; - import se.scalablesolutions.akka.actor.ActorRef; - import se.scalablesolutions.akka.actor.Actors; - import se.scalablesolutions.akka.camel.CamelService; - import se.scalablesolutions.akka.japi.SideEffect; - import static se.scalablesolutions.akka.camel.CamelServiceManager.*; - - CamelService s = startCamelService(); - final ActorRef actor = Actors.actorOf(SampleUntypedConsumer.class); - - // wait for 1 consumer being activated - s.awaitEndpointActivation(1, new SideEffect() { - public void apply() { - actor.start(); - } - }); - - // wait for 1 consumer being de-activated - s.awaitEndpointDeactivation(1, new SideEffect() { - public void apply() { - actor.stop(); - } - }); - - s.stop(); - -Module Akka-Http ----------------- - -Atmosphere support has been removed. If you were using akka.comet.AkkaServlet for Jersey support only, -you can switch that to: akka.http.AkkaRestServlet and it should work just like before. 
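In a standard ``web.xml`` based deployment only the servlet class needs to change; the servlet name and URL pattern below are placeholders:

.. code-block:: xml

   <servlet>
     <servlet-name>akka</servlet-name>
     <!-- was: <servlet-class>akka.comet.AkkaServlet</servlet-class> -->
     <servlet-class>akka.http.AkkaRestServlet</servlet-class>
   </servlet>
   <servlet-mapping>
     <servlet-name>akka</servlet-name>
     <url-pattern>/*</url-pattern>
   </servlet-mapping>

No other changes should be needed if Atmosphere was only used for Jersey support.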
- -Atmosphere has been removed because we have a new async http support in the form of Akka Mist, a very thin bridge -between Servlet3.0/JettyContinuations and Actors, enabling Http-as-messages, read more about it here: -http://doc.akka.io/http#Mist%20-%20Lightweight%20Asynchronous%20HTTP - -If you really need Atmosphere support, you can add it yourself by following the steps listed at the start of: -http://doc.akka.io/comet - -Module akka-spring ------------------- - -The Akka XML schema URI has changed to http://akka.io/schema/akka - -.. code-block:: xml - - - - - - diff --git a/akka-docs/project/migration-guide-0.7.x-0.8.x.rst b/akka-docs/project/migration-guide-0.7.x-0.8.x.rst deleted file mode 100644 index 4bf866a765..0000000000 --- a/akka-docs/project/migration-guide-0.7.x-0.8.x.rst +++ /dev/null @@ -1,94 +0,0 @@ -Migration Guide 0.7.x to 0.8.x -============================== - -This is a case-by-case migration guide from Akka 0.7.x (on Scala 2.7.7) to Akka 0.8.x (on Scala 2.8.x) ------------------------------------------------------------------------------------------------------- - -Cases: ------- - -Actor.send is removed and replaced in full with Actor.! -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: scala - - myActor send "test" - -becomes - -.. code-block:: scala - - myActor ! "test" - -Actor.! now has it's implicit sender defaulted to None -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: scala - - def !(message: Any)(implicit sender: Option[Actor] = None) - -"import Actor.Sender.Self" has been removed because it's not needed anymore -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Remove - -.. code-block:: scala - - import Actor.Sender.Self - -Actor.spawn now uses manifests instead of concrete class types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: scala - - val someActor = spawn(classOf[MyActor]) - -becomes - -.. code-block:: scala - - val someActor = spawn[MyActor] - -Actor.spawnRemote now uses manifests instead of concrete class types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: scala - - val someActor = spawnRemote(classOf[MyActor],"somehost",1337) - -becomes - -.. code-block:: scala - - val someActor = spawnRemote[MyActor]("somehost",1337) - -Actor.spawnLink now uses manifests instead of concrete class types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: scala - - val someActor = spawnLink(classOf[MyActor]) - -becomes - -.. code-block:: scala - - val someActor = spawnLink[MyActor] - -Actor.spawnLinkRemote now uses manifests instead of concrete class types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: scala - - val someActor = spawnLinkRemote(classOf[MyActor],"somehost",1337) - -becomes - -.. code-block:: scala - - val someActor = spawnLinkRemote[MyActor]("somehost",1337) - -**Transaction.atomic and friends are moved into Transaction.Local._ and Transaction.Global._** -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -We now make a difference between transaction management that are local within a thread and global across many threads (and actors). 
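A minimal sketch of the new import choice, assuming the 0.8.x package prefix ``se.scalablesolutions.akka.stm`` and the object layout described above:

.. code-block:: scala

   // thread-local transactions
   import se.scalablesolutions.akka.stm.Transaction.Local._
   // or, for transactions spanning threads and actors:
   // import se.scalablesolutions.akka.stm.Transaction.Global._

   atomic {
     // transactional reads and writes, e.g. on a TransactionalRef, go here
   }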
diff --git a/akka-docs/project/migration-guide-0.8.x-0.9.x.rst b/akka-docs/project/migration-guide-0.8.x-0.9.x.rst deleted file mode 100644 index 0d4194bda0..0000000000 --- a/akka-docs/project/migration-guide-0.8.x-0.9.x.rst +++ /dev/null @@ -1,172 +0,0 @@ -Migration Guide 0.8.x to 0.9.x -============================== - -**This document describes between the 0.8.x and the 0.9 release.** - -Background for the new ActorRef -------------------------------- - -In the work towards 0.9 release we have now done a major change to how Actors are created. In short we have separated identity and value, created an 'ActorRef' that holds the actual Actor instance. This allows us to do many great things such as for example: - -* Create serializable, immutable, network-aware Actor references that can be freely shared across the network. They "remember" their origin and will always work as expected. -* Not only kill and restart the same supervised Actor instance when it has crashed (as we do now), but dereference it, throw it away and make it eligible for garbage collection. -* etc. much more - -These work very much like the 'PID' (process id) in Erlang. - -These changes means that there is no difference in defining Actors. You still use the old Actor trait, all methods are there etc. But you can't just new this Actor up and send messages to it since all its public API methods are gone. They now reside in a new class; 'ActorRef' and use need to use instances of this class to interact with the Actor (sending messages etc.). - -Here is a short migration guide with the things that you have to change. It is a big conceptual change but in practice you don't have to change much. - - - -Creating Actors with default constructor ----------------------------------------- - -From: - -.. code-block:: scala - - val a = new MyActor - a ! msg - -To: - -.. code-block:: scala - - import Actor._ - val a = actorOf[MyActor] - a ! msg - -You can also start it in the same statement: - -.. code-block:: scala - - val a = actorOf[MyActor] - -Creating Actors with non-default constructor --------------------------------------------- - -From: - -.. code-block:: scala - - val a = new MyActor(..) - a ! msg - -To: - -.. code-block:: scala - - import Actor._ - val a = actorOf(new MyActor(..)) - a ! msg - -Use of 'self' ActorRef API --------------------------- - -Where you have used 'this' to refer to the Actor from within itself now use 'self': - -.. code-block:: scala - - self ! MessageToMe - -Now the Actor trait only has the callbacks you can implement: -* receive -* postRestart/preRestart -* init/shutdown - -It has no state at all. - -All API has been moved to ActorRef. The Actor is given its ActorRef through the 'self' member variable. -Here you find functions like: -* !, !!, !!! and forward -* link, unlink, startLink, spawnLink etc -* makeTransactional, makeRemote etc. -* start, stop -* etc. - -Here you also find fields like -* dispatcher = ... -* id = ... -* lifeCycle = ... -* faultHandler = ... -* trapExit = ... -* etc. - -This means that to use them you have to prefix them with 'self', like this: - -.. code-block:: scala - - self ! Message - -However, for convenience you can import these functions and fields like below, which will allow you do drop the 'self' prefix: - -.. code-block:: scala - - class MyActor extends Actor { - import self._ - id = ... - dispatcher = ... - spawnLink[OtherActor] - ... - } - -Serialization -------------- - -If you want to serialize it yourself, here is how to do it: - -.. 
code-block:: scala - - val actorRef1 = actorOf[MyActor] - - val bytes = actorRef1.toBinary - - val actorRef2 = ActorRef.fromBinary(bytes) - -If you are also using Protobuf then you can use the methods that work with Protobuf's Messages directly. - -.. code-block:: scala - - val actorRef1 = actorOf[MyActor] - - val protobufMessage = actorRef1.toProtocol - - val actorRef2 = ActorRef.fromProtocol(protobufMessage) - -Camel ------ - -Some methods of the se.scalablesolutions.akka.camel.Message class have been deprecated in 0.9. These are - -.. code-block:: scala - - package se.scalablesolutions.akka.camel - - case class Message(...) { - // ... - @deprecated def bodyAs[T](clazz: Class[T]): T - @deprecated def setBodyAs[T](clazz: Class[T]): Message - // ... - } - -They will be removed in 1.0. Instead use - -.. code-block:: scala - - package se.scalablesolutions.akka.camel - - case class Message(...) { - // ... - def bodyAs[T](implicit m: Manifest[T]): T = - def setBodyAs[T](implicit m: Manifest[T]): Message - // ... - } - -Usage example: -.. code-block:: scala - - val m = Message(1.4) - val b = m.bodyAs[String] - diff --git a/akka-docs/project/migration-guide-0.9.x-0.10.x.rst b/akka-docs/project/migration-guide-0.9.x-0.10.x.rst deleted file mode 100644 index ceaa42af9b..0000000000 --- a/akka-docs/project/migration-guide-0.9.x-0.10.x.rst +++ /dev/null @@ -1,47 +0,0 @@ -Migration Guide 0.9.x to 0.10.x -=============================== - -Module akka-camel ------------------ - -The following list summarizes the breaking changes since Akka 0.9.1. - -* CamelService moved from package se.scalablesolutions.akka.camel.service one level up to se.scalablesolutions.akka.camel. -* CamelService.newInstance removed. For starting and stopping a CamelService, applications should use - - * CamelServiceManager.startCamelService and - * CamelServiceManager.stopCamelService. - -* Existing def receive = produce method definitions from Producer implementations must be removed (resolves compile error: method receive needs override modifier). -* The Producer.async method and the related Sync trait have been removed. This is now fully covered by Camel's `asynchronous routing engine `_. -* @consume annotation can not placed any longer on actors (i.e. on type-level), only on typed actor methods. Consumer actors must mixin the Consumer trait. -* @consume annotation moved to package se.scalablesolutions.akka.camel. - -Logging -------- - -We've switched to Logback (SLF4J compatible) for the logging, if you're having trouble seeing your log output you'll need to make sure that there's a logback.xml available on the classpath or you'll need to specify the location of the logback.xml file via the system property, ex: -Dlogback.configurationFile=/path/to/logback.xml - -Configuration -------------- - -* The configuration is now JSON-style (see below). -* Now you can define the time-unit to be used throughout the config file: - -.. code-block:: ruby - - akka { - version = "0.10" - time-unit = "seconds" # default timeout time unit for all timeout properties throughout the config - - actor { - timeout = 5 # default timeout for future based invocations - throughput = 5 # default throughput for Dispatcher - } - ... - } - -RemoteClient events -------------------- - -All events now has a reference to the RemoteClient instance instead of 'hostname' and 'port'. This is more flexible. Enables simpler reconnecting etc. 
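A hypothetical listener sketch of what this enables; the event name, its constructor arguments and the ``connect()`` call are assumptions for illustration and may not match the actual API:

.. code-block:: scala

   import se.scalablesolutions.akka.actor.Actor
   import se.scalablesolutions.akka.remote.{RemoteClient, RemoteClientError}

   // hypothetical event shape: the event now carries the client itself
   // instead of a (hostname, port) pair
   class RemoteClientListener extends Actor {
     def receive = {
       case RemoteClientError(cause, client: RemoteClient) =>
         // a reconnect can be attempted on exactly the client that failed
         client.connect()
       case _ => () // ignore other client events
     }
   }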
diff --git a/akka-docs/project/migration-guide-1.0.x-1.1.x.rst b/akka-docs/project/migration-guide-1.0.x-1.1.x.rst deleted file mode 100644 index 44f09ea244..0000000000 --- a/akka-docs/project/migration-guide-1.0.x-1.1.x.rst +++ /dev/null @@ -1,78 +0,0 @@ - -.. _migration-1.1: - -################################ - Migration Guide 1.0.x to 1.1.x -################################ - -**Akka has now moved to Scala 2.9.x** - - -Akka Actor -========== - -- is now dependency free, with the exception of the dependency on the - ``scala-library.jar`` - -- does not bundle any logging anymore, but you can subscribe to events within - Akka by registering an event handler on akka.event.EventHandler or by specifying - the ``FQN`` of an Actor in the akka.conf under akka.event-handlers; there is an - ``akka-slf4j`` module which still provides the Logging trait and a default - ``SLF4J`` logger adapter. - - Don't forget to add a SLF4J backend though, we recommend: - - .. code-block:: scala - - lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28" % "runtime" - -- If you used HawtDispatcher and want to continue using it, you need to include - akka-dispatcher-extras.jar from Akka Modules, in your akka.conf you need to - specify: ``akka.dispatch.HawtDispatcherConfigurator`` instead of - ``HawtDispatcher`` - -- FSM: the onTransition method changed from Function1 to PartialFunction; there - is an implicit conversion for the precise types in place, but it may be - necessary to add an underscore if you are passing an eta-expansion (using a - method as function value). - - -Akka Typed Actor -================ - -- All methods starting with ``get*`` are deprecated and will be removed in post - 1.1 release. - - - -Akka Remote -=========== - -- ``UnparsebleException`` has been renamed to - ``CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(exception, - classname, message)`` - - -Akka HTTP -========= - -- akka.servlet.Initializer has been moved to ``akka-kernel`` to be able to have - ``akka-http`` not depend on ``akka-remote``. If you don't want to use the class - for kernel, just create your own version of ``akka.servlet.Initializer``, it's - just a couple of lines of code and there are instructions in - the :ref:`http-module` docs. - -- akka.http.ListWriter has been removed in full, if you use it and want to keep - using it, here's the code: `ListWriter`_. - -- Jersey-server is now a "provided" dependency for ``akka-http``, so you'll need - to add the dependency to your project, it's built against Jersey 1.3 - -.. _ListWriter: https://github.com/jboner/akka/blob/v1.0/akka-http/src/main/scala/akka/http/ListWriter.scala - - -Akka Testkit -============ - -- The TestKit moved into the akka-testkit subproject and correspondingly into the - ``akka.testkit`` package. diff --git a/akka-docs/project/migration-guide-1.1.x-1.2.x.rst b/akka-docs/project/migration-guide-1.1.x-1.2.x.rst deleted file mode 100644 index e4988b9460..0000000000 --- a/akka-docs/project/migration-guide-1.1.x-1.2.x.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. _migration-1.2: - -################################ - Migration Guide 1.1.x to 1.2.x -################################ - diff --git a/akka-docs/project/migration-guide-1.2.x-2.0.x.rst b/akka-docs/project/migration-guide-1.2.x-2.0.x.rst deleted file mode 100644 index 7eabcf2f10..0000000000 --- a/akka-docs/project/migration-guide-1.2.x-2.0.x.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. 
_migration-2.0: - -################################ - Migration Guide 1.2.x to 2.0.x -################################ - -Actors -====== - -The 2.0 release contains several new features which require source-level -changes in client code. This API cleanup is planned to be the last one for a -significant amount of time. - -Lifecycle Callbacks -------------------- - -The :meth:`preRestart(cause: Throwable)` method has been replaced by -:meth:`preRestart(cause: Throwable, lastMessage: Any)`, hence you must insert -the second argument in all overriding methods. The good news is that any missed -actor will not compile without error. diff --git a/akka-docs/project/migration-guide-1.3.x-2.0.x.rst b/akka-docs/project/migration-guide-1.3.x-2.0.x.rst new file mode 100644 index 0000000000..3e815f11ea --- /dev/null +++ b/akka-docs/project/migration-guide-1.3.x-2.0.x.rst @@ -0,0 +1,14 @@ +.. _migration-2.0: + +################################ + Migration Guide 1.3.x to 2.0.x +################################ + +Actors +====== + +The 2.0 release contains several new features which require source-level +changes in client code. This API cleanup is planned to be the last one for a +significant amount of time. + +Detailed migration guide will be written. diff --git a/akka-docs/project/migration-guides.rst b/akka-docs/project/migration-guides.rst index 7af815f241..090e316c22 100644 --- a/akka-docs/project/migration-guides.rst +++ b/akka-docs/project/migration-guides.rst @@ -6,10 +6,4 @@ Migration Guides .. toctree:: :maxdepth: 1 - migration-guide-1.2.x-2.0.x - migration-guide-1.1.x-1.2.x - migration-guide-1.0.x-1.1.x - migration-guide-0.10.x-1.0.x - migration-guide-0.9.x-0.10.x - migration-guide-0.8.x-0.9.x - migration-guide-0.7.x-0.8.x + migration-guide-1.3.x-2.0.x diff --git a/akka-docs/project/release-notes.rst b/akka-docs/project/release-notes.rst index a80c394194..114efaf736 100644 --- a/akka-docs/project/release-notes.rst +++ b/akka-docs/project/release-notes.rst @@ -1,647 +1,7 @@ Release Notes ============== -Release 1.2 +Release 2.0 ----------- -This release, while containing several substantial improvements, focuses on -paving the way for the upcoming 2.0 release. A selection of changes is -presented in the following, for the full list of tickets closed during the -development cycle please refer to -`the issue tracker `_. 
- -- **Actor:** - - - unified :class:`Channel` abstraction for :class:`Promise` & :class:`Actor` - - - reintegrate invocation tracing (to be enabled per class and globally) - - - make last message available during :meth:`preRestart()` - - - experimental :meth:`freshInstance()` life-cycle hook for priming the new instance during restart - - - new textual primitives :meth:`tell` (``!``) and :meth:`ask` (``?``, formerly ``!!!``) - - - timeout for :meth:`ask` Futures taken from implicit argument (currently with fallback to deprecated ``ActorRef.timeout`` - -- **durable mailboxes:** - - - beanstalk, file, mongo, redis - -- **Future:** - - - :meth:`onTimeout` callback - - - select dispatcher for execution by implicit argument - - - add safer cast methods :meth:`as[T]: T` and :meth:`mapTo[T]: Future[T]` - -- **TestKit:** - - - add :class:`TestProbe` (can receive, reply and forward messages, supports all :class:`TestKit` assertions) - - - add :meth:`TestKit.awaitCond` - - - support global time-factor for all timing assertions (for running on busy CI servers) - -- **FSM:** - - - add :class:`TestFSMRef` - - - add :class:`LoggingFSM` (transition tracing, rolling event log) - -- updated dependencies: - - - Jackson 1.8.0 - - - Netty 3.2.5 - - - Protobuf 2.4.1 - - - ScalaTest 1.6.1 - -- various fixes, small improvements and documentation updates - -- several **deprecations** in preparation for 2.0 - - ================================ ===================== - Method Replacement - ================================ ===================== - Actor.preRestart(cause) Actor.preRestart(cause, lastMsg) - ActorRef.sendOneWay ActorRef.tell - ActorRef.sendOneWaySafe ActorRef.tryTell - ActorRef.sendRequestReply ActorRef.ask(...).get() - ActorRef.sendRequestReplyFuture ActorRef.ask(...).get() - ActorRef.replyUnsafe ActorRef.reply - ActorRef.replySafe ActorRef.tryReply - ActorRef.mailboxSize ActorRef.dispatcher.mailboxSize(actorRef) - ActorRef.sender/senderFuture ActorRef.channel - ActorRef.!! ActorRef.?(...).as[T] - ActorRef.!!! ActorRef.? - ActorRef.reply\_? ActorRef.tryReply - Future.receive Future.onResult - Future.collect Future.map - Future.failure Future.recover - MessageDispatcher.pendingFutures MessageDispatcher.tasks - RemoteClientModule.*Listener(s) EventHandler. - TestKit.expectMsg(pf) TestKit.expectMsgPF - TestKit.receiveWhile(pf) TestKit.receiveWhile()(pf) - ================================ ===================== - -Trivia -^^^^^^ - -This release contains changes to 213 files, with 16053 insertions and 3624 -deletions. The authorship of the corresponding commits is distributed as shown -below; the listing should not be taken too seriously, though, it has just been -done using ``git log --shortstat`` and summing up the numbers, so it certainly -misses details like who originally authored changes which were then back-ported -from the master branch (do not fear, you will be correctly attributed when the -stats for 2.0 are made). - -======= ========== ========= ========= -Commits Insertions Deletions Author -======= ========== ========= ========= - 69 11805 170 Viktor Klang - 34 9694 97 Patrik Nordwall - 72 3563 179 Roland Kuhn - 27 1749 115 Peter Vlugter - 7 238 22 Derek Williams - 4 86 25 Peter Veentjer - 1 17 5 Debasish Ghosh - 2 15 5 Jonas Bonér -======= ========== ========= ========= - -.. 
note:: - - Release notes of previous releases consisted of ticket or change listings in - no particular order - -Release 1.1 ------------ - -- **ADD** - #647 Extract an akka-camel-typed module out of akka-camel for optional typed actor support (Martin Krasser) -- **ADD** - #654 Allow consumer actors to acknowledge in-only message exchanges (Martin Krasser) -- **ADD** - #669 Support self.reply in preRestart and postStop after exception in receive (Martin Krasser) -- **ADD** - #682 Support for fault-tolerant Producer actors (Martin Krasser) -- **ADD** - Move TestKit to akka-testkit and add CallingThreadDispatcher (Roland Kuhn) -- **ADD** - Remote Client message buffering transaction log for buffering messages failed to send due to network problems. Flushes the buffer on reconnect. (Jonas Bonér) -- **ADD** - Added trait simulate network problems/errors to be used for remote actor testing (Jonas Bonér) -- **ADD** - Add future and await methods to Agent (Peter Vlugter) -- **ADD** - #586 Allow explicit reconnect for RemoteClient (Viktor Klang) -- **ADD** - #587 Dead letter sink queue for messages sent through RemoteClient that didn't get sent due to connection failure (Viktor Klang) -- **ADD** - #598 actor.id when using akka-spring should be the id of the spring bean (Viktor Klang) -- **ADD** - #652 Reap expired futures from ActiveRemoteClientHandler (Viktor Klang) -- **ADD** - #656 Squeeze more out of EBEDD? (Viktor Klang) -- **ADD** - #715 EventHandler.error should be usable without Throwable (Viktor Klang) -- **ADD** - #717 Add ExecutionHandler to NettyRemoteServer for more performance and scalability (Viktor Klang) -- **ADD** - #497 Optimize remote sends done in local scope (Viktor Klang) -- **ADD** - #633 Add support for Scalaz in akka-modules (Derek Williams) -- **ADD** - #677 Add map, flatMap, foreach, and filter to Future (Derek Williams) -- **ADD** - #661 Optimized Future's internals (Derek Williams) -- **ADD** - #685 Optimize execution of Futures (Derek Williams) -- **ADD** - #711 Make Future.completeWith work with an uncompleted Future (Derek Williams) -- **UPD** - #667 Upgrade to Camel 2.7.0 (Martin Krasser) -- **UPD** - Updated HawtDispatch to 1.1 (Hiram Chirino) -- **UPD** - #688 Update Akka 1.1-SNAPSHOT to Scala 2.9.0-RC1 (Viktor Klang) -- **UPD** - #718 Add HawtDispatcher to akka-modules (Viktor Klang) -- **UPD** - #698 Deprecate client-managed actors (Viktor Klang) -- **UPD** - #730 Update Akka and Akka Modules to SBT 0.7.6-RC0 (Viktor Klang) -- **UPD** - #663 Update to latest scalatest (Derek Williams) -- **FIX** - Misc cleanup, API changes and refactorings (Jonas Bonér) -- **FIX** - #675 preStart() is called twice when creating new instance of TypedActor (Debasish Ghosh) -- **FIX** - #704 Write docs for Java Serialization (Debasish Ghosh) -- **FIX** - #645 Change Futures.awaitAll to not throw FutureTimeoutException but return a List[Option[Any]] (Viktor Klang) -- **FIX** - #681 Clean exit using server-managed remote actor via client (Viktor Klang) -- **FIX** - #720 Connection loss when sending to a dead remote actor (Viktor Klang) -- **FIX** - #593 Move Jetty specific stuff (with deps) from akka-http to akka-kernel (Viktor Klang) -- **FIX** - #638 ActiveRemoteClientHandler - Unexpected exception from downstream in remote client (Viktor Klang) -- **FIX** - #655 Remote actors with non-uuid names doesnt work for req./reply-pattern (Viktor Klang) -- **FIX** - #588 RemoteClient.shutdown does not remove client from Map with clients (Viktor Klang) -- **FIX** - #672 Remoting breaks if 
mutual DNS lookup isn't possible (Viktor Klang) -- **FIX** - #699 Remote typed actor per-session server won't start if called method has no result (Viktor Klang) -- **FIX** - #702 Handle ReadTimeoutException in akka-remote (Viktor Klang) -- **FIX** - #708 Fall back to Akka classloader if event-handler class cannot be found. (Viktor Klang) -- **FIX** - #716 Split akka-http and clean-up dependencies (Viktor Klang) -- **FIX** - #721 Inability to parse/load the Config should do a System.exit(-1) (Viktor Klang) -- **FIX** - #722 Race condition in Actor hotswapping (Viktor Klang) -- **FIX** - #723 MessageSerializer CNFE regression (Viktor Klang) -- **FIX** - #680 Remote TypedActor behavior differs from local one when sending to generic interfaces (Viktor Klang) -- **FIX** - #659 Calling await on a Future that is expired and uncompleted should throw an exception (Derek Williams) -- **REM** - #626 Update and clean up dependencies (Viktor Klang) -- **REM** - #623 Remove embedded-repo (Akka + Akka Modules) (Viktor Klang) -- **REM** - #686 Remove SBinary (Viktor Klang) - -Release 1.0-RC6 ----------------------------------------- - -- **FIX** - #628 Supervied TypedActors fails to restart (Viktor Klang) -- **FIX** - #629 Stuck upon actor invocation (Viktor Klang) - -Release 1.0-RC5 ----------------------------------------- - -- **FIX** - Source JARs published to 'src' instead of 'source' || Odd Moller || -- **FIX** - #612 Conflict between Spring autostart=true for Consumer actors and (Martin Krasser) -- **FIX** - #613 Change Akka XML schema URI to http://akka.io/schema/akka (Martin Krasser) -- **FIX** - Spring XSD namespace changed from 'akkasource.org' to 'akka.io' (Viktor Klang) -- **FIX** - Checking for remote secure cookie is disabled by default if no akka.conf is loaded (Viktor Klang) -- **FIX** - Changed Casbah to ScalaToolsRepo for akka-sbt-plugin (Viktor Klang) -- **FIX** - ActorRef.forward now doesn't require the sender to be set on the message (Viktor Klang) - -Release 1.0-RC3 ----------------------------------------- - -- **ADD** - #568 Add autostart attribute to Spring actor configuration (Viktor Klang) -- **ADD** - #586 Allow explicit reconnect for remote clients (Viktor Klang) -- **ADD** - #587 Add possibility for dead letter queues for failed remote sends (Viktor Klang) -- **ADD** - #497 Optimize remote send in local scope (Viktor Klang) -- **ADD** - Improved Java Actor API: akka.actor.Actors (Viktor Klang) -- **ADD** - Improved Scala Actor API: akka.actor.Actor (Viktor Klang) -- **ADD** - #148 Create a testing framework for testing Actors (Roland Kuhn) -- **ADD** - Support Replica Set/Replica Pair connection modes with MongoDB Persistence || Brendan McAdams || -- **ADD** - User configurable Write Concern settings for MongoDB Persistence || Brendan McAdams || -- **ADD** - Support for configuring MongoDB Persistence with MongoDB's URI Connection String || Brendan McAdams || -- **ADD** - Support for Authentication with MongoDB Persistence || Brendan McAdams || -- **FIX** - Misc bug fixes || Team || -- **FIX** - #603 Race condition in Remote send (Viktor Klang) -- **FIX** - #594 Log statement in RemoteClientHandler was wrongly formatted (Viktor Klang) -- **FIX** - #580 Message uuids must be generated (Viktor Klang) -- **FIX** - #583 Serialization classloader has a visibility issue (Viktor Klang) -- **FIX** - #598 By default the bean ID should become the actor id for Spring actor configuration (Viktor Klang) -- **FIX** - #577 RemoteClientHandler swallows certain exceptions (Viktor Klang) -- 
**FIX** - #581 Fix edgecase where an exception could not be deserialized (Viktor Klang) -- **FIX** - MongoDB write success wasn't being properly checked; fixed (integrated w/ new write concern features) || Brendan McAdams || -- **UPD** - Improvements to FSM module akka.actor.FSM || Manie & Kuhn || -- **UPD** - Changed Akka URI to http://akka.io. Reflects both XSDs, Maven repositories etc. (Jonas Bonér) -- **REM** - #574 Remote RemoteClient, RemoteServer and RemoteNode (Viktor Klang) -- **REM** - object UntypedActor, object ActorRegistry, class RemoteActor, class RemoteUntypedActor, class RemoteUntypedConsumerActor (Viktor Klang) - -Release 1.0-RC1 ----------------------------------------- - -- **ADD** - #477 Added support for Remote Agents (Viktor Klang) -- **ADD** - #460 Hotswap for Java API (UntypedActor) (Viktor Klang) -- **ADD** - #471 Added support for TypedActors to return Java Option (Viktor Klang) -- **ADD** - New design and API for more fluent and intuitive FSM module (Roland Kuhn) -- **ADD** - Added secure cookie based remote node authentication (Jonas Bonér) -- **ADD** - Untrusted safe mode for remote server (Jonas Bonér) -- **ADD** - Refactored config file format - added list of enabled modules etc. (Jonas Bonér) -- **ADD** - Docs for Dataflow Concurrency (Jonas Bonér) -- **ADD** - Made remote message frame size configurable (Jonas Bonér) -- **ADD** - #496 Detect when Remote Client disconnects (Jonas Bonér) -- **ADD** - #472 Improve API to wait for endpoint activation/deactivation (`more `__ ...) (Martin Krasser) -- **ADD** - #473 Allow consumer actors to customize their own routes (`more `__ ...) (Martin Krasser) -- **ADD** - #504 Add session bound server managed remote actors || Paul Pach || -- **ADD** - DSL for FSM (Irmo Manie) -- **ADD** - Shared unit test for all dispatchers to enforce Actor Model (Viktor Klang) -- **ADD** - #522 Make stacking optional for become and HotSwap (Viktor Klang) -- **ADD** - #524 Make frame size configurable for client&server (Bonér & Klang) -- **ADD** - #526 Add onComplete callback to Future (Viktor Klang) -- **ADD** - #536 Document Channel-abstraction for later replies (Viktor Klang) -- **ADD** - #540 Include self-reference as parameter to HotSwap (Viktor Klang) -- **ADD** - #546 Include Garrick Evans' Akka-mist into master (Viktor Klang) -- **ADD** - #438 Support remove operation in PersistentVector (Scott Clasen) -- **ADD** - #229 Memcached protocol support for Persistence module (Scott Clasen) -- **ADD** - Amazon SimpleDb support for Persistence module (Scott Clasen) -- **FIX** - #518 refactor common storage bakend to use bulk puts/gets where possible (Scott Clasen) -- **FIX** - #532 Prevent persistent datatypes with same uuid from corrupting a TX (Scott Clasen) -- **FIX** - #464 ThreadPoolBuilder should be rewritten to be an immutable builder (Viktor Klang) -- **FIX** - #449 Futures.awaitOne now uses onComplete listeners (Viktor Klang) -- **FIX** - #486 Fixed memory leak caused by Configgy that prevented full unload (Viktor Klang) -- **FIX** - #488 Fixed race condition in EBEDD restart (Viktor Klang) -- **FIX** - #492 Fixed race condition in Scheduler (Viktor Klang) -- **FIX** - #493 Switched to non-https repository for JBoss artifacts (Viktor Klang) -- **FIX** - #481 Exception when creating an actor now behaves properly when supervised (Viktor Klang) -- **FIX** - #498 Fixed no-op in supervision DSL (Viktor Klang) -- **FIX** - #491 ``reply`` and ``reply_?`` now sets a sender reference (Viktor Klang) -- **FIX** - #519 NotSerializableError 
when using Remote Typed Actors (Viktor Klang) -- **FIX** - #523 Message.toString is called all the time for incomign messages, expensive (Viktor Klang) -- **FIX** - #537 Make sure top folder is included in sources jar (Viktor Klang) -- **FIX** - #529 Remove Scala version number from Akka artifact ids (Viktor Klang) -- **FIX** - #533 Can't set LifeCycle from the Java API (Viktor Klang) -- **FIX** - #542 Make Future-returning Remote Typed Actor methods use onComplete (Viktor Klang) -- **FIX** - #479 Do not register listeners when CamelService is turned off by configuration (Martin Krasser) -- **FIX** - Fixed bug with finding TypedActor by type in ActorRegistry (Jonas Bonér) -- **FIX** - #515 race condition in FSM StateTimeout Handling (Irmo Manie) -- **UPD** - Akka package from "se.scalablesolutions.akka" to "akka" (Viktor Klang) -- **UPD** - Update Netty to 3.2.3.Final (Viktor Klang) -- **UPD** - #458 Camel to 2.5.0 (Martin Krasser) -- **UPD** - #458 Spring to 3.0.4.RELEASE (Martin Krasser) -- **UPD** - #458 Jetty to 7.1.6.v20100715 (Martin Krasser) -- **UPD** - Update to Scala 2.8.1 (Jonas Bonér) -- **UPD** - Changed remote server default port to 2552 (AKKA) (Jonas Bonér) -- **UPD** - Cleaned up and made remote protocol more effifient (Jonas Bonér) -- **UPD** - #528 RedisPersistentRef should not throw in case of missing key (Debasish Ghosh) -- **UPD** - #531 Fix RedisStorage add() method in Java API (Debasish Ghosh) -- **UPD** - #513 Implement snapshot based persistence control in SortedSet (Debasish Ghosh) -- **UPD** - #547 Update FSM docs (Irmo Manie) -- **UPD** - #548 Update AMQP docs (Irmo Manie) -- **REM** - Atmosphere integration, replace with Mist (Klang @ Evans) -- **REM** - JGroups integration, doesn't play with cloud services :/ (Viktor Klang) - -Release 1.0-MILESTONE1 ----------------------------------------- - -- **ADD** - Splitted akka-core up in akka-actor, akka-typed-actor & akka-remote (Jonas Bonér) -- **ADD** - Added meta-data to network protocol (Jonas Bonér) -- **ADD** - HotSwap and actor.become now uses a stack of PartialFunctions with API for pushing and popping the stack (Jonas Bonér) -- **ADD** - #440 Create typed actors with constructor args (Michael Kober) -- **ADD** - #322 Abstraction for unification of sender and senderFuture for later reply (Michael Kober) -- **ADD** - #364 Serialization for TypedActor proxy reference (Michael Kober) -- **ADD** - #423 Support configuration of Akka via Spring (Michael Kober) -- **FIX** - #426 UUID wrong for remote proxy for server managed actor (Michael Kober) -- **ADD** - #378 Support for server initiated remote TypedActor and UntypedActor in Spring config (Michael Kober) -- **ADD** - #194 Support for server-managed typed actor ||< Michael Kober || -- **ADD** - #447 Allow Camel service to be turned off by configuration (Martin Krasser) -- **ADD** - #457 JavaAPI improvements for akka-camel (please read the `migration guide `_) (Martin Krasser) -- **ADD** - #465 Dynamic message routing to actors (`more `__ ...) 
(Martin Krasser) -- **FIX** - #410 Use log configuration from config directory (Martin Krasser) -- **FIX** - #343 Some problems with persistent structures (Debasish Ghosh) -- **FIX** - #430 Refactor / re-implement MongoDB adapter so that it conforms to the guidelines followed in Redis and Cassandra modules (Debasish Ghosh) -- **FIX** - #436 ScalaJSON serialization does not map Int data types properly when used within a Map (Debasish Ghosh) -- **ADD** - #230 Update redisclient to be Redis 2.0 compliant (Debasish Ghosh) -- **FIX** - #435 Mailbox serialization does not retain messages (Debasish Ghosh) -- **ADD** - #445 Integrate type class based serialization of sjson into Akka (Debasish Ghosh) -- **FIX** - #480: Regression multibulk replies redis client (Debasish Ghosh) -- **FIX** - #415 Publish now generate source and doc jars (Viktor Klang) -- **FIX** - #420 REST endpoints should be able to be processed in parallel (Viktor Klang) -- **FIX** - #422 Dispatcher config should work for ThreadPoolBuilder-based dispatchers (Viktor Klang) -- **FIX** - #401 ActorRegistry should not leak memory (Viktor Klang) -- **FIX** - #250 Performance optimization for Dispatcher (Viktor Klang) -- **FIX** - #419 Rename init and shutdown callbacks to preStart and postStop, and remove initTransactionalState (Viktor Klang) -- **FIX** - #346 Make max no of restarts (and within) are now both optional (Viktor Klang) -- **FIX** - #424 Actors self.supervisor not set by the time init() is called when started by startLink() (Viktor Klang) -- **FIX** - #427 spawnLink and startLink now has the same dispatcher semantics (Viktor Klang) -- **FIX** - #413 Actor shouldn't process more messages when waiting to be restarted (HawtDispatcher still does) (Viktor Klang) -- **FIX** - !! and !!! now do now not block the actor when used in remote actor (Viktor Klang) -- **FIX** - RemoteClient now reconnects properly (Viktor Klang) -- **FIX** - Logger.warn now properly works with varargs (Viktor Klang) -- **FIX** - #450 Removed ActorRef lifeCycle boilerplate: Some(LifeCycle(Permanent)) => Permanent (Viktor Klang) -- **FIX** - Moved ActorRef.trapExit into ActorRef.faultHandler and removed Option-boilerplate from faultHandler (Viktor Klang) -- **FIX** - PinnedDispatcher cheaper for idling actors, also benefits from all that is Dispatcher (Viktor Klang) -- **FIX** - Fixing Futures.future, uses Actor.spawn under the hood, specify dispatcher to control where block is executed (Viktor Klang) -- **FIX** - #469 Akka "dist" now uses a root folder to avoid loitering if unzipped in a folder (Viktor Klang) -- **FIX** - Removed ScalaConfig, JavaConfig and rewrote Supervision configuration (Viktor Klang) -- **UPD** - Jersey to 1.3 (Viktor Klang) -- **UPD** - Atmosphere to 0.6.2 (Viktor Klang) -- **UPD** - Netty to 3.2.2.Final (Viktor Klang) -- **ADD** - Changed config file priority loading and added config modes. 
(Viktor Klang) -- **ADD** - #411 Bumped Jetty to v 7 and migrated to it's eclipse packages (Viktor Klang) -- **ADD** - #414 Migrate from Grizzly to Jetty for Akka Microkernel (Viktor Klang) -- **ADD** - #261 Add Java API for 'routing' module (Viktor Klang) -- **ADD** - #262 Add Java API for Agent (Viktor Klang) -- **ADD** - #264 Add Java API for Dataflow (Viktor Klang) -- **ADD** - Using JerseySimpleBroadcaster instead of JerseyBroadcaster in AkkaBroadcaster (Viktor Klang) -- **ADD** - #433 Throughput deadline added for Dispatcher (Viktor Klang) -- **ADD** - Add possibility to set default cometSupport in akka.conf (Viktor Klang) -- **ADD** - #451 Added possibility to use akka-http as a standalone REST server (Viktor Klang) -- **ADD** - #446 Added support for Erlang-style receiveTimeout (Viktor Klang) -- **ADD** - #462 Added support for suspend/resume of processing individual actors mailbox, should give clearer restart semantics (Viktor Klang) -- **ADD** - #466 Actor.spawn now takes an implicit dispatcher to specify who should run the block (Viktor Klang) -- **ADD** - #456 Added map to Future and Futures.awaitMap (Viktor Klang) -- **REM** - #418 Remove Lift sample module and docs (Viktor Klang) -- **REM** - Removed all Reactor-based dispatchers (Viktor Klang) -- **REM** - Removed anonymous actor factories (Viktor Klang) -- **ADD** - Voldemort support for akka-persistence (Scott Clasen) -- **ADD** - HBase support for akka-persistence (David Greco) -- **ADD** - CouchDB support for akka-persistence (Yung-Luen Lan & Kahlen) -- **ADD** - #265 Java API for AMQP module (Irmo Manie) - -Release 0.10 - Aug 21 2010 ----------------------------------------- - -- **ADD** - Added new Actor type: UntypedActor for Java API (Jonas Bonér) -- **ADD** - #26 Deep serialization of Actor including its mailbox (Jonas Bonér) -- **ADD** - Rewritten network protocol. More efficient and cleaner. (Jonas Bonér) -- **ADD** - Rewritten Java Active Object tests into Scala to be able to run the in SBT. (Jonas Bonér) -- **ADD** - Added isDefinedAt method to Actor for checking if it can receive a certain message (Jonas Bonér) -- **ADD** - Added caching of Active Object generated class bytes, huge perf improvement (Jonas Bonér) -- **ADD** - Added RemoteClient Listener API (Jonas Bonér) -- **ADD** - Added methods to retrieve children from a Supervisor (Jonas Bonér) -- **ADD** - Rewritten Supervisor to become more clear and "correct" (Jonas Bonér) -- **ADD** - Added options to configure a blocking mailbox with custom capacity (Jonas Bonér) -- **ADD** - Added RemoteClient reconnection time window configuration option (Jonas Bonér) -- **ADD** - Added ActiveObjectContext with sender reference etc (Jonas Bonér) -- **ADD** - #293 Changed config format to JSON-style (Jonas Bonér) -- **ADD** - #302: Incorporate new ReceiveTimeout in Actor serialization (Jonas Bonér) -- **ADD** - Added Java API docs and made it comparable with Scala API docs. 1-1 mirroring (Jonas Bonér) -- **ADD** - Renamed Active Object to Typed Actor (Jonas Bonér) -- **ADD** - Enhanced Typed Actor: remoting, "real" restart upon failure etc. (Jonas Bonér) -- **ADD** - Typed Actor now inherits Actor and is a full citizen in the Actor world. (Jonas Bonér) -- **ADD** - Added support for remotely shutting down a remote actor (Jonas Bonér) -- **ADD** - #224 Add support for Camel in typed actors (`more `__ ...) (Martin Krasser) -- **ADD** - #282 Producer trait should implement Actor.receive (`more `__...) 
(Martin Krasser) -- **ADD** - #271 Support for bean scope prototype in akka-spring (Johan Rask) -- **ADD** - Support for DI of values and bean references on target instance in akka-spring (Johan Rask) -- **ADD** - #287 Method annotated with @postrestart in ActiveObject is not called during restart (Johan Rask) -- **ADD** - Support for ApplicationContextAware in akka-spring (Johan Rask) -- **ADD** - #199 Support shutdown hook in TypedActor (Martin Krasser) -- **ADD** - #266 Access to typed actors from user-defined Camel routes (`more `__ ...) (Martin Krasser) -- **ADD** - #268 Revise akka-camel documentation (`more `__ ...) (Martin Krasser) -- **ADD** - #289 Support for Spring configuration element (`more `__ ...) (Martin Krasser) -- **ADD** - #296 TypedActor lifecycle management (Martin Krasser) -- **ADD** - #297 Shutdown routes to typed actors (`more `__ ...) (Martin Krasser) -- **ADD** - #314 akka-spring to support typed actor lifecycle management (`more `__ ...) (Martin Krasser) -- **ADD** - #315 akka-spring to support configuration of shutdown callback method (`more `__ ...) (Martin Krasser) -- **ADD** - Fault-tolerant consumer actors and typed consumer actors (`more `__ ...) (Martin Krasser) -- **ADD** - #320 Leverage Camel's non-blocking routing engine (`more `__ ...) (Martin Krasser) -- **ADD** - #335 Producer trait should allow forwarding of results (Martin Krasser) -- **ADD** - #339 Redesign of Producer trait (pre/post processing hooks, async in-out) (`more `__ ...) (Martin Krasser) -- **ADD** - Non-blocking, asynchronous routing example for akka-camel (`more `__ ...) (Martin Krasser) -- **ADD** - #333 Allow applications to wait for endpoints being activated (`more `__ ...) (Martin Krasser) -- **ADD** - #356 Support @consume annotations on typed actor implementation class (Martin Krasser) -- **ADD** - #357 Support untyped Java actors as endpoint consumer (Martin Krasser) -- **ADD** - #366 CamelService should be a singleton (Martin Krasser) -- **ADD** - #392 Support untyped Java actors as endpoint producer (Martin Krasser) -- **ADD** - #393 Redesign CamelService singleton to be a CamelServiceManager (`more `__ ...) (Martin Krasser) -- **ADD** - #295 Refactoring Actor serialization to type classes (Debasish Ghosh) -- **ADD** - #317 Change documentation for Actor Serialization (Debasish Ghosh) -- **ADD** - #388 Typeclass serialization of ActorRef/UntypedActor isn't Java friendly (Debasish Ghosh) -- **ADD** - #292 Add scheduleOnce to Scheduler (Irmo Manie) -- **ADD** - #308 Initial receive timeout on actor (Irmo Manie) -- **ADD** - Redesign of AMQP module (`more `__ ...) 
(Irmo Manie) -- **ADD** - Added "become(behavior: Option[Receive])" to Actor (Viktor Klang) -- **ADD** - Added "find[T](f: PartialFunction[ActorRef,T]) : Option[T]" to ActorRegistry (Viktor Klang) -- **ADD** - #369 Possibility to configure dispatchers in akka.conf (Viktor Klang) -- **ADD** - #395 Create ability to add listeners to RemoteServer (Viktor Klang) -- **ADD** - #225 Add possibility to use Scheduler from TypedActor (Viktor Klang) -- **ADD** - #61 Integrate new persistent datastructures in Scala 2.8 (Peter Vlugter) -- **ADD** - Expose more of what Multiverse can do (Peter Vlugter) -- **ADD** - #205 STM transaction settings (Peter Vlugter) -- **ADD** - #206 STM transaction deferred and compensating (Peter Vlugter) -- **ADD** - #232 Expose blocking transactions (Peter Vlugter) -- **ADD** - #249 Expose Multiverse Refs for primitives (Peter Vlugter) -- **ADD** - #390 Expose transaction propagation level in multiverse (Peter Vlugter) -- **ADD** - Package objects for importing local/global STM (Peter Vlugter) -- **ADD** - Java API for the STM (Peter Vlugter) -- **ADD** - #379 Create STM Atomic templates for Java API (Peter Vlugter) -- **ADD** - #270 SBT plugin for Akka (Peter Vlugter) -- **ADD** - #198 support for PinnedDispatcher in Spring config (Michael Kober) -- **ADD** - #377 support HawtDispatcher in Spring config (Michael Kober) -- **ADD** - #376 support Spring config for untyped actors (Michael Kober) -- **ADD** - #200 support WorkStealingDispatcher in Spring config (Michael Kober) -- **UPD** - #336 RabbitMQ 1.8.1 (Irmo Manie) -- **UPD** - #288 Netty to 3.2.1.Final (Viktor Klang) -- **UPD** - Atmosphere to 0.6.1 (Viktor Klang) -- **UPD** - Lift to 2.8.0-2.1-M1 (Viktor Klang) -- **UPD** - Camel to 2.4.0 (Martin Krasser) -- **UPD** - Spring to 3.0.3.RELEASE (Martin Krasser) -- **UPD** - Multiverse to 0.6 (Peter Vlugter) -- **FIX** - Fixed bug with stm not being enabled by default when no AKKA_HOME is set (Jonas Bonér) -- **FIX** - Fixed bug in network manifest serialization (Jonas Bonér) -- **FIX** - Fixed bug Remote Actors (Jonas Bonér) -- **FIX** - Fixed memory leak in Active Objects (Jonas Bonér) -- **FIX** - Fixed indeterministic deadlock in Transactor restart (Jonas Bonér) -- **FIX** - #325 Fixed bug in STM with dead hanging CountDownCommitBarrier (Jonas Bonér) -- **FIX** - #316: NoSuchElementException during ActiveObject restart (Jonas Bonér) -- **FIX** - #256: Tests for ActiveObjectContext (Jonas Bonér) -- **FIX** - Fixed bug in restart of Actors with 'Temporary' life-cycle (Jonas Bonér) -- **FIX** - #280 Tests fail if there is no akka.conf set (Jonas Bonér) -- **FIX** - #286 unwanted transitive dependencies from Geronimo project (Viktor Klang) -- **FIX** - Atmosphere comet comment to use stream instead of writer (Viktor Klang) -- **FIX** - #285 akka.conf is now used as defaults for Akka REST servlet init parameters (Viktor Klang) -- **FIX** - #321 fixed performance regression in ActorRegistry (Viktor Klang) -- **FIX** - #286 geronimo servlet 2.4 dep is no longer transitively loaded (Viktor Klang) -- **FIX** - #334 partial lift sample rewrite to fix breakage (Viktor Klang) -- **FIX** - Fixed a memory leak in ActorRegistry (Viktor Klang) -- **FIX** - Fixed a race-condition in Cluster (Viktor Klang) -- **FIX** - #355 Switched to Array instead of List on ActorRegistry return types (Viktor Klang) -- **FIX** - #352 ActorRegistry.actorsFor(class) now checks isAssignableFrom (Viktor Klang) -- **FIX** - Fixed a race condition in ActorRegistry.register (Viktor Klang) -- **FIX** - 
#337 Switched from Configgy logging to SLF4J, better for OSGi (Viktor Klang) -- **FIX** - #372 Scheduler now returns Futures to cancel tasks (Viktor Klang) -- **FIX** - #306 JSON serialization between remote actors is not transparent (Debasish Ghosh) -- **FIX** - #204 Reduce object creation in STM (Peter Vlugter) -- **FIX** - #253 Extend Multiverse BasicRef rather than wrap ProgrammaticRef (Peter Vlugter) -- **REM** - Removed pure POJO-style Typed Actor (old Active Object) (Jonas Bonér) -- **REM** - Removed Lift as a dependency for Akka-http (Viktor Klang) -- **REM** - #294 Remove ``reply`` and ``reply_?`` from Actor (Viktor Klang) -- **REM** - Removed one field in Actor, should be a minor memory reduction for high actor quantities (Viktor Klang) -- **FIX** - #301 DI does not work in akka-spring when specifying an interface (Johan Rask) -- **FIX** - #328 trapExit should pass through self with Exit to supervisor (Irmo Manie) -- **FIX** - Fixed warning when deregistering listeners (Martin Krasser) -- **FIX** - Added camel-jetty-2.4.0.1 to Akka's embedded-repo. (fixes a concurrency bug in camel-jetty-2.4.0, to be officially released in Camel 2.5.0) (Martin Krasser) -- **FIX** - #338 RedisStorageBackend fails when redis closes connection to idle client (Debasish Ghosh) -- **FIX** - #340 RedisStorage Map.get does not throw exception when disconnected from redis but returns None (Debasish Ghosh) - -Release 0.9 - June 2th 2010 ----------------------------------------- - -- **ADD** - Serializable, immutable, network-aware ActorRefs (Jonas Bonér) -- **ADD** - Optionally JTA-aware STM transactions (Jonas Bonér) -- **ADD** - Rewritten supervisor management, making use of ActorRef, now really kills the Actor instance and replaces it (Jonas Bonér) -- **ADD** - Allow linking and unlinking a declaratively configured Supervisor (Jonas Bonér) -- **ADD** - Remote protocol rewritten to allow passing along sender reference in all situations (Jonas Bonér) -- **ADD** - #37 API for JTA usage (Jonas Bonér) -- **ADD** - Added user accessible 'sender' and 'senderFuture' references (Jonas Bonér) -- **ADD** - Sender actor is now passed along for all message send functions (!, !!, !!!, forward) (Jonas Bonér) -- **ADD** - Subscription API for listening to RemoteClient failures (Jonas Bonér) -- **ADD** - Implemented link/unlink for ActiveObjects || Jan Kronquist / Michael Kober || -- **ADD** - Added alter method to TransactionalRef + added appl(initValue) to Transactional Map/Vector/Ref (Peter Vlugter) -- **ADD** - Load dependency JARs in JAR deloyed in kernel's ,/deploy dir (Jonas Bonér) -- **ADD** - Allowing using Akka without specifying AKKA_HOME or path to akka.conf config file (Jonas Bonér) -- **ADD** - Redisclient now supports PubSub (Debasish Ghosh) -- **ADD** - Added a sample project under akka-samples for Redis PubSub using Akka actors (Debasish Ghosh) -- **ADD** - Richer API for Actor.reply (Viktor Klang) -- **ADD** - Added Listeners to Akka patterns (Viktor Klang) -- **ADD** - #183 Deactivate endpoints of stopped consumer actors (Martin Krasser) -- **ADD** - Camel `Message API improvements `_ (Martin Krasser) -- **ADD** - #83 Send notification to parent supervisor if all actors supervised by supervisor has been permanently killed (Jonas Bonér) -- **ADD** - #121 Make it possible to dynamically create supervisor hierarchies for Active Objects (Michael Kober) -- **ADD** - #131 Subscription API for node joining & leaving cluster (Jonas Bonér) -- **ADD** - #145 Register listener for errors in 
RemoteClient/RemoteServer (Jonas Bonér) -- **ADD** - #146 Create an additional distribution with sources (Jonas Bonér) -- **ADD** - #149 Support loading JARs from META-INF/lib in JARs put into the ./deploy directory (Jonas Bonér) -- **ADD** - #166 Implement insertVectorStorageEntriesFor in CassandraStorageBackend (Jonas Bonér) -- **ADD** - #168 Separate ID from Value in Actor; introduce ActorRef (Jonas Bonér) -- **ADD** - #174 Create sample module for remote actors (Jonas Bonér) -- **ADD** - #175 Add new sample module with Peter Vlugter's Ant demo (Jonas Bonér) -- **ADD** - #177 Rewrite remote protocol to make use of new ActorRef (Jonas Bonér) -- **ADD** - #180 Make use of ActorRef indirection for fault-tolerance management (Jonas Bonér) -- **ADD** - #184 Upgrade to Netty 3.2.0.CR1 (Jonas Bonér) -- **ADD** - #185 Rewrite Agent and Supervisor to work with new ActorRef (Jonas Bonér) -- **ADD** - #188 Change the order of how the akka.conf is detected (Jonas Bonér) -- **ADD** - #189 Reintroduce 'sender: Option[Actor]' ref in Actor (Jonas Bonér) -- **ADD** - #203 Upgrade to Scala 2.8 RC2 (Jonas Bonér) -- **ADD** - #222 Using Akka without AKKA_HOME or akka.conf (Jonas Bonér) -- **ADD** - #234 Add support for injection and management of ActiveObjectContext with RTTI such as 'sender' and 'senderFuture' references etc. (Jonas Bonér) -- **ADD** - #236 Upgrade SBinary to Scala 2.8 RC2 (Jonas Bonér) -- **ADD** - #235 Problem with RedisStorage.getVector(..) data structure storage management (Jonas Bonér) -- **ADD** - #239 Upgrade to Camel 2.3.0 (Martin Krasser) -- **ADD** - #242 Upgraded to Scala 2.8 RC3 (Jonas Bonér) -- **ADD** - #243 Upgraded to Protobuf 2.3.0 (Jonas Bonér) -- **ADD** - Added option to specify class loader when de-serializing messages and RemoteActorRef in RemoteClient (Jonas Bonér) -- **ADD** - #238 Upgrading to Cassandra 0.6.1 (Jonas Bonér) -- **ADD** - Upgraded to Jersey 1.2 (Viktor Klang) -- **ADD** - Upgraded Atmosphere to 0.6-SNAPSHOT, adding WebSocket support (Viktor Klang) -- **FIX** - Simplified ActiveObject configuration (Michael Kober) -- **FIX** - #237 Upgrade Mongo Java driver to 1.4 (the latest stable release) (Debasish Ghosh) -- **FIX** - #165 Implemented updateVectorStorageEntryFor in Mongo persistence module (Debasish Ghosh) -- **FIX** - #154: Allow ActiveObjects to use the default timeout in config file (Michael Kober) -- **FIX** - Active Object methods with @inittransactionalstate should be invoked automatically (Michael Kober) -- **FIX** - Nested supervisor hierarchy failure propagation bug fixed (Jonas Bonér) -- **FIX** - Fixed bug on CommitBarrier transaction registration (Jonas Bonér) -- **FIX** - Merged many modules to reduce total number of modules (Viktor Klang) -- **FIX** - Future parameterized (Viktor Klang) -- **FIX** - #191: Workstealing dispatcher didn't work with !! (Viktor Klang) -- **FIX** - #202: Allow applications to disable stream-caching (Martin Krasser) -- **FIX** - #119 Problem with Cassandra-backed Vector (Jonas Bonér) -- **FIX** - #147 Problem replying to remote sender when message sent with ! 
(Jonas Bonér) -- **FIX** - #171 initial value of Ref can become null if first transaction rolled back (Jonas Bonér) -- **FIX** - #172 Fix "broken" Protobuf serialization API (Jonas Bonér) -- **FIX** - #173 Problem with Vector::slice in CassandraStorage (Jonas Bonér) -- **FIX** - #190 RemoteClient shutdown ends up in endless loop (Jonas Bonér) -- **FIX** - #211 Problem with getting CommitBarrierOpenException when using Transaction.Global (Jonas Bonér) -- **FIX** - #240 Supervised actors not started when starting supervisor (Jonas Bonér) -- **FIX** - Fixed problem with Transaction.Local not committing to persistent storage (Jonas Bonér) -- **FIX** - #215: Re-engineered the JAX-RS support (Viktor Klang) -- **FIX** - Many many bug fixes || Team || -- **REM** - Shoal cluster module (Viktor Klang) - -Release 0.8.1 - April 6th 2010 ----------------------------------------- - -- **ADD** - Redis cluster support (Debasish Ghosh) -- **ADD** - Reply to remote sender from message set with ! (Jonas Bonér) -- **ADD** - Load-balancer which prefers actors with few messages in mailbox || Jan Van Besien || -- **ADD** - Added developer mailing list: [akka-dev AT googlegroups DOT com] (Jonas Bonér) -- **FIX** - Separated thread-local from thread-global transaction API (Jonas Bonér) -- **FIX** - Fixed bug in using STM outside Actors (Jonas Bonér) -- **FIX** - Fixed bug in anonymous actors (Jonas Bonér) -- **FIX** - Moved web initializer to new akka-servlet module (Viktor Klang) - -Release 0.8 - March 31st 2010 ----------------------------------------- - -- **ADD** - Scala 2.8 based (Viktor Klang) -- **ADD** - Monadic API for Agents (Jonas Bonér) -- **ADD** - Agents are transactional (Jonas Bonér) -- **ADD** - Work-stealing dispatcher || Jan Van Besien || -- **ADD** - Improved Spring integration (Michael Kober) -- **FIX** - Various bugfixes || Team || -- **FIX** - Improved distribution packaging (Jonas Bonér) -- **REMOVE** - Actor.send function (Jonas Bonér) - -Release 0.7 - March 21st 2010 ----------------------------------------- - -- **ADD** - Rewritten STM now works generically with fire-forget message flows (Jonas Bonér) -- **ADD** - Apache Camel integration (Martin Krasser) -- **ADD** - Spring integration (Michael Kober) -- **ADD** - Server-managed Remote Actors (Jonas Bonér) -- **ADD** - Clojure-style Agents (Viktor Klang) -- **ADD** - Shoal cluster backend (Viktor Klang) -- **ADD** - Redis-based transactional queue storage backend (Debasish Ghosh) -- **ADD** - Redis-based transactional sorted set storage backend (Debasish Ghosh) -- **ADD** - Redis-based atomic INC (index) operation (Debasish Ghosh) -- **ADD** - Distributed Comet (Viktor Klang) -- **ADD** - Project moved to SBT (simple-build-tool) || Peter Hausel || -- **ADD** - Futures object with utility methods for Future's (Jonas Bonér) -- **ADD** - !!! 
function that returns a Future (Jonas Bonér) -- **ADD** - Richer ActorRegistry API (Jonas Bonér) -- **FIX** - Improved event-based dispatcher performance with 40% || Jan Van Besien || -- **FIX** - Improved remote client pipeline performance (Viktor Klang) -- **FIX** - Support several Clusters on the same network (Viktor Klang) -- **FIX** - Structural package refactoring (Jonas Bonér) -- **FIX** - Various bugs fixed || Team || - -Release 0.6 - January 5th 2010 ----------------------------------------- - -- **ADD** - Clustered Comet using Akka remote actors and clustered membership API (Viktor Klang) -- **ADD** - Cluster membership API and implementation based on JGroups (Viktor Klang) -- **ADD** - Security module for HTTP-based authentication and authorization (Viktor Klang) -- **ADD** - Support for using Scala XML tags in RESTful Actors (scala-jersey) (Viktor Klang) -- **ADD** - Support for Comet Actors using Atmosphere (Viktor Klang) -- **ADD** - MongoDB as Akka storage backend (Debasish Ghosh) -- **ADD** - Redis as Akka storage backend (Debasish Ghosh) -- **ADD** - Transparent JSON serialization of Scala objects based on SJSON (Debasish Ghosh) -- **ADD** - Kerberos/SPNEGO support for Security module || Eckhart Hertzler || -- **ADD** - Implicit sender for remote actors: Remote actors are able to use reply to answer a request || Mikael Högqvist || -- **ADD** - Support for using the Lift Web framework with Actors || Tim Perrett || -- **ADD** - Added CassandraSession API (with socket pooling) wrapping Cassandra's Thrift API in Scala and Java APIs (Jonas Bonér) -- **ADD** - Rewritten STM, now integrated with Multiverse STM (Jonas Bonér) -- **ADD** - Added STM API for atomic {..} and run {..} orElse {..} (Jonas Bonér) -- **ADD** - Added STM retry (Jonas Bonér) -- **ADD** - AMQP integration; abstracted as actors in a supervisor hierarchy. Impl AMQP 0.9.1 (Jonas Bonér) -- **ADD** - Complete rewrite of the persistence transaction management, now based on Unit of Work and Multiverse STM (Jonas Bonér) -- **ADD** - Monadic API to TransactionalRef (use it in for-comprehension) (Jonas Bonér) -- **ADD** - Lightweight actor syntax using one of the Actor.actor(..) methods. F.e: 'val a = actor { case _ => .. }' (Jonas Bonér) -- **ADD** - Rewritten event-based dispatcher which improved perfomance by 10x, now substantially faster than event-driven Scala Actors (Jonas Bonér) -- **ADD** - New Scala JSON parser based on sjson (Jonas Bonér) -- **ADD** - Added zlib compression to remote actors (Jonas Bonér) -- **ADD** - Added implicit sender reference for fire-forget ('!') message sends (Jonas Bonér) -- **ADD** - Monadic API to TransactionalRef (use it in for-comprehension) (Jonas Bonér) -- **ADD** - Smoother web app integration; just add akka.conf to the classpath (WEB-INF/classes), no need for AKKA_HOME or -Dakka.conf=.. 
(Jonas Bonér) -- **ADD** - Modularization of distribution into a thin core (actors, remoting and STM) and the rest in submodules (Jonas Bonér) -- **ADD** - Added 'forward' to Actor, forwards message but keeps original sender address (Jonas Bonér) -- **ADD** - JSON serialization for Java objects (using Jackson) (Jonas Bonér) -- **ADD** - JSON serialization for Scala objects (using SJSON) (Jonas Bonér) -- **ADD** - Added implementation for remote actor reconnect upon failure (Jonas Bonér) -- **ADD** - Protobuf serialization for Java and Scala objects (Jonas Bonér) -- **ADD** - SBinary serialization for Scala objects (Jonas Bonér) -- **ADD** - Protobuf as remote protocol (Jonas Bonér) -- **ADD** - Updated Cassandra integration and CassandraSession API to v0.4 (Jonas Bonér) -- **ADD** - CassandraStorage is now works with external Cassandra cluster (Jonas Bonér) -- **ADD** - ActorRegistry for retrieving Actor instances by class name and by id (Jonas Bonér) -- **ADD** - SchedulerActor for scheduling periodic tasks (Jonas Bonér) -- **ADD** - Now start up kernel with 'java -jar dist/akka-0.6.jar' (Jonas Bonér) -- **ADD** - Added Akka user mailing list: akka-user AT googlegroups DOT com]] (Jonas Bonér) -- **ADD** - Improved and restructured documentation (Jonas Bonér) -- **ADD** - New URL: http://akkasource.org (Jonas Bonér) -- **ADD** - New and much improved docs (Jonas Bonér) -- **ADD** - Enhanced trapping of failures: 'trapExit = List(classOf[..], classOf[..])' (Jonas Bonér) -- **ADD** - Upgraded to Netty 3.2, Protobuf 2.2, ScalaTest 1.0, Jersey 1.1.3, Atmosphere 0.4.1, Cassandra 0.4.1, Configgy 1.4 (Jonas Bonér) -- **FIX** - Lowered actor memory footprint; now an actor consumes ~600 bytes, which mean that you can create 6.5 million on 4 GB RAM (Jonas Bonér) -- **FIX** - Remote actors are now defined by their UUID (not class name) (Jonas Bonér) -- **FIX** - Fixed dispatcher bugs (Jonas Bonér) -- **FIX** - Cleaned up Maven scripts and distribution in general (Jonas Bonér) -- **FIX** - Fixed many many bugs and minor issues (Jonas Bonér) -- **FIX** - Fixed inconsistencies and uglyness in Actors API (Jonas Bonér) -- **REMOVE** - Removed concurrent mode (Jonas Bonér) -- **REMOVE** - Removed embedded Cassandra mode (Jonas Bonér) -- **REMOVE** - Removed the !? method in Actor (synchronous message send, since it's evil. Use !! with time-out instead. (Jonas Bonér) -- **REMOVE** - Removed startup scripts and lib dir (Jonas Bonér) -- **REMOVE** - Removed the 'Transient' life-cycle scope since to close to 'Temporary' in semantics. (Jonas Bonér) -- **REMOVE** - Removed 'Transient' Actors and restart timeout (Jonas Bonér) +Release notes for 2.0 will be written. 
diff --git a/akka-docs/scala/index.rst b/akka-docs/scala/index.rst index cbcca36b6c..e4f7e838fe 100644 --- a/akka-docs/scala/index.rst +++ b/akka-docs/scala/index.rst @@ -17,5 +17,4 @@ Scala API dispatchers routing fsm - http testing From fb4faab85092c121c4d842a8de3a4416088a2b3c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 12 Dec 2011 12:35:44 +0100 Subject: [PATCH 23/27] DOC: fixed other-doc --- akka-docs/project/other-doc.rst | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/akka-docs/project/other-doc.rst b/akka-docs/project/other-doc.rst index 7abfc8c8df..aeba2d9c13 100644 --- a/akka-docs/project/other-doc.rst +++ b/akka-docs/project/other-doc.rst @@ -13,13 +13,24 @@ Automatically published documentation for the latest SNAPSHOT version of Akka ca be found here: - Akka - http://akka.io/docs/akka/snapshot/ (or in `PDF format `__) -- Akka Modules - http://akka.io/docs/akka-modules/snapshot/ (or in `PDF format `__) Release Versions ================ +1.3 +--- + +- Akka 1.3 - http://akka.io/docs/akka/1.3/ (or in `PDF format `__) +- Akka Modules 1.3 - http://akka.io/docs/akka-modules/1.3/ (or in `PDF format `__) + +1.2 +--- + +- Akka 1.2 - http://akka.io/docs/akka/1.2/ (or in `PDF format `__) +- Akka Modules 1.2 - http://akka.io/docs/akka-modules/1.2/ (or in `PDF format `__) + 1.1 --- From 4e9c7de1e6d309e30ee4593de1f3fde279be1a46 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 12 Dec 2011 12:41:41 +0100 Subject: [PATCH 24/27] DOC: fixed links --- akka-docs/project/links.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/akka-docs/project/links.rst b/akka-docs/project/links.rst index d562c28b72..aba41acd3e 100644 --- a/akka-docs/project/links.rst +++ b/akka-docs/project/links.rst @@ -25,7 +25,6 @@ Akka uses Git and is hosted at `Github `_. * Akka: clone the Akka repository from ``_ -* Akka Modules: clone the Akka Modules repository from ``_ `Maven Repository `_ @@ -50,14 +49,14 @@ directly. SNAPSHOT Versions ================= -Nightly builds are available in ``_ repository as +Nightly builds are available in ``_ repository as timestamped snapshot versions. Pick a timestamp from -``_. +``_. All Akka modules that belong to the same build have the same timestamp. Make sure that you add the repository to the sbt resolvers or maven repositories:: - resolvers += "Typesafe Timestamp Repo" at "http://repo.typesafe.com/typesafe/maven-timestamps/" + resolvers += "Typesafe Timestamp Repo" at "http://repo.typesafe.com/typesafe/akka-snapshots/" Define the library dependencies with the timestamp as version:: From 57b03c56ae6f6e70babee8f204ade117bd446452 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 12 Dec 2011 12:49:43 +0100 Subject: [PATCH 25/27] DOC: minor corr --- akka-docs/additional/benchmarks.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/akka-docs/additional/benchmarks.rst b/akka-docs/additional/benchmarks.rst index 6008b98f05..6080203e91 100644 --- a/akka-docs/additional/benchmarks.rst +++ b/akka-docs/additional/benchmarks.rst @@ -11,7 +11,6 @@ Simple Trading system. 
Compares: -- Synchronous Scala solution - Scala library Actors - Fire-forget From 9098f30524b0cb3108b3507ac7dd5c2211d14062 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 12 Dec 2011 14:21:03 +0100 Subject: [PATCH 26/27] DOC: Disabled stm and transactors documentation --- akka-docs/disabled/java-stm.rst | 515 ++++++++++++++++++++++ akka-docs/disabled/java-transactors.rst | 271 ++++++++++++ akka-docs/disabled/scala-stm.rst | 537 +++++++++++++++++++++++ akka-docs/disabled/scala-transactors.rst | 250 +++++++++++ akka-docs/java/stm.rst | 511 +-------------------- akka-docs/java/transactors.rst | 267 +---------- akka-docs/scala/stm.rst | 531 +--------------------- akka-docs/scala/transactors.rst | 246 +---------- 8 files changed, 1577 insertions(+), 1551 deletions(-) create mode 100644 akka-docs/disabled/java-stm.rst create mode 100644 akka-docs/disabled/java-transactors.rst create mode 100644 akka-docs/disabled/scala-stm.rst create mode 100644 akka-docs/disabled/scala-transactors.rst diff --git a/akka-docs/disabled/java-stm.rst b/akka-docs/disabled/java-stm.rst new file mode 100644 index 0000000000..3cbf390bd1 --- /dev/null +++ b/akka-docs/disabled/java-stm.rst @@ -0,0 +1,515 @@ +.. _stm-java: + +Software Transactional Memory (Java) +==================================== + +.. sidebar:: Contents + + .. contents:: :local: + +Module stability: **SOLID** + +Overview of STM +--------------- + +An `STM `_ turns the Java heap into a transactional data set with begin/commit/rollback semantics. Very much like a regular database. It implements the first three letters in ACID; ACI: +* (failure) Atomicity: all changes during the execution of a transaction make it, or none make it. This only counts for transactional datastructures. +* Consistency: a transaction gets a consistent of reality (in Akka you get the Oracle version of the SERIALIZED isolation level). +* Isolated: changes made by concurrent execution transactions are not visible to each other. + +Generally, the STM is not needed that often when working with Akka. Some use-cases (that we can think of) are: + +- When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. +- When you want to share a datastructure across actors. +- When you need to use the persistence modules. + +Akka’s STM implements the concept in `Clojure’s `_ STM view on state in general. Please take the time to read `this excellent document `_ and view `this presentation `_ by Rich Hickey (the genius behind Clojure), since it forms the basis of Akka’s view on STM and state in general. + +The STM is based on Transactional References (referred to as Refs). Refs are memory cells, holding an (arbitrary) immutable value, that implement CAS (Compare-And-Swap) semantics and are managed and enforced by the STM for coordinated changes across many Refs. They are implemented using the excellent `Multiverse STM `_. + +Working with immutable collections can sometimes give bad performance due to extensive copying. Scala provides so-called persistent datastructures which makes working with immutable collections fast. They are immutable but with constant time access and modification. The use of structural sharing and an insert or update does not ruin the old structure, hence “persistent”. Makes working with immutable composite types fast. The persistent datastructures currently consist of a Map and Vector. 
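+
+For intuition, the compare-and-swap part on its own can be sketched with plain
+JDK code using ``java.util.concurrent.atomic.AtomicReference`` (this sketch is
+not part of the Akka STM API and the ``CasCounter`` name is only illustrative).
+A ``Ref`` gives you the same kind of lock-free cell, but the STM additionally
+coordinates changes to many Refs so that they commit or roll back together,
+which a single CAS operation cannot do. Compare it with the ``Ref`` based
+counter in the simple example below.
+
+.. code-block:: java
+
+   import java.util.concurrent.atomic.AtomicReference;
+
+   public class CasCounter {
+     private final AtomicReference<Integer> cell = new AtomicReference<Integer>(0);
+
+     public int increment() {
+       while (true) {
+         Integer oldValue = cell.get();
+         Integer newValue = oldValue + 1;
+         // retry until no other thread has changed the cell in between,
+         // which is conceptually what the STM does for you across many Refs
+         if (cell.compareAndSet(oldValue, newValue)) return newValue;
+       }
+     }
+   }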
+ +Simple example +-------------- + +Here is a simple example of an incremental counter using STM. This shows creating a ``Ref``, a transactional reference, and then modifying it within a transaction, which is delimited by an ``Atomic`` anonymous inner class. + +.. code-block:: java + + import akka.stm.*; + + final Ref ref = new Ref(0); + + public int counter() { + return new Atomic() { + public Integer atomically() { + int inc = ref.get() + 1; + ref.set(inc); + return inc; + } + }.execute(); + } + + counter(); + // -> 1 + + counter(); + // -> 2 + + +Ref +--- + +Refs (transactional references) are mutable references to values and through the STM allow the safe sharing of mutable data. To ensure safety the value stored in a Ref should be immutable. The value referenced by a Ref can only be accessed or swapped within a transaction. Refs separate identity from value. + +Creating a Ref +^^^^^^^^^^^^^^ + +You can create a Ref with or without an initial value. + +.. code-block:: java + + import akka.stm.*; + + // giving an initial value + final Ref ref = new Ref(0); + + // specifying a type but no initial value + final Ref ref = new Ref(); + +Accessing the value of a Ref +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Use ``get`` to access the value of a Ref. Note that if no initial value has been given then the value is initially ``null``. + +.. code-block:: java + + import akka.stm.*; + + final Ref ref = new Ref(0); + + Integer value = new Atomic() { + public Integer atomically() { + return ref.get(); + } + }.execute(); + // -> value = 0 + +Changing the value of a Ref +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To set a new value for a Ref you can use ``set`` (or equivalently ``swap``), which sets the new value and returns the old value. + +.. code-block:: java + + import akka.stm.*; + + final Ref ref = new Ref(0); + + new Atomic() { + public Object atomically() { + return ref.set(5); + } + }.execute(); + + +Transactions +------------ + +A transaction is delimited using an ``Atomic`` anonymous inner class. + +.. code-block:: java + + new Atomic() { + public Object atomically() { + // ... + } + }.execute(); + +All changes made to transactional objects are isolated from other changes, all make it or non make it (so failure atomicity) and are consistent. With the AkkaSTM you automatically have the Oracle version of the SERIALIZED isolation level, lower isolation is not possible. To make it fully serialized, set the writeskew property that checks if a writeskew problem is allowed to happen. + +Retries +^^^^^^^ + +A transaction is automatically retried when it runs into some read or write conflict, until the operation completes, an exception (throwable) is thrown or when there are too many retries. When a read or writeconflict is encountered, the transaction uses a bounded exponential backoff to prevent cause more contention and give other transactions some room to complete. + +If you are using non transactional resources in an atomic block, there could be problems because a transaction can be retried. If you are using print statements or logging, it could be that they are called more than once. So you need to be prepared to deal with this. One of the possible solutions is to work with a deferred or compensating task that is executed after the transaction aborts or commits. + +Unexpected retries +^^^^^^^^^^^^^^^^^^ + +It can happen for the first few executions that you get a few failures of execution that lead to unexpected retries, even though there is not any read or writeconflict. 
The cause of this is that speculative transaction configuration/selection is used. There are transactions optimized for a single transactional object, for 1..n and for n to unlimited. So based on the execution of the transaction, the system learns; it begins with a cheap one and upgrades to more expensive ones. Once it has learned, it will reuse this knowledge. It can be activated/deactivated using the speculative property on the TransactionFactoryBuilder. In most cases it is best use the default value (enabled) so you get more out of performance. + +Coordinated transactions and Transactors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you need coordinated transactions across actors or threads then see :ref:`transactors-java`. + +Configuring transactions +^^^^^^^^^^^^^^^^^^^^^^^^ + +It's possible to configure transactions. The ``Atomic`` class can take a ``TransactionFactory``, which can determine properties of the transaction. A default transaction factory is used if none is specified. You can create a ``TransactionFactory`` with a ``TransactionFactoryBuilder``. + +Configuring transactions with a ``TransactionFactory``: + +.. code-block:: java + + import akka.stm.*; + + TransactionFactory txFactory = new TransactionFactoryBuilder() + .setReadonly(true) + .build(); + + new Atomic(txFactory) { + public Object atomically() { + // read only transaction + return ...; + } + }.execute(); + +The following settings are possible on a TransactionFactory: + +- familyName - Family name for transactions. Useful for debugging because the familyName is shown in exceptions, logging and in the future also will be used for profiling. +- readonly - Sets transaction as readonly. Readonly transactions are cheaper and can be used to prevent modification to transactional objects. +- maxRetries - The maximum number of times a transaction will retry. +- timeout - The maximum time a transaction will block for. +- trackReads - Whether all reads should be tracked. Needed for blocking operations. Readtracking makes a transaction more expensive, but makes subsequent reads cheaper and also lowers the chance of a readconflict. +- writeSkew - Whether writeskew is allowed. Disable with care. +- blockingAllowed - Whether explicit retries are allowed. +- interruptible - Whether a blocking transaction can be interrupted if it is blocked. +- speculative - Whether speculative configuration should be enabled. +- quickRelease - Whether locks should be released as quickly as possible (before whole commit). +- propagation - For controlling how nested transactions behave. +- traceLevel - Transaction trace level. + +You can also specify the default values for some of these options in :ref:`configuration`. + +Transaction lifecycle listeners +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +It's possible to have code that will only run on the successful commit of a transaction, or when a transaction aborts. You can do this by adding ``deferred`` or ``compensating`` blocks to a transaction. + +.. code-block:: java + + import akka.stm.*; + import static akka.stm.StmUtils.deferred; + import static akka.stm.StmUtils.compensating; + + new Atomic() { + public Object atomically() { + deferred(new Runnable() { + public void run() { + // executes when transaction commits + } + }); + compensating(new Runnable() { + public void run() { + // executes when transaction aborts + } + }); + // ... 
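+        // the transactional work itself (for example reads and writes of Refs)
+        // would go here; the deferred Runnable above runs only if that work
+        // commits, and the compensating Runnable only if the transaction aborts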
+ return something; + } + }.execute(); + +Blocking transactions +^^^^^^^^^^^^^^^^^^^^^ + +You can block in a transaction until a condition is met by using an explicit ``retry``. To use ``retry`` you also need to configure the transaction to allow explicit retries. + +Here is an example of using ``retry`` to block until an account has enough money for a withdrawal. This is also an example of using actors and STM together. + +.. code-block:: java + + import akka.stm.*; + + public class Transfer { + private final Ref from; + private final Ref to; + private final double amount; + + public Transfer(Ref from, Ref to, double amount) { + this.from = from; + this.to = to; + this.amount = amount; + } + + public Ref getFrom() { return from; } + public Ref getTo() { return to; } + public double getAmount() { return amount; } + } + +.. code-block:: java + + import akka.stm.*; + import static akka.stm.StmUtils.retry; + import akka.actor.*; + import akka.util.FiniteDuration; + import java.util.concurrent.TimeUnit; + import akka.event.EventHandler; + + public class Transferer extends UntypedActor { + TransactionFactory txFactory = new TransactionFactoryBuilder() + .setBlockingAllowed(true) + .setTrackReads(true) + .setTimeout(new FiniteDuration(60, TimeUnit.SECONDS)) + .build(); + + public void onReceive(Object message) throws Exception { + if (message instanceof Transfer) { + Transfer transfer = (Transfer) message; + final Ref from = transfer.getFrom(); + final Ref to = transfer.getTo(); + final double amount = transfer.getAmount(); + new Atomic(txFactory) { + public Object atomically() { + if (from.get() < amount) { + EventHandler.info(this, "not enough money - retrying"); + retry(); + } + EventHandler.info(this, "transferring"); + from.set(from.get() - amount); + to.set(to.get() + amount); + return null; + } + }.execute(); + } + } + } + +.. code-block:: java + + import akka.stm.*; + import akka.actor.*; + + public class Main { + public static void main(String...args) throws Exception { + final Ref account1 = new Ref(100.0); + final Ref account2 = new Ref(100.0); + + ActorRef transferer = Actors.actorOf(Transferer.class); + + transferer.tell(new Transfer(account1, account2, 500.0)); + // Transferer: not enough money - retrying + + new Atomic() { + public Object atomically() { + return account1.set(account1.get() + 2000); + } + }.execute(); + // Transferer: transferring + + Thread.sleep(1000); + + Double acc1 = new Atomic() { + public Double atomically() { + return account1.get(); + } + }.execute(); + + Double acc2 = new Atomic() { + public Double atomically() { + return account2.get(); + } + }.execute(); + + + + System.out.println("Account 1: " + acc1); + // Account 1: 1600.0 + + System.out.println("Account 2: " + acc2); + // Account 2: 600.0 + + transferer.stop(); + } + } + +Alternative blocking transactions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can also have two alternative blocking transactions, one of which can succeed first, with ``EitherOrElse``. + +.. code-block:: java + + import akka.stm.*; + + public class Branch { + private final Ref left; + private final Ref right; + private final double amount; + + public Branch(Ref left, Ref right, int amount) { + this.left = left; + this.right = right; + this.amount = amount; + } + + public Ref getLeft() { return left; } + + public Ref getRight() { return right; } + + public double getAmount() { return amount; } + } + +.. 
code-block:: java + + import akka.actor.*; + import akka.stm.*; + import static akka.stm.StmUtils.retry; + import akka.util.FiniteDuration; + import java.util.concurrent.TimeUnit; + import akka.event.EventHandler; + + public class Brancher extends UntypedActor { + TransactionFactory txFactory = new TransactionFactoryBuilder() + .setBlockingAllowed(true) + .setTrackReads(true) + .setTimeout(new FiniteDuration(60, TimeUnit.SECONDS)) + .build(); + + public void onReceive(Object message) throws Exception { + if (message instanceof Branch) { + Branch branch = (Branch) message; + final Ref left = branch.getLeft(); + final Ref right = branch.getRight(); + final double amount = branch.getAmount(); + new Atomic(txFactory) { + public Integer atomically() { + return new EitherOrElse() { + public Integer either() { + if (left.get() < amount) { + EventHandler.info(this, "not enough on left - retrying"); + retry(); + } + EventHandler.info(this, "going left"); + return left.get(); + } + public Integer orElse() { + if (right.get() < amount) { + EventHandler.info(this, "not enough on right - retrying"); + retry(); + } + EventHandler.info(this, "going right"); + return right.get(); + } + }.execute(); + } + }.execute(); + } + } + } + +.. code-block:: java + + import akka.stm.*; + import akka.actor.*; + + public class Main2 { + public static void main(String...args) throws Exception { + final Ref left = new Ref(100); + final Ref right = new Ref(100); + + ActorRef brancher = Actors.actorOf(Brancher.class); + + brancher.tell(new Branch(left, right, 500)); + // not enough on left - retrying + // not enough on right - retrying + + Thread.sleep(1000); + + new Atomic() { + public Object atomically() { + return right.set(right.get() + 1000); + } + }.execute(); + // going right + + + + brancher.stop(); + } + } + + +Transactional datastructures +---------------------------- + +Akka provides two datastructures that are managed by the STM. + +- TransactionalMap +- TransactionalVector + +TransactionalMap and TransactionalVector look like regular mutable datastructures, they even implement the standard Scala 'Map' and 'RandomAccessSeq' interfaces, but they are implemented using persistent datastructures and managed references under the hood. Therefore they are safe to use in a concurrent environment. Underlying TransactionalMap is HashMap, an immutable Map but with near constant time access and modification operations. Similarly TransactionalVector uses a persistent Vector. See the Persistent Datastructures section below for more details. + +Like managed references, TransactionalMap and TransactionalVector can only be modified inside the scope of an STM transaction. + +Here is an example of creating and accessing a TransactionalMap: + +.. code-block:: java + + import akka.stm.*; + + // assuming a User class + + final TransactionalMap users = new TransactionalMap(); + + // fill users map (in a transaction) + new Atomic() { + public Object atomically() { + users.put("bill", new User("bill")); + users.put("mary", new User("mary")); + users.put("john", new User("john")); + return null; + } + }.execute(); + + // access users map (in a transaction) + User user = new Atomic() { + public User atomically() { + return users.get("bill").get(); + } + }.execute(); + +Here is an example of creating and accessing a TransactionalVector: + +.. code-block:: java + + import akka.stm.*; + + // assuming an Address class + + final TransactionalVector
<Address> addresses = new TransactionalVector<Address>();
+
+   // fill addresses vector (in a transaction)
+   new Atomic() {
+     public Object atomically() {
+       addresses.add(new Address("somewhere"));
+       addresses.add(new Address("somewhere else"));
+       return null;
+     }
+   }.execute();
+
+   // access addresses vector (in a transaction)
+   Address address = new Atomic<Address>
    () { + public Address atomically() { + return addresses.get(0); + } + }.execute(); + + +Persistent datastructures +------------------------- + +Akka's STM should only be used with immutable data. This can be costly if you have large datastructures and are using a naive copy-on-write. In order to make working with immutable datastructures fast enough Scala provides what are called Persistent Datastructures. There are currently two different ones: + +- HashMap (`scaladoc `__) +- Vector (`scaladoc `__) + +They are immutable and each update creates a completely new version but they are using clever structural sharing in order to make them almost as fast, for both read and update, as regular mutable datastructures. + +This illustration is taken from Rich Hickey's presentation. Copyright Rich Hickey 2009. + +.. image:: ../images/clojure-trees.png + + diff --git a/akka-docs/disabled/java-transactors.rst b/akka-docs/disabled/java-transactors.rst new file mode 100644 index 0000000000..cc069e3d9c --- /dev/null +++ b/akka-docs/disabled/java-transactors.rst @@ -0,0 +1,271 @@ +.. _transactors-java: + +Transactors (Java) +================== + +.. sidebar:: Contents + + .. contents:: :local: + +Module stability: **SOLID** + +Why Transactors? +---------------- + +Actors are excellent for solving problems where you have many independent processes that can work in isolation and only interact with other Actors through message passing. This model fits many problems. But the actor model is unfortunately a terrible model for implementing truly shared state. E.g. when you need to have consensus and a stable view of state across many components. The classic example is the bank account where clients can deposit and withdraw, in which each operation needs to be atomic. For detailed discussion on the topic see `this JavaOne presentation `_. + +**STM** on the other hand is excellent for problems where you need consensus and a stable view of the state by providing compositional transactional shared state. Some of the really nice traits of STM are that transactions compose, and it raises the abstraction level from lock-based concurrency. + +Akka's Transactors combine Actors and STM to provide the best of the Actor model (concurrency and asynchronous event-based programming) and STM (compositional transactional shared state) by providing transactional, compositional, asynchronous, event-based message flows. + +If you need Durability then you should not use one of the in-memory data structures but one of the persistent ones. + +Generally, the STM is not needed very often when working with Akka. Some use-cases (that we can think of) are: + +- When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. +- When you want to share a datastructure across actors. +- When you need to use the persistence modules. + +Actors and STM +^^^^^^^^^^^^^^ + +You can combine Actors and STM in several ways. An Actor may use STM internally so that particular changes are guaranteed to be atomic. Actors may also share transactional datastructures as the STM provides safe shared state across threads. + +It's also possible to coordinate transactions across Actors or threads so that either the transactions in a set all commit successfully or they all fail. This is the focus of Transactors and the explicit support for coordinated transactions in this section. 
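+
+As a small illustration of the first combination, an actor can keep its state in a ``Ref`` and update it inside an ``Atomic`` block. This is only a sketch; the ``LocalCounter`` class and its string messages are made up for illustration, using the ``Ref`` and ``Atomic`` classes described in the STM documentation:
+
+.. code-block:: java
+
+   import akka.actor.UntypedActor;
+   import akka.stm.*;
+
+   public class LocalCounter extends UntypedActor {
+     // transactional state, only changed inside atomic blocks
+     private final Ref<Integer> count = new Ref<Integer>(0);
+
+     public void onReceive(Object message) throws Exception {
+       if (message.equals("increment")) {
+         new Atomic<Integer>() {
+           public Integer atomically() {
+             count.set(count.get() + 1);
+             return count.get();
+           }
+         }.execute();
+       } else if (message.equals("get")) {
+         getContext().reply(count.get());
+       }
+     }
+   }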
+ + +Coordinated transactions +------------------------ + +Akka provides an explicit mechanism for coordinating transactions across actors. Under the hood it uses a ``CountDownCommitBarrier``, similar to a CountDownLatch. + +Here is an example of coordinating two simple counter UntypedActors so that they both increment together in coordinated transactions. If one of them was to fail to increment, the other would also fail. + +.. code-block:: java + + import akka.actor.ActorRef; + + public class Increment { + private final ActorRef friend; + + public Increment() { + this.friend = null; + } + + public Increment(ActorRef friend) { + this.friend = friend; + } + + public boolean hasFriend() { + return friend != null; + } + + public ActorRef getFriend() { + return friend; + } + } + +.. code-block:: java + + import akka.actor.UntypedActor; + import akka.stm.Ref; + import akka.transactor.Atomically; + import akka.transactor.Coordinated; + + public class Counter extends UntypedActor { + private Ref count = new Ref(0); + + private void increment() { + count.set(count.get() + 1); + } + + public void onReceive(Object incoming) throws Exception { + if (incoming instanceof Coordinated) { + Coordinated coordinated = (Coordinated) incoming; + Object message = coordinated.getMessage(); + if (message instanceof Increment) { + Increment increment = (Increment) message; + if (increment.hasFriend()) { + increment.getFriend().tell(coordinated.coordinate(new Increment())); + } + coordinated.atomic(new Atomically() { + public void atomically() { + increment(); + } + }); + } + } else if (incoming.equals("GetCount")) { + getContext().reply(count.get()); + } + } + } + +.. code-block:: java + + ActorRef counter1 = actorOf(Counter.class); + ActorRef counter2 = actorOf(Counter.class); + + counter1.tell(new Coordinated(new Increment(counter2))); + +To start a new coordinated transaction that you will also participate in, just create a ``Coordinated`` object: + +.. code-block:: java + + Coordinated coordinated = new Coordinated(); + +To start a coordinated transaction that you won't participate in yourself you can create a ``Coordinated`` object with a message and send it directly to an actor. The recipient of the message will be the first member of the coordination set: + +.. code-block:: java + + actor.tell(new Coordinated(new Message())); + +To include another actor in the same coordinated transaction that you've created or received, use the ``coordinate`` method on that object. This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. + +.. code-block:: java + + actor.tell(coordinated.coordinate(new Message())); + +To enter the coordinated transaction use the atomic method of the coordinated object. This accepts either an ``akka.transactor.Atomically`` object, or an ``Atomic`` object the same as used normally in the STM (just don't execute it - the coordination will do that). + +.. code-block:: java + + coordinated.atomic(new Atomically() { + public void atomically() { + // do something in a transaction + } + }); + +The coordinated transaction will wait for the other transactions before committing. If any of the coordinated transactions fail then they all fail. + + +UntypedTransactor +----------------- + +UntypedTransactors are untyped actors that provide a general pattern for coordinating transactions, using the explicit coordination described above. + +Here's an example of a simple untyped transactor that will join a coordinated transaction: + +.. 
code-block:: java + + import akka.transactor.UntypedTransactor; + import akka.stm.Ref; + + public class Counter extends UntypedTransactor { + Ref count = new Ref(0); + + @Override + public void atomically(Object message) { + if (message instanceof Increment) { + count.set(count.get() + 1); + } + } + } + +You could send this Counter transactor a ``Coordinated(Increment)`` message. If you were to send it just an ``Increment`` message it will create its own ``Coordinated`` (but in this particular case wouldn't be coordinating transactions with any other transactors). + +To coordinate with other transactors override the ``coordinate`` method. The ``coordinate`` method maps a message to a set of ``SendTo`` objects, pairs of ``ActorRef`` and a message. You can use the ``include`` and ``sendTo`` methods to easily coordinate with other transactors. + +Example of coordinating an increment, similar to the explicitly coordinated example: + +.. code-block:: java + + import akka.transactor.UntypedTransactor; + import akka.transactor.SendTo; + import akka.stm.Ref; + + import java.util.Set; + + public class Counter extends UntypedTransactor { + Ref count = new Ref(0); + + @Override + public Set coordinate(Object message) { + if (message instanceof Increment) { + Increment increment = (Increment) message; + if (increment.hasFriend()) + return include(increment.getFriend(), new Increment()); + } + return nobody(); + } + + @Override + public void atomically(Object message) { + if (message instanceof Increment) { + count.set(count.get() + 1); + } + } + } + +To execute directly before or after the coordinated transaction, override the ``before`` and ``after`` methods. They do not execute within the transaction. + +To completely bypass coordinated transactions override the ``normally`` method. Any message matched by ``normally`` will not be matched by the other methods, and will not be involved in coordinated transactions. In this method you can implement normal actor behavior, or use the normal STM atomic for local transactions. + + +Coordinating Typed Actors +------------------------- + +It's also possible to use coordinated transactions with typed actors. You can explicitly pass around ``Coordinated`` objects, or use built-in support with the ``@Coordinated`` annotation and the ``Coordination.coordinate`` method. + +To specify a method should use coordinated transactions add the ``@Coordinated`` annotation. **Note**: the ``@Coordinated`` annotation will only work with void (one-way) methods. + +.. code-block:: java + + public interface Counter { + @Coordinated public void increment(); + public Integer get(); + } + +To coordinate transactions use a ``coordinate`` block. This accepts either an ``akka.transactor.Atomically`` object, or an ``Atomic`` object liked used in the STM (but don't execute it). The first boolean parameter specifies whether or not to wait for the transactions to complete. + +.. code-block:: java + + Coordination.coordinate(true, new Atomically() { + public void atomically() { + counter1.increment(); + counter2.increment(); + } + }); + +Here's an example of using ``@Coordinated`` with a TypedActor to coordinate increments: + +.. code-block:: java + + import akka.transactor.annotation.Coordinated; + + public interface Counter { + @Coordinated public void increment(); + public Integer get(); + } + +.. 
code-block:: java + + import akka.actor.TypedActor; + import akka.stm.Ref; + + public class CounterImpl extends TypedActor implements Counter { + private Ref count = new Ref(0); + + public void increment() { + count.set(count.get() + 1); + } + + public Integer get() { + return count.get(); + } + } + +.. code-block:: java + + Counter counter1 = (Counter) TypedActor.newInstance(Counter.class, CounterImpl.class); + Counter counter2 = (Counter) TypedActor.newInstance(Counter.class, CounterImpl.class); + + Coordination.coordinate(true, new Atomically() { + public void atomically() { + counter1.increment(); + counter2.increment(); + } + }); + + TypedActor.stop(counter1); + TypedActor.stop(counter2); + diff --git a/akka-docs/disabled/scala-stm.rst b/akka-docs/disabled/scala-stm.rst new file mode 100644 index 0000000000..f21f988939 --- /dev/null +++ b/akka-docs/disabled/scala-stm.rst @@ -0,0 +1,537 @@ + +.. _stm-scala: + +####################################### + Software Transactional Memory (Scala) +####################################### + +.. sidebar:: Contents + + .. contents:: :local: + +Overview of STM +=============== + +An `STM `_ turns the +Java heap into a transactional data set with begin/commit/rollback +semantics. Very much like a regular database. It implements the first three +letters in ACID; ACI: + +* Atomic +* Consistent +* Isolated + +Generally, the STM is not needed very often when working with Akka. Some +use-cases (that we can think of) are: + +- When you really need composable message flows across many actors updating + their **internal local** state but need them to do that atomically in one big + transaction. Might not be often, but when you do need this then you are + screwed without it. +- When you want to share a datastructure across actors. +- When you need to use the persistence modules. + +Akka’s STM implements the concept in `Clojure's `_ STM view on state in +general. Please take the time to read `this excellent document `_ +and view `this presentation `_ by Rich Hickey (the genius +behind Clojure), since it forms the basis of Akka’s view on STM and state in +general. + +.. _clojure: http://clojure.org/ +.. _clojure-state: http://clojure.org/state +.. _clojure-presentation: http://www.infoq.com/presentations/Value-Identity-State-Rich-Hickey + +The STM is based on Transactional References (referred to as Refs). Refs are +memory cells, holding an (arbitrary) immutable value, that implement CAS +(Compare-And-Swap) semantics and are managed and enforced by the STM for +coordinated changes across many Refs. They are implemented using the excellent +`Multiverse STM `_. + +.. _multiverse: http://multiverse.codehaus.org/overview.html + +Working with immutable collections can sometimes give bad performance due to +extensive copying. Scala provides so-called persistent datastructures which +makes working with immutable collections fast. They are immutable but with +constant time access and modification. They use structural sharing and an insert +or update does not ruin the old structure, hence “persistent”. Makes working +with immutable composite types fast. The persistent datastructures currently +consist of a Map and Vector. + + +Simple example +============== + +Here is a simple example of an incremental counter using STM. This shows +creating a ``Ref``, a transactional reference, and then modifying it within a +transaction, which is delimited by ``atomic``. + +.. 
includecode:: code/StmDocSpec.scala#simple + + +Ref +--- + +Refs (transactional references) are mutable references to values and through the STM allow the safe sharing of mutable data. Refs separate identity from value. To ensure safety the value stored in a Ref should be immutable (they can of course contain refs themselves). The value referenced by a Ref can only be accessed or swapped within a transaction. If a transaction is not available, the call will be executed in its own transaction (the call will be atomic). This is a different approach than the Clojure Refs, where a missing transaction results in an error. + +Creating a Ref +^^^^^^^^^^^^^^ + +You can create a Ref with or without an initial value. + +.. code-block:: scala + + import akka.stm._ + + // giving an initial value + val ref = Ref(0) + + // specifying a type but no initial value + val ref = Ref[Int] + +Accessing the value of a Ref +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Use ``get`` to access the value of a Ref. Note that if no initial value has been given then the value is initially ``null``. + +.. code-block:: scala + + import akka.stm._ + + val ref = Ref(0) + + atomic { + ref.get + } + // -> 0 + +If there is a chance that the value of a Ref is null then you can use ``opt``, which will create an Option, either Some(value) or None, or you can provide a default value with ``getOrElse``. You can also check for null using ``isNull``. + +.. code-block:: scala + + import akka.stm._ + + val ref = Ref[Int] + + atomic { + ref.opt // -> None + ref.getOrElse(0) // -> 0 + ref.isNull // -> true + } + +Changing the value of a Ref +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To set a new value for a Ref you can use ``set`` (or equivalently ``swap``), which sets the new value and returns the old value. + +.. code-block:: scala + + import akka.stm._ + + val ref = Ref(0) + + atomic { + ref.set(5) + } + // -> 0 + + atomic { + ref.get + } + // -> 5 + +You can also use ``alter`` which accepts a function that takes the old value and creates a new value of the same type. + +.. code-block:: scala + + import akka.stm._ + + val ref = Ref(0) + + atomic { + ref alter (_ + 5) + } + // -> 5 + + val inc = (i: Int) => i + 1 + + atomic { + ref alter inc + } + // -> 6 + +Refs in for-comprehensions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Ref is monadic and can be used in for-comprehensions. + +.. code-block:: scala + + import akka.stm._ + + val ref = Ref(1) + + atomic { + for (value <- ref) { + // do something with value + } + } + + val anotherRef = Ref(3) + + atomic { + for { + value1 <- ref + value2 <- anotherRef + } yield (value1 + value2) + } + // -> Ref(4) + + val emptyRef = Ref[Int] + + atomic { + for { + value1 <- ref + value2 <- emptyRef + } yield (value1 + value2) + } + // -> Ref[Int] + + +Transactions +------------ + +A transaction is delimited using ``atomic``. + +.. code-block:: scala + + atomic { + // ... + } + +All changes made to transactional objects are isolated from other changes, all make it or non make it (so failure atomicity) and are consistent. With the AkkaSTM you automatically have the Oracle version of the SERIALIZED isolation level, lower isolation is not possible. To make it fully serialized, set the writeskew property that checks if a writeskew problem is allowed to happen. + +Retries +^^^^^^^ + +A transaction is automatically retried when it runs into some read or write conflict, until the operation completes, an exception (throwable) is thrown or when there are too many retries. 
When a read or writeconflict is encountered, the transaction uses a bounded exponential backoff to prevent cause more contention and give other transactions some room to complete. + +If you are using non transactional resources in an atomic block, there could be problems because a transaction can be retried. If you are using print statements or logging, it could be that they are called more than once. So you need to be prepared to deal with this. One of the possible solutions is to work with a deferred or compensating task that is executed after the transaction aborts or commits. + +Unexpected retries +^^^^^^^^^^^^^^^^^^ + +It can happen for the first few executions that you get a few failures of execution that lead to unexpected retries, even though there is not any read or writeconflict. The cause of this is that speculative transaction configuration/selection is used. There are transactions optimized for a single transactional object, for 1..n and for n to unlimited. So based on the execution of the transaction, the system learns; it begins with a cheap one and upgrades to more expensive ones. Once it has learned, it will reuse this knowledge. It can be activated/deactivated using the speculative property on the TransactionFactory. In most cases it is best use the default value (enabled) so you get more out of performance. + +Coordinated transactions and Transactors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you need coordinated transactions across actors or threads then see :ref:`transactors-scala`. + +Configuring transactions +^^^^^^^^^^^^^^^^^^^^^^^^ + +It's possible to configure transactions. The ``atomic`` method can take an implicit or explicit ``TransactionFactory``, which can determine properties of the transaction. A default transaction factory is used if none is specified explicitly or there is no implicit ``TransactionFactory`` in scope. + +Configuring transactions with an **implicit** ``TransactionFactory``: + +.. code-block:: scala + + import akka.stm._ + + implicit val txFactory = TransactionFactory(readonly = true) + + atomic { + // read only transaction + } + +Configuring transactions with an **explicit** ``TransactionFactory``: + +.. code-block:: scala + + import akka.stm._ + + val txFactory = TransactionFactory(readonly = true) + + atomic(txFactory) { + // read only transaction + } + +The following settings are possible on a TransactionFactory: + +- ``familyName`` - Family name for transactions. Useful for debugging. +- ``readonly`` - Sets transaction as readonly. Readonly transactions are cheaper. +- ``maxRetries`` - The maximum number of times a transaction will retry. +- ``timeout`` - The maximum time a transaction will block for. +- ``trackReads`` - Whether all reads should be tracked. Needed for blocking operations. +- ``writeSkew`` - Whether writeskew is allowed. Disable with care. +- ``blockingAllowed`` - Whether explicit retries are allowed. +- ``interruptible`` - Whether a blocking transaction can be interrupted. +- ``speculative`` - Whether speculative configuration should be enabled. +- ``quickRelease`` - Whether locks should be released as quickly as possible (before whole commit). +- ``propagation`` - For controlling how nested transactions behave. +- ``traceLevel`` - Transaction trace level. + +You can also specify the default values for some of these options in the :ref:`configuration`. + +You can also determine at which level a transaction factory is shared or not shared, which affects the way in which the STM can optimise transactions. 
+ +Here is a shared transaction factory for all instances of an actor. + +.. code-block:: scala + + import akka.actor._ + import akka.stm._ + + object MyActor { + implicit val txFactory = TransactionFactory(readonly = true) + } + + class MyActor extends Actor { + import MyActor.txFactory + + def receive = { + case message: String => + atomic { + // read only transaction + } + } + } + +Here's a similar example with an individual transaction factory for each instance of an actor. + +.. code-block:: scala + + import akka.actor._ + import akka.stm._ + + class MyActor extends Actor { + implicit val txFactory = TransactionFactory(readonly = true) + + def receive = { + case message: String => + atomic { + // read only transaction + } + } + } + +Transaction lifecycle listeners +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +It's possible to have code that will only run on the successful commit of a transaction, or when a transaction aborts. You can do this by adding ``deferred`` or ``compensating`` blocks to a transaction. + +.. code-block:: scala + + import akka.stm._ + + atomic { + deferred { + // executes when transaction commits + } + compensating { + // executes when transaction aborts + } + } + +Blocking transactions +^^^^^^^^^^^^^^^^^^^^^ + +You can block in a transaction until a condition is met by using an explicit ``retry``. To use ``retry`` you also need to configure the transaction to allow explicit retries. + +Here is an example of using ``retry`` to block until an account has enough money for a withdrawal. This is also an example of using actors and STM together. + +.. code-block:: scala + + import akka.stm._ + import akka.actor._ + import akka.util.duration._ + import akka.event.EventHandler + + type Account = Ref[Double] + + case class Transfer(from: Account, to: Account, amount: Double) + + class Transferer extends Actor { + implicit val txFactory = TransactionFactory(blockingAllowed = true, trackReads = true, timeout = 60 seconds) + + def receive = { + case Transfer(from, to, amount) => + atomic { + if (from.get < amount) { + EventHandler.info(this, "not enough money - retrying") + retry + } + EventHandler.info(this, "transferring") + from alter (_ - amount) + to alter (_ + amount) + } + } + } + + val account1 = Ref(100.0) + val account2 = Ref(100.0) + + val transferer = Actor.actorOf(new Transferer) + + transferer ! Transfer(account1, account2, 500.0) + // INFO Transferer: not enough money - retrying + + atomic { account1 alter (_ + 2000) } + // INFO Transferer: transferring + + atomic { account1.get } + // -> 1600.0 + + atomic { account2.get } + // -> 600.0 + + transferer.stop() + +Alternative blocking transactions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can also have two alternative blocking transactions, one of which can succeed first, with ``either-orElse``. + +.. 
code-block:: scala + + import akka.stm._ + import akka.actor._ + import akka.util.duration._ + import akka.event.EventHandler + + case class Branch(left: Ref[Int], right: Ref[Int], amount: Int) + + class Brancher extends Actor { + implicit val txFactory = TransactionFactory(blockingAllowed = true, trackReads = true, timeout = 60 seconds) + + def receive = { + case Branch(left, right, amount) => + atomic { + either { + if (left.get < amount) { + EventHandler.info(this, "not enough on left - retrying") + retry + } + log.info("going left") + } orElse { + if (right.get < amount) { + EventHandler.info(this, "not enough on right - retrying") + retry + } + log.info("going right") + } + } + } + } + + val ref1 = Ref(0) + val ref2 = Ref(0) + + val brancher = Actor.actorOf(new Brancher) + + brancher ! Branch(ref1, ref2, 1) + // INFO Brancher: not enough on left - retrying + // INFO Brancher: not enough on right - retrying + + atomic { ref2 alter (_ + 1) } + // INFO Brancher: not enough on left - retrying + // INFO Brancher: going right + + brancher.stop() + + +Transactional datastructures +---------------------------- + +Akka provides two datastructures that are managed by the STM. + +- ``TransactionalMap`` +- ``TransactionalVector`` + +``TransactionalMap`` and ``TransactionalVector`` look like regular mutable datastructures, they even implement the standard Scala 'Map' and 'RandomAccessSeq' interfaces, but they are implemented using persistent datastructures and managed references under the hood. Therefore they are safe to use in a concurrent environment. Underlying TransactionalMap is HashMap, an immutable Map but with near constant time access and modification operations. Similarly ``TransactionalVector`` uses a persistent Vector. See the Persistent Datastructures section below for more details. + +Like managed references, ``TransactionalMap`` and ``TransactionalVector`` can only be modified inside the scope of an STM transaction. + +*IMPORTANT*: There have been some problems reported when using transactional datastructures with 'lazy' initialization. Avoid that. + +Here is how you create these transactional datastructures: + +.. code-block:: scala + + import akka.stm._ + + // assuming something like + case class User(name: String) + case class Address(location: String) + + // using initial values + val map = TransactionalMap("bill" -> User("bill")) + val vector = TransactionalVector(Address("somewhere")) + + // specifying types + val map = TransactionalMap[String, User] + val vector = TransactionalVector[Address] + +``TransactionalMap`` and ``TransactionalVector`` wrap persistent datastructures with transactional references and provide a standard Scala interface. This makes them convenient to use. + +Here is an example of using a ``Ref`` and a ``HashMap`` directly: + +.. code-block:: scala + + import akka.stm._ + import scala.collection.immutable.HashMap + + case class User(name: String) + + val ref = Ref(HashMap[String, User]()) + + atomic { + val users = ref.get + val newUsers = users + ("bill" -> User("bill")) // creates a new HashMap + ref.swap(newUsers) + } + + atomic { + ref.get.apply("bill") + } + // -> User("bill") + +Here is the same example using ``TransactionalMap``: + +.. 
code-block:: scala + + import akka.stm._ + + case class User(name: String) + + val users = TransactionalMap[String, User] + + atomic { + users += "bill" -> User("bill") + } + + atomic { + users("bill") + } + // -> User("bill") + + +Persistent datastructures +------------------------- + +Akka's STM should only be used with immutable data. This can be costly if you have large datastructures and are using a naive copy-on-write. In order to make working with immutable datastructures fast enough Scala provides what are called Persistent Datastructures. There are currently two different ones: + +* ``HashMap`` (`scaladoc `__) +* ``Vector`` (`scaladoc `__) + +They are immutable and each update creates a completely new version but they are using clever structural sharing in order to make them almost as fast, for both read and update, as regular mutable datastructures. + +This illustration is taken from Rich Hickey's presentation. Copyright Rich Hickey 2009. + +.. image:: ../images/clojure-trees.png + + +Ants simulation sample +---------------------- + +One fun and very enlightening visual demo of STM, actors and transactional references is the `Ant simulation sample `_. I encourage you to run it and read through the code since it's a good example of using actors with STM. diff --git a/akka-docs/disabled/scala-transactors.rst b/akka-docs/disabled/scala-transactors.rst new file mode 100644 index 0000000000..1c1154eb06 --- /dev/null +++ b/akka-docs/disabled/scala-transactors.rst @@ -0,0 +1,250 @@ +.. _transactors-scala: + +Transactors (Scala) +=================== + +.. sidebar:: Contents + + .. contents:: :local: + +Module stability: **SOLID** + +Why Transactors? +---------------- + +Actors are excellent for solving problems where you have many independent processes that can work in isolation and only interact with other Actors through message passing. This model fits many problems. But the actor model is unfortunately a terrible model for implementing truly shared state. E.g. when you need to have consensus and a stable view of state across many components. The classic example is the bank account where clients can deposit and withdraw, in which each operation needs to be atomic. For detailed discussion on the topic see `this JavaOne presentation `_. + +**STM** on the other hand is excellent for problems where you need consensus and a stable view of the state by providing compositional transactional shared state. Some of the really nice traits of STM are that transactions compose, and it raises the abstraction level from lock-based concurrency. + +Akka's Transactors combine Actors and STM to provide the best of the Actor model (concurrency and asynchronous event-based programming) and STM (compositional transactional shared state) by providing transactional, compositional, asynchronous, event-based message flows. + +If you need Durability then you should not use one of the in-memory data structures but one of the persistent ones. + +Generally, the STM is not needed very often when working with Akka. Some use-cases (that we can think of) are: + +- When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. +- When you want to share a datastructure across actors. +- When you need to use the persistence modules. + +Actors and STM +^^^^^^^^^^^^^^ + +You can combine Actors and STM in several ways. 
An Actor may use STM internally so that particular changes are guaranteed to be atomic. Actors may also share transactional datastructures as the STM provides safe shared state across threads. + +It's also possible to coordinate transactions across Actors or threads so that either the transactions in a set all commit successfully or they all fail. This is the focus of Transactors and the explicit support for coordinated transactions in this section. + + +Coordinated transactions +------------------------ + +Akka provides an explicit mechanism for coordinating transactions across Actors. Under the hood it uses a ``CountDownCommitBarrier``, similar to a CountDownLatch. + +Here is an example of coordinating two simple counter Actors so that they both increment together in coordinated transactions. If one of them was to fail to increment, the other would also fail. + +.. code-block:: scala + + import akka.transactor.Coordinated + import akka.stm.Ref + import akka.actor.{Actor, ActorRef} + + case class Increment(friend: Option[ActorRef] = None) + case object GetCount + + class Counter extends Actor { + val count = Ref(0) + + def receive = { + case coordinated @ Coordinated(Increment(friend)) => { + friend foreach (_ ! coordinated(Increment())) + coordinated atomic { + count alter (_ + 1) + } + } + case GetCount => self.reply(count.get) + } + } + + val counter1 = Actor.actorOf[Counter] + val counter2 = Actor.actorOf[Counter] + + counter1 ! Coordinated(Increment(Some(counter2))) + + ... + + (counter1 ? GetCount).as[Int] // Some(1) + + counter1.stop() + counter2.stop() + +To start a new coordinated transaction that you will also participate in, just create a ``Coordinated`` object: + +.. code-block:: scala + + val coordinated = Coordinated() + +To start a coordinated transaction that you won't participate in yourself you can create a ``Coordinated`` object with a message and send it directly to an actor. The recipient of the message will be the first member of the coordination set: + +.. code-block:: scala + + actor ! Coordinated(Message) + +To receive a coordinated message in an actor simply match it in a case statement: + +.. code-block:: scala + + def receive = { + case coordinated @ Coordinated(Message) => ... + } + +To include another actor in the same coordinated transaction that you've created or received, use the apply method on that object. This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. + +.. code-block:: scala + + actor ! coordinated(Message) + +To enter the coordinated transaction use the atomic method of the coordinated object: + +.. code-block:: scala + + coordinated atomic { + // do something in transaction ... + } + +The coordinated transaction will wait for the other transactions before committing. If any of the coordinated transactions fail then they all fail. + + +Transactor +---------- + +Transactors are actors that provide a general pattern for coordinating transactions, using the explicit coordination described above. + +Here's an example of a simple transactor that will join a coordinated transaction: + +.. code-block:: scala + + import akka.transactor.Transactor + import akka.stm.Ref + + case object Increment + + class Counter extends Transactor { + val count = Ref(0) + + override def atomically = { + case Increment => count alter (_ + 1) + } + } + +You could send this Counter transactor a ``Coordinated(Increment)`` message. 
If you were to send it just an ``Increment`` message it will create its own ``Coordinated`` (but in this particular case wouldn't be coordinating transactions with any other transactors). + +To coordinate with other transactors override the ``coordinate`` method. The ``coordinate`` method maps a message to a set of ``SendTo`` objects, pairs of ``ActorRef`` and a message. You can use the ``include`` and ``sendTo`` methods to easily coordinate with other transactors. The ``include`` method will send on the same message that was received to other transactors. The ``sendTo`` method allows you to specify both the actor to send to, and the message to send. + +Example of coordinating an increment: + +.. code-block:: scala + + import akka.transactor.Transactor + import akka.stm.Ref + import akka.actor.ActorRef + + case object Increment + + class FriendlyCounter(friend: ActorRef) extends Transactor { + val count = Ref(0) + + override def coordinate = { + case Increment => include(friend) + } + + override def atomically = { + case Increment => count alter (_ + 1) + } + } + +Using ``include`` to include more than one transactor: + +.. code-block:: scala + + override def coordinate = { + case Message => include(actor1, actor2, actor3) + } + +Using ``sendTo`` to coordinate transactions but pass-on a different message than the one that was received: + +.. code-block:: scala + + override def coordinate = { + case Message => sendTo(someActor -> SomeOtherMessage) + case SomeMessage => sendTo(actor1 -> Message1, actor2 -> Message2) + } + +To execute directly before or after the coordinated transaction, override the ``before`` and ``after`` methods. These methods also expect partial functions like the receive method. They do not execute within the transaction. + +To completely bypass coordinated transactions override the ``normally`` method. Any message matched by ``normally`` will not be matched by the other methods, and will not be involved in coordinated transactions. In this method you can implement normal actor behavior, or use the normal STM atomic for local transactions. + + +Coordinating Typed Actors +------------------------- + +It's also possible to use coordinated transactions with typed actors. You can explicitly pass around ``Coordinated`` objects, or use built-in support with the ``@Coordinated`` annotation and the ``Coordination.coordinate`` method. + +To specify a method should use coordinated transactions add the ``@Coordinated`` annotation. **Note**: the ``@Coordinated`` annotation only works with methods that return Unit (one-way methods). + +.. code-block:: scala + + trait Counter { + @Coordinated def increment() + def get: Int + } + +To coordinate transactions use a ``coordinate`` block: + +.. code-block:: scala + + coordinate { + counter1.increment() + counter2.increment() + } + +Here's an example of using ``@Coordinated`` with a TypedActor to coordinate increments. + +.. code-block:: scala + + import akka.actor.TypedActor + import akka.stm.Ref + import akka.transactor.annotation.Coordinated + import akka.transactor.Coordination._ + + trait Counter { + @Coordinated def increment() + def get: Int + } + + class CounterImpl extends TypedActor with Counter { + val ref = Ref(0) + def increment() { ref alter (_ + 1) } + def get = ref.get + } + + ... 
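+  // usage sketch: create two typed actor counters and increment them
+  // together in one coordinated transaction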
+ + val counter1 = TypedActor.newInstance(classOf[Counter], classOf[CounterImpl]) + val counter2 = TypedActor.newInstance(classOf[Counter], classOf[CounterImpl]) + + coordinate { + counter1.increment() + counter2.increment() + } + + TypedActor.stop(counter1) + TypedActor.stop(counter2) + +The ``coordinate`` block will wait for the transactions to complete. If you do not want to wait then you can specify this explicitly: + +.. code-block:: scala + + coordinate(wait = false) { + counter1.increment() + counter2.increment() + } + diff --git a/akka-docs/java/stm.rst b/akka-docs/java/stm.rst index 3cbf390bd1..453cc580d9 100644 --- a/akka-docs/java/stm.rst +++ b/akka-docs/java/stm.rst @@ -3,513 +3,4 @@ Software Transactional Memory (Java) ==================================== -.. sidebar:: Contents - - .. contents:: :local: - -Module stability: **SOLID** - -Overview of STM ---------------- - -An `STM `_ turns the Java heap into a transactional data set with begin/commit/rollback semantics. Very much like a regular database. It implements the first three letters in ACID; ACI: -* (failure) Atomicity: all changes during the execution of a transaction make it, or none make it. This only counts for transactional datastructures. -* Consistency: a transaction gets a consistent of reality (in Akka you get the Oracle version of the SERIALIZED isolation level). -* Isolated: changes made by concurrent execution transactions are not visible to each other. - -Generally, the STM is not needed that often when working with Akka. Some use-cases (that we can think of) are: - -- When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. -- When you want to share a datastructure across actors. -- When you need to use the persistence modules. - -Akka’s STM implements the concept in `Clojure’s `_ STM view on state in general. Please take the time to read `this excellent document `_ and view `this presentation `_ by Rich Hickey (the genius behind Clojure), since it forms the basis of Akka’s view on STM and state in general. - -The STM is based on Transactional References (referred to as Refs). Refs are memory cells, holding an (arbitrary) immutable value, that implement CAS (Compare-And-Swap) semantics and are managed and enforced by the STM for coordinated changes across many Refs. They are implemented using the excellent `Multiverse STM `_. - -Working with immutable collections can sometimes give bad performance due to extensive copying. Scala provides so-called persistent datastructures which makes working with immutable collections fast. They are immutable but with constant time access and modification. The use of structural sharing and an insert or update does not ruin the old structure, hence “persistent”. Makes working with immutable composite types fast. The persistent datastructures currently consist of a Map and Vector. - -Simple example --------------- - -Here is a simple example of an incremental counter using STM. This shows creating a ``Ref``, a transactional reference, and then modifying it within a transaction, which is delimited by an ``Atomic`` anonymous inner class. - -.. 
code-block:: java - - import akka.stm.*; - - final Ref ref = new Ref(0); - - public int counter() { - return new Atomic() { - public Integer atomically() { - int inc = ref.get() + 1; - ref.set(inc); - return inc; - } - }.execute(); - } - - counter(); - // -> 1 - - counter(); - // -> 2 - - -Ref ---- - -Refs (transactional references) are mutable references to values and through the STM allow the safe sharing of mutable data. To ensure safety the value stored in a Ref should be immutable. The value referenced by a Ref can only be accessed or swapped within a transaction. Refs separate identity from value. - -Creating a Ref -^^^^^^^^^^^^^^ - -You can create a Ref with or without an initial value. - -.. code-block:: java - - import akka.stm.*; - - // giving an initial value - final Ref ref = new Ref(0); - - // specifying a type but no initial value - final Ref ref = new Ref(); - -Accessing the value of a Ref -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Use ``get`` to access the value of a Ref. Note that if no initial value has been given then the value is initially ``null``. - -.. code-block:: java - - import akka.stm.*; - - final Ref ref = new Ref(0); - - Integer value = new Atomic() { - public Integer atomically() { - return ref.get(); - } - }.execute(); - // -> value = 0 - -Changing the value of a Ref -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To set a new value for a Ref you can use ``set`` (or equivalently ``swap``), which sets the new value and returns the old value. - -.. code-block:: java - - import akka.stm.*; - - final Ref ref = new Ref(0); - - new Atomic() { - public Object atomically() { - return ref.set(5); - } - }.execute(); - - -Transactions ------------- - -A transaction is delimited using an ``Atomic`` anonymous inner class. - -.. code-block:: java - - new Atomic() { - public Object atomically() { - // ... - } - }.execute(); - -All changes made to transactional objects are isolated from other changes, all make it or non make it (so failure atomicity) and are consistent. With the AkkaSTM you automatically have the Oracle version of the SERIALIZED isolation level, lower isolation is not possible. To make it fully serialized, set the writeskew property that checks if a writeskew problem is allowed to happen. - -Retries -^^^^^^^ - -A transaction is automatically retried when it runs into some read or write conflict, until the operation completes, an exception (throwable) is thrown or when there are too many retries. When a read or writeconflict is encountered, the transaction uses a bounded exponential backoff to prevent cause more contention and give other transactions some room to complete. - -If you are using non transactional resources in an atomic block, there could be problems because a transaction can be retried. If you are using print statements or logging, it could be that they are called more than once. So you need to be prepared to deal with this. One of the possible solutions is to work with a deferred or compensating task that is executed after the transaction aborts or commits. - -Unexpected retries -^^^^^^^^^^^^^^^^^^ - -It can happen for the first few executions that you get a few failures of execution that lead to unexpected retries, even though there is not any read or writeconflict. The cause of this is that speculative transaction configuration/selection is used. There are transactions optimized for a single transactional object, for 1..n and for n to unlimited. 
So based on the execution of the transaction, the system learns; it begins with a cheap one and upgrades to more expensive ones. Once it has learned, it will reuse this knowledge. It can be activated/deactivated using the speculative property on the TransactionFactoryBuilder. In most cases it is best use the default value (enabled) so you get more out of performance. - -Coordinated transactions and Transactors -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you need coordinated transactions across actors or threads then see :ref:`transactors-java`. - -Configuring transactions -^^^^^^^^^^^^^^^^^^^^^^^^ - -It's possible to configure transactions. The ``Atomic`` class can take a ``TransactionFactory``, which can determine properties of the transaction. A default transaction factory is used if none is specified. You can create a ``TransactionFactory`` with a ``TransactionFactoryBuilder``. - -Configuring transactions with a ``TransactionFactory``: - -.. code-block:: java - - import akka.stm.*; - - TransactionFactory txFactory = new TransactionFactoryBuilder() - .setReadonly(true) - .build(); - - new Atomic(txFactory) { - public Object atomically() { - // read only transaction - return ...; - } - }.execute(); - -The following settings are possible on a TransactionFactory: - -- familyName - Family name for transactions. Useful for debugging because the familyName is shown in exceptions, logging and in the future also will be used for profiling. -- readonly - Sets transaction as readonly. Readonly transactions are cheaper and can be used to prevent modification to transactional objects. -- maxRetries - The maximum number of times a transaction will retry. -- timeout - The maximum time a transaction will block for. -- trackReads - Whether all reads should be tracked. Needed for blocking operations. Readtracking makes a transaction more expensive, but makes subsequent reads cheaper and also lowers the chance of a readconflict. -- writeSkew - Whether writeskew is allowed. Disable with care. -- blockingAllowed - Whether explicit retries are allowed. -- interruptible - Whether a blocking transaction can be interrupted if it is blocked. -- speculative - Whether speculative configuration should be enabled. -- quickRelease - Whether locks should be released as quickly as possible (before whole commit). -- propagation - For controlling how nested transactions behave. -- traceLevel - Transaction trace level. - -You can also specify the default values for some of these options in :ref:`configuration`. - -Transaction lifecycle listeners -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -It's possible to have code that will only run on the successful commit of a transaction, or when a transaction aborts. You can do this by adding ``deferred`` or ``compensating`` blocks to a transaction. - -.. code-block:: java - - import akka.stm.*; - import static akka.stm.StmUtils.deferred; - import static akka.stm.StmUtils.compensating; - - new Atomic() { - public Object atomically() { - deferred(new Runnable() { - public void run() { - // executes when transaction commits - } - }); - compensating(new Runnable() { - public void run() { - // executes when transaction aborts - } - }); - // ... - return something; - } - }.execute(); - -Blocking transactions -^^^^^^^^^^^^^^^^^^^^^ - -You can block in a transaction until a condition is met by using an explicit ``retry``. To use ``retry`` you also need to configure the transaction to allow explicit retries. 
- -Here is an example of using ``retry`` to block until an account has enough money for a withdrawal. This is also an example of using actors and STM together. - -.. code-block:: java - - import akka.stm.*; - - public class Transfer { - private final Ref from; - private final Ref to; - private final double amount; - - public Transfer(Ref from, Ref to, double amount) { - this.from = from; - this.to = to; - this.amount = amount; - } - - public Ref getFrom() { return from; } - public Ref getTo() { return to; } - public double getAmount() { return amount; } - } - -.. code-block:: java - - import akka.stm.*; - import static akka.stm.StmUtils.retry; - import akka.actor.*; - import akka.util.FiniteDuration; - import java.util.concurrent.TimeUnit; - import akka.event.EventHandler; - - public class Transferer extends UntypedActor { - TransactionFactory txFactory = new TransactionFactoryBuilder() - .setBlockingAllowed(true) - .setTrackReads(true) - .setTimeout(new FiniteDuration(60, TimeUnit.SECONDS)) - .build(); - - public void onReceive(Object message) throws Exception { - if (message instanceof Transfer) { - Transfer transfer = (Transfer) message; - final Ref from = transfer.getFrom(); - final Ref to = transfer.getTo(); - final double amount = transfer.getAmount(); - new Atomic(txFactory) { - public Object atomically() { - if (from.get() < amount) { - EventHandler.info(this, "not enough money - retrying"); - retry(); - } - EventHandler.info(this, "transferring"); - from.set(from.get() - amount); - to.set(to.get() + amount); - return null; - } - }.execute(); - } - } - } - -.. code-block:: java - - import akka.stm.*; - import akka.actor.*; - - public class Main { - public static void main(String...args) throws Exception { - final Ref account1 = new Ref(100.0); - final Ref account2 = new Ref(100.0); - - ActorRef transferer = Actors.actorOf(Transferer.class); - - transferer.tell(new Transfer(account1, account2, 500.0)); - // Transferer: not enough money - retrying - - new Atomic() { - public Object atomically() { - return account1.set(account1.get() + 2000); - } - }.execute(); - // Transferer: transferring - - Thread.sleep(1000); - - Double acc1 = new Atomic() { - public Double atomically() { - return account1.get(); - } - }.execute(); - - Double acc2 = new Atomic() { - public Double atomically() { - return account2.get(); - } - }.execute(); - - - - System.out.println("Account 1: " + acc1); - // Account 1: 1600.0 - - System.out.println("Account 2: " + acc2); - // Account 2: 600.0 - - transferer.stop(); - } - } - -Alternative blocking transactions -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You can also have two alternative blocking transactions, one of which can succeed first, with ``EitherOrElse``. - -.. code-block:: java - - import akka.stm.*; - - public class Branch { - private final Ref left; - private final Ref right; - private final double amount; - - public Branch(Ref left, Ref right, int amount) { - this.left = left; - this.right = right; - this.amount = amount; - } - - public Ref getLeft() { return left; } - - public Ref getRight() { return right; } - - public double getAmount() { return amount; } - } - -.. 
code-block:: java - - import akka.actor.*; - import akka.stm.*; - import static akka.stm.StmUtils.retry; - import akka.util.FiniteDuration; - import java.util.concurrent.TimeUnit; - import akka.event.EventHandler; - - public class Brancher extends UntypedActor { - TransactionFactory txFactory = new TransactionFactoryBuilder() - .setBlockingAllowed(true) - .setTrackReads(true) - .setTimeout(new FiniteDuration(60, TimeUnit.SECONDS)) - .build(); - - public void onReceive(Object message) throws Exception { - if (message instanceof Branch) { - Branch branch = (Branch) message; - final Ref left = branch.getLeft(); - final Ref right = branch.getRight(); - final double amount = branch.getAmount(); - new Atomic(txFactory) { - public Integer atomically() { - return new EitherOrElse() { - public Integer either() { - if (left.get() < amount) { - EventHandler.info(this, "not enough on left - retrying"); - retry(); - } - EventHandler.info(this, "going left"); - return left.get(); - } - public Integer orElse() { - if (right.get() < amount) { - EventHandler.info(this, "not enough on right - retrying"); - retry(); - } - EventHandler.info(this, "going right"); - return right.get(); - } - }.execute(); - } - }.execute(); - } - } - } - -.. code-block:: java - - import akka.stm.*; - import akka.actor.*; - - public class Main2 { - public static void main(String...args) throws Exception { - final Ref left = new Ref(100); - final Ref right = new Ref(100); - - ActorRef brancher = Actors.actorOf(Brancher.class); - - brancher.tell(new Branch(left, right, 500)); - // not enough on left - retrying - // not enough on right - retrying - - Thread.sleep(1000); - - new Atomic() { - public Object atomically() { - return right.set(right.get() + 1000); - } - }.execute(); - // going right - - - - brancher.stop(); - } - } - - -Transactional datastructures ----------------------------- - -Akka provides two datastructures that are managed by the STM. - -- TransactionalMap -- TransactionalVector - -TransactionalMap and TransactionalVector look like regular mutable datastructures, they even implement the standard Scala 'Map' and 'RandomAccessSeq' interfaces, but they are implemented using persistent datastructures and managed references under the hood. Therefore they are safe to use in a concurrent environment. Underlying TransactionalMap is HashMap, an immutable Map but with near constant time access and modification operations. Similarly TransactionalVector uses a persistent Vector. See the Persistent Datastructures section below for more details. - -Like managed references, TransactionalMap and TransactionalVector can only be modified inside the scope of an STM transaction. - -Here is an example of creating and accessing a TransactionalMap: - -.. code-block:: java - - import akka.stm.*; - - // assuming a User class - - final TransactionalMap users = new TransactionalMap(); - - // fill users map (in a transaction) - new Atomic() { - public Object atomically() { - users.put("bill", new User("bill")); - users.put("mary", new User("mary")); - users.put("john", new User("john")); - return null; - } - }.execute(); - - // access users map (in a transaction) - User user = new Atomic() { - public User atomically() { - return users.get("bill").get(); - } - }.execute(); - -Here is an example of creating and accessing a TransactionalVector: - -.. code-block:: java - - import akka.stm.*; - - // assuming an Address class - - final TransactionalVector
    <Address> addresses = new TransactionalVector<Address>();
- - // fill addresses vector (in a transaction) - new Atomic() { - public Object atomically() { - addresses.add(new Address("somewhere")); - addresses.add(new Address("somewhere else")); - return null; - } - }.execute(); - - // access addresses vector (in a transaction) - Address address = new Atomic<Address>
    () { - public Address atomically() { - return addresses.get(0); - } - }.execute(); - - -Persistent datastructures -------------------------- - -Akka's STM should only be used with immutable data. This can be costly if you have large datastructures and are using a naive copy-on-write. In order to make working with immutable datastructures fast enough Scala provides what are called Persistent Datastructures. There are currently two different ones: - -- HashMap (`scaladoc `__) -- Vector (`scaladoc `__) - -They are immutable and each update creates a completely new version but they are using clever structural sharing in order to make them almost as fast, for both read and update, as regular mutable datastructures. - -This illustration is taken from Rich Hickey's presentation. Copyright Rich Hickey 2009. - -.. image:: ../images/clojure-trees.png - - +Documentation of Akka STM has not been migrated to Akka 2.0-SNAPSHOT yet. \ No newline at end of file diff --git a/akka-docs/java/transactors.rst b/akka-docs/java/transactors.rst index cc069e3d9c..27a006701e 100644 --- a/akka-docs/java/transactors.rst +++ b/akka-docs/java/transactors.rst @@ -3,269 +3,4 @@ Transactors (Java) ================== -.. sidebar:: Contents - - .. contents:: :local: - -Module stability: **SOLID** - -Why Transactors? ----------------- - -Actors are excellent for solving problems where you have many independent processes that can work in isolation and only interact with other Actors through message passing. This model fits many problems. But the actor model is unfortunately a terrible model for implementing truly shared state. E.g. when you need to have consensus and a stable view of state across many components. The classic example is the bank account where clients can deposit and withdraw, in which each operation needs to be atomic. For detailed discussion on the topic see `this JavaOne presentation `_. - -**STM** on the other hand is excellent for problems where you need consensus and a stable view of the state by providing compositional transactional shared state. Some of the really nice traits of STM are that transactions compose, and it raises the abstraction level from lock-based concurrency. - -Akka's Transactors combine Actors and STM to provide the best of the Actor model (concurrency and asynchronous event-based programming) and STM (compositional transactional shared state) by providing transactional, compositional, asynchronous, event-based message flows. - -If you need Durability then you should not use one of the in-memory data structures but one of the persistent ones. - -Generally, the STM is not needed very often when working with Akka. Some use-cases (that we can think of) are: - -- When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. -- When you want to share a datastructure across actors. -- When you need to use the persistence modules. - -Actors and STM -^^^^^^^^^^^^^^ - -You can combine Actors and STM in several ways. An Actor may use STM internally so that particular changes are guaranteed to be atomic. Actors may also share transactional datastructures as the STM provides safe shared state across threads. - -It's also possible to coordinate transactions across Actors or threads so that either the transactions in a set all commit successfully or they all fail. 
This is the focus of Transactors and the explicit support for coordinated transactions in this section. - - -Coordinated transactions ------------------------- - -Akka provides an explicit mechanism for coordinating transactions across actors. Under the hood it uses a ``CountDownCommitBarrier``, similar to a CountDownLatch. - -Here is an example of coordinating two simple counter UntypedActors so that they both increment together in coordinated transactions. If one of them was to fail to increment, the other would also fail. - -.. code-block:: java - - import akka.actor.ActorRef; - - public class Increment { - private final ActorRef friend; - - public Increment() { - this.friend = null; - } - - public Increment(ActorRef friend) { - this.friend = friend; - } - - public boolean hasFriend() { - return friend != null; - } - - public ActorRef getFriend() { - return friend; - } - } - -.. code-block:: java - - import akka.actor.UntypedActor; - import akka.stm.Ref; - import akka.transactor.Atomically; - import akka.transactor.Coordinated; - - public class Counter extends UntypedActor { - private Ref count = new Ref(0); - - private void increment() { - count.set(count.get() + 1); - } - - public void onReceive(Object incoming) throws Exception { - if (incoming instanceof Coordinated) { - Coordinated coordinated = (Coordinated) incoming; - Object message = coordinated.getMessage(); - if (message instanceof Increment) { - Increment increment = (Increment) message; - if (increment.hasFriend()) { - increment.getFriend().tell(coordinated.coordinate(new Increment())); - } - coordinated.atomic(new Atomically() { - public void atomically() { - increment(); - } - }); - } - } else if (incoming.equals("GetCount")) { - getContext().reply(count.get()); - } - } - } - -.. code-block:: java - - ActorRef counter1 = actorOf(Counter.class); - ActorRef counter2 = actorOf(Counter.class); - - counter1.tell(new Coordinated(new Increment(counter2))); - -To start a new coordinated transaction that you will also participate in, just create a ``Coordinated`` object: - -.. code-block:: java - - Coordinated coordinated = new Coordinated(); - -To start a coordinated transaction that you won't participate in yourself you can create a ``Coordinated`` object with a message and send it directly to an actor. The recipient of the message will be the first member of the coordination set: - -.. code-block:: java - - actor.tell(new Coordinated(new Message())); - -To include another actor in the same coordinated transaction that you've created or received, use the ``coordinate`` method on that object. This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. - -.. code-block:: java - - actor.tell(coordinated.coordinate(new Message())); - -To enter the coordinated transaction use the atomic method of the coordinated object. This accepts either an ``akka.transactor.Atomically`` object, or an ``Atomic`` object the same as used normally in the STM (just don't execute it - the coordination will do that). - -.. code-block:: java - - coordinated.atomic(new Atomically() { - public void atomically() { - // do something in a transaction - } - }); - -The coordinated transaction will wait for the other transactions before committing. If any of the coordinated transactions fail then they all fail. - - -UntypedTransactor ------------------ - -UntypedTransactors are untyped actors that provide a general pattern for coordinating transactions, using the explicit coordination described above. 
- -Here's an example of a simple untyped transactor that will join a coordinated transaction: - -.. code-block:: java - - import akka.transactor.UntypedTransactor; - import akka.stm.Ref; - - public class Counter extends UntypedTransactor { - Ref count = new Ref(0); - - @Override - public void atomically(Object message) { - if (message instanceof Increment) { - count.set(count.get() + 1); - } - } - } - -You could send this Counter transactor a ``Coordinated(Increment)`` message. If you were to send it just an ``Increment`` message it will create its own ``Coordinated`` (but in this particular case wouldn't be coordinating transactions with any other transactors). - -To coordinate with other transactors override the ``coordinate`` method. The ``coordinate`` method maps a message to a set of ``SendTo`` objects, pairs of ``ActorRef`` and a message. You can use the ``include`` and ``sendTo`` methods to easily coordinate with other transactors. - -Example of coordinating an increment, similar to the explicitly coordinated example: - -.. code-block:: java - - import akka.transactor.UntypedTransactor; - import akka.transactor.SendTo; - import akka.stm.Ref; - - import java.util.Set; - - public class Counter extends UntypedTransactor { - Ref count = new Ref(0); - - @Override - public Set coordinate(Object message) { - if (message instanceof Increment) { - Increment increment = (Increment) message; - if (increment.hasFriend()) - return include(increment.getFriend(), new Increment()); - } - return nobody(); - } - - @Override - public void atomically(Object message) { - if (message instanceof Increment) { - count.set(count.get() + 1); - } - } - } - -To execute directly before or after the coordinated transaction, override the ``before`` and ``after`` methods. They do not execute within the transaction. - -To completely bypass coordinated transactions override the ``normally`` method. Any message matched by ``normally`` will not be matched by the other methods, and will not be involved in coordinated transactions. In this method you can implement normal actor behavior, or use the normal STM atomic for local transactions. - - -Coordinating Typed Actors -------------------------- - -It's also possible to use coordinated transactions with typed actors. You can explicitly pass around ``Coordinated`` objects, or use built-in support with the ``@Coordinated`` annotation and the ``Coordination.coordinate`` method. - -To specify a method should use coordinated transactions add the ``@Coordinated`` annotation. **Note**: the ``@Coordinated`` annotation will only work with void (one-way) methods. - -.. code-block:: java - - public interface Counter { - @Coordinated public void increment(); - public Integer get(); - } - -To coordinate transactions use a ``coordinate`` block. This accepts either an ``akka.transactor.Atomically`` object, or an ``Atomic`` object liked used in the STM (but don't execute it). The first boolean parameter specifies whether or not to wait for the transactions to complete. - -.. code-block:: java - - Coordination.coordinate(true, new Atomically() { - public void atomically() { - counter1.increment(); - counter2.increment(); - } - }); - -Here's an example of using ``@Coordinated`` with a TypedActor to coordinate increments: - -.. code-block:: java - - import akka.transactor.annotation.Coordinated; - - public interface Counter { - @Coordinated public void increment(); - public Integer get(); - } - -.. 
code-block:: java - - import akka.actor.TypedActor; - import akka.stm.Ref; - - public class CounterImpl extends TypedActor implements Counter { - private Ref count = new Ref(0); - - public void increment() { - count.set(count.get() + 1); - } - - public Integer get() { - return count.get(); - } - } - -.. code-block:: java - - Counter counter1 = (Counter) TypedActor.newInstance(Counter.class, CounterImpl.class); - Counter counter2 = (Counter) TypedActor.newInstance(Counter.class, CounterImpl.class); - - Coordination.coordinate(true, new Atomically() { - public void atomically() { - counter1.increment(); - counter2.increment(); - } - }); - - TypedActor.stop(counter1); - TypedActor.stop(counter2); - +Documentation of Akka Transactors has not been migrated to Akka 2.0-SNAPSHOT yet. \ No newline at end of file diff --git a/akka-docs/scala/stm.rst b/akka-docs/scala/stm.rst index f21f988939..93aeb15b88 100644 --- a/akka-docs/scala/stm.rst +++ b/akka-docs/scala/stm.rst @@ -5,533 +5,4 @@ Software Transactional Memory (Scala) ####################################### -.. sidebar:: Contents - - .. contents:: :local: - -Overview of STM -=============== - -An `STM `_ turns the -Java heap into a transactional data set with begin/commit/rollback -semantics. Very much like a regular database. It implements the first three -letters in ACID; ACI: - -* Atomic -* Consistent -* Isolated - -Generally, the STM is not needed very often when working with Akka. Some -use-cases (that we can think of) are: - -- When you really need composable message flows across many actors updating - their **internal local** state but need them to do that atomically in one big - transaction. Might not be often, but when you do need this then you are - screwed without it. -- When you want to share a datastructure across actors. -- When you need to use the persistence modules. - -Akka’s STM implements the concept in `Clojure's `_ STM view on state in -general. Please take the time to read `this excellent document `_ -and view `this presentation `_ by Rich Hickey (the genius -behind Clojure), since it forms the basis of Akka’s view on STM and state in -general. - -.. _clojure: http://clojure.org/ -.. _clojure-state: http://clojure.org/state -.. _clojure-presentation: http://www.infoq.com/presentations/Value-Identity-State-Rich-Hickey - -The STM is based on Transactional References (referred to as Refs). Refs are -memory cells, holding an (arbitrary) immutable value, that implement CAS -(Compare-And-Swap) semantics and are managed and enforced by the STM for -coordinated changes across many Refs. They are implemented using the excellent -`Multiverse STM `_. - -.. _multiverse: http://multiverse.codehaus.org/overview.html - -Working with immutable collections can sometimes give bad performance due to -extensive copying. Scala provides so-called persistent datastructures which -makes working with immutable collections fast. They are immutable but with -constant time access and modification. They use structural sharing and an insert -or update does not ruin the old structure, hence “persistent”. Makes working -with immutable composite types fast. The persistent datastructures currently -consist of a Map and Vector. - - -Simple example -============== - -Here is a simple example of an incremental counter using STM. This shows -creating a ``Ref``, a transactional reference, and then modifying it within a -transaction, which is delimited by ``atomic``. - -.. 
includecode:: code/StmDocSpec.scala#simple - - -Ref ---- - -Refs (transactional references) are mutable references to values and through the STM allow the safe sharing of mutable data. Refs separate identity from value. To ensure safety the value stored in a Ref should be immutable (they can of course contain refs themselves). The value referenced by a Ref can only be accessed or swapped within a transaction. If a transaction is not available, the call will be executed in its own transaction (the call will be atomic). This is a different approach than the Clojure Refs, where a missing transaction results in an error. - -Creating a Ref -^^^^^^^^^^^^^^ - -You can create a Ref with or without an initial value. - -.. code-block:: scala - - import akka.stm._ - - // giving an initial value - val ref = Ref(0) - - // specifying a type but no initial value - val ref = Ref[Int] - -Accessing the value of a Ref -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Use ``get`` to access the value of a Ref. Note that if no initial value has been given then the value is initially ``null``. - -.. code-block:: scala - - import akka.stm._ - - val ref = Ref(0) - - atomic { - ref.get - } - // -> 0 - -If there is a chance that the value of a Ref is null then you can use ``opt``, which will create an Option, either Some(value) or None, or you can provide a default value with ``getOrElse``. You can also check for null using ``isNull``. - -.. code-block:: scala - - import akka.stm._ - - val ref = Ref[Int] - - atomic { - ref.opt // -> None - ref.getOrElse(0) // -> 0 - ref.isNull // -> true - } - -Changing the value of a Ref -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To set a new value for a Ref you can use ``set`` (or equivalently ``swap``), which sets the new value and returns the old value. - -.. code-block:: scala - - import akka.stm._ - - val ref = Ref(0) - - atomic { - ref.set(5) - } - // -> 0 - - atomic { - ref.get - } - // -> 5 - -You can also use ``alter`` which accepts a function that takes the old value and creates a new value of the same type. - -.. code-block:: scala - - import akka.stm._ - - val ref = Ref(0) - - atomic { - ref alter (_ + 5) - } - // -> 5 - - val inc = (i: Int) => i + 1 - - atomic { - ref alter inc - } - // -> 6 - -Refs in for-comprehensions -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Ref is monadic and can be used in for-comprehensions. - -.. code-block:: scala - - import akka.stm._ - - val ref = Ref(1) - - atomic { - for (value <- ref) { - // do something with value - } - } - - val anotherRef = Ref(3) - - atomic { - for { - value1 <- ref - value2 <- anotherRef - } yield (value1 + value2) - } - // -> Ref(4) - - val emptyRef = Ref[Int] - - atomic { - for { - value1 <- ref - value2 <- emptyRef - } yield (value1 + value2) - } - // -> Ref[Int] - - -Transactions ------------- - -A transaction is delimited using ``atomic``. - -.. code-block:: scala - - atomic { - // ... - } - -All changes made to transactional objects are isolated from other changes, all make it or non make it (so failure atomicity) and are consistent. With the AkkaSTM you automatically have the Oracle version of the SERIALIZED isolation level, lower isolation is not possible. To make it fully serialized, set the writeskew property that checks if a writeskew problem is allowed to happen. - -Retries -^^^^^^^ - -A transaction is automatically retried when it runs into some read or write conflict, until the operation completes, an exception (throwable) is thrown or when there are too many retries. 
When a read or writeconflict is encountered, the transaction uses a bounded exponential backoff to prevent cause more contention and give other transactions some room to complete. - -If you are using non transactional resources in an atomic block, there could be problems because a transaction can be retried. If you are using print statements or logging, it could be that they are called more than once. So you need to be prepared to deal with this. One of the possible solutions is to work with a deferred or compensating task that is executed after the transaction aborts or commits. - -Unexpected retries -^^^^^^^^^^^^^^^^^^ - -It can happen for the first few executions that you get a few failures of execution that lead to unexpected retries, even though there is not any read or writeconflict. The cause of this is that speculative transaction configuration/selection is used. There are transactions optimized for a single transactional object, for 1..n and for n to unlimited. So based on the execution of the transaction, the system learns; it begins with a cheap one and upgrades to more expensive ones. Once it has learned, it will reuse this knowledge. It can be activated/deactivated using the speculative property on the TransactionFactory. In most cases it is best use the default value (enabled) so you get more out of performance. - -Coordinated transactions and Transactors -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you need coordinated transactions across actors or threads then see :ref:`transactors-scala`. - -Configuring transactions -^^^^^^^^^^^^^^^^^^^^^^^^ - -It's possible to configure transactions. The ``atomic`` method can take an implicit or explicit ``TransactionFactory``, which can determine properties of the transaction. A default transaction factory is used if none is specified explicitly or there is no implicit ``TransactionFactory`` in scope. - -Configuring transactions with an **implicit** ``TransactionFactory``: - -.. code-block:: scala - - import akka.stm._ - - implicit val txFactory = TransactionFactory(readonly = true) - - atomic { - // read only transaction - } - -Configuring transactions with an **explicit** ``TransactionFactory``: - -.. code-block:: scala - - import akka.stm._ - - val txFactory = TransactionFactory(readonly = true) - - atomic(txFactory) { - // read only transaction - } - -The following settings are possible on a TransactionFactory: - -- ``familyName`` - Family name for transactions. Useful for debugging. -- ``readonly`` - Sets transaction as readonly. Readonly transactions are cheaper. -- ``maxRetries`` - The maximum number of times a transaction will retry. -- ``timeout`` - The maximum time a transaction will block for. -- ``trackReads`` - Whether all reads should be tracked. Needed for blocking operations. -- ``writeSkew`` - Whether writeskew is allowed. Disable with care. -- ``blockingAllowed`` - Whether explicit retries are allowed. -- ``interruptible`` - Whether a blocking transaction can be interrupted. -- ``speculative`` - Whether speculative configuration should be enabled. -- ``quickRelease`` - Whether locks should be released as quickly as possible (before whole commit). -- ``propagation`` - For controlling how nested transactions behave. -- ``traceLevel`` - Transaction trace level. - -You can also specify the default values for some of these options in the :ref:`configuration`. - -You can also determine at which level a transaction factory is shared or not shared, which affects the way in which the STM can optimise transactions. 
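For example, several of the settings can be combined in a single factory. The sketch below assumes that each setting listed above is accepted as a named parameter of ``TransactionFactory``, in the same way the examples on this page pass ``readonly``, ``blockingAllowed``, ``trackReads`` and ``timeout``.

.. code-block:: scala

   import akka.stm._
   import akka.util.duration._

   // one factory combining several of the settings listed above;
   // the parameter names mirror the setting names
   implicit val txFactory = TransactionFactory(
     familyName      = "counter",
     maxRetries      = 100,
     timeout         = 5 seconds,
     blockingAllowed = true,
     trackReads      = true)

   val counter = Ref(0)

   atomic {
     counter alter (_ + 1) // picks up the implicit factory above
   }

Because the factory is declared implicit, every ``atomic`` block in scope uses it, just as with the implicit and explicit variants shown earlier.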
- -Here is a shared transaction factory for all instances of an actor. - -.. code-block:: scala - - import akka.actor._ - import akka.stm._ - - object MyActor { - implicit val txFactory = TransactionFactory(readonly = true) - } - - class MyActor extends Actor { - import MyActor.txFactory - - def receive = { - case message: String => - atomic { - // read only transaction - } - } - } - -Here's a similar example with an individual transaction factory for each instance of an actor. - -.. code-block:: scala - - import akka.actor._ - import akka.stm._ - - class MyActor extends Actor { - implicit val txFactory = TransactionFactory(readonly = true) - - def receive = { - case message: String => - atomic { - // read only transaction - } - } - } - -Transaction lifecycle listeners -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -It's possible to have code that will only run on the successful commit of a transaction, or when a transaction aborts. You can do this by adding ``deferred`` or ``compensating`` blocks to a transaction. - -.. code-block:: scala - - import akka.stm._ - - atomic { - deferred { - // executes when transaction commits - } - compensating { - // executes when transaction aborts - } - } - -Blocking transactions -^^^^^^^^^^^^^^^^^^^^^ - -You can block in a transaction until a condition is met by using an explicit ``retry``. To use ``retry`` you also need to configure the transaction to allow explicit retries. - -Here is an example of using ``retry`` to block until an account has enough money for a withdrawal. This is also an example of using actors and STM together. - -.. code-block:: scala - - import akka.stm._ - import akka.actor._ - import akka.util.duration._ - import akka.event.EventHandler - - type Account = Ref[Double] - - case class Transfer(from: Account, to: Account, amount: Double) - - class Transferer extends Actor { - implicit val txFactory = TransactionFactory(blockingAllowed = true, trackReads = true, timeout = 60 seconds) - - def receive = { - case Transfer(from, to, amount) => - atomic { - if (from.get < amount) { - EventHandler.info(this, "not enough money - retrying") - retry - } - EventHandler.info(this, "transferring") - from alter (_ - amount) - to alter (_ + amount) - } - } - } - - val account1 = Ref(100.0) - val account2 = Ref(100.0) - - val transferer = Actor.actorOf(new Transferer) - - transferer ! Transfer(account1, account2, 500.0) - // INFO Transferer: not enough money - retrying - - atomic { account1 alter (_ + 2000) } - // INFO Transferer: transferring - - atomic { account1.get } - // -> 1600.0 - - atomic { account2.get } - // -> 600.0 - - transferer.stop() - -Alternative blocking transactions -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You can also have two alternative blocking transactions, one of which can succeed first, with ``either-orElse``. - -.. 
code-block:: scala - - import akka.stm._ - import akka.actor._ - import akka.util.duration._ - import akka.event.EventHandler - - case class Branch(left: Ref[Int], right: Ref[Int], amount: Int) - - class Brancher extends Actor { - implicit val txFactory = TransactionFactory(blockingAllowed = true, trackReads = true, timeout = 60 seconds) - - def receive = { - case Branch(left, right, amount) => - atomic { - either { - if (left.get < amount) { - EventHandler.info(this, "not enough on left - retrying") - retry - } - log.info("going left") - } orElse { - if (right.get < amount) { - EventHandler.info(this, "not enough on right - retrying") - retry - } - log.info("going right") - } - } - } - } - - val ref1 = Ref(0) - val ref2 = Ref(0) - - val brancher = Actor.actorOf(new Brancher) - - brancher ! Branch(ref1, ref2, 1) - // INFO Brancher: not enough on left - retrying - // INFO Brancher: not enough on right - retrying - - atomic { ref2 alter (_ + 1) } - // INFO Brancher: not enough on left - retrying - // INFO Brancher: going right - - brancher.stop() - - -Transactional datastructures ----------------------------- - -Akka provides two datastructures that are managed by the STM. - -- ``TransactionalMap`` -- ``TransactionalVector`` - -``TransactionalMap`` and ``TransactionalVector`` look like regular mutable datastructures, they even implement the standard Scala 'Map' and 'RandomAccessSeq' interfaces, but they are implemented using persistent datastructures and managed references under the hood. Therefore they are safe to use in a concurrent environment. Underlying TransactionalMap is HashMap, an immutable Map but with near constant time access and modification operations. Similarly ``TransactionalVector`` uses a persistent Vector. See the Persistent Datastructures section below for more details. - -Like managed references, ``TransactionalMap`` and ``TransactionalVector`` can only be modified inside the scope of an STM transaction. - -*IMPORTANT*: There have been some problems reported when using transactional datastructures with 'lazy' initialization. Avoid that. - -Here is how you create these transactional datastructures: - -.. code-block:: scala - - import akka.stm._ - - // assuming something like - case class User(name: String) - case class Address(location: String) - - // using initial values - val map = TransactionalMap("bill" -> User("bill")) - val vector = TransactionalVector(Address("somewhere")) - - // specifying types - val map = TransactionalMap[String, User] - val vector = TransactionalVector[Address] - -``TransactionalMap`` and ``TransactionalVector`` wrap persistent datastructures with transactional references and provide a standard Scala interface. This makes them convenient to use. - -Here is an example of using a ``Ref`` and a ``HashMap`` directly: - -.. code-block:: scala - - import akka.stm._ - import scala.collection.immutable.HashMap - - case class User(name: String) - - val ref = Ref(HashMap[String, User]()) - - atomic { - val users = ref.get - val newUsers = users + ("bill" -> User("bill")) // creates a new HashMap - ref.swap(newUsers) - } - - atomic { - ref.get.apply("bill") - } - // -> User("bill") - -Here is the same example using ``TransactionalMap``: - -.. 
code-block:: scala - - import akka.stm._ - - case class User(name: String) - - val users = TransactionalMap[String, User] - - atomic { - users += "bill" -> User("bill") - } - - atomic { - users("bill") - } - // -> User("bill") - - -Persistent datastructures -------------------------- - -Akka's STM should only be used with immutable data. This can be costly if you have large datastructures and are using a naive copy-on-write. In order to make working with immutable datastructures fast enough Scala provides what are called Persistent Datastructures. There are currently two different ones: - -* ``HashMap`` (`scaladoc `__) -* ``Vector`` (`scaladoc `__) - -They are immutable and each update creates a completely new version but they are using clever structural sharing in order to make them almost as fast, for both read and update, as regular mutable datastructures. - -This illustration is taken from Rich Hickey's presentation. Copyright Rich Hickey 2009. - -.. image:: ../images/clojure-trees.png - - -Ants simulation sample ----------------------- - -One fun and very enlightening visual demo of STM, actors and transactional references is the `Ant simulation sample `_. I encourage you to run it and read through the code since it's a good example of using actors with STM. +Documentation of Akka STM has not been migrated to Akka 2.0-SNAPSHOT yet. \ No newline at end of file diff --git a/akka-docs/scala/transactors.rst b/akka-docs/scala/transactors.rst index 1c1154eb06..217b1ecd0c 100644 --- a/akka-docs/scala/transactors.rst +++ b/akka-docs/scala/transactors.rst @@ -3,248 +3,4 @@ Transactors (Scala) =================== -.. sidebar:: Contents - - .. contents:: :local: - -Module stability: **SOLID** - -Why Transactors? ----------------- - -Actors are excellent for solving problems where you have many independent processes that can work in isolation and only interact with other Actors through message passing. This model fits many problems. But the actor model is unfortunately a terrible model for implementing truly shared state. E.g. when you need to have consensus and a stable view of state across many components. The classic example is the bank account where clients can deposit and withdraw, in which each operation needs to be atomic. For detailed discussion on the topic see `this JavaOne presentation `_. - -**STM** on the other hand is excellent for problems where you need consensus and a stable view of the state by providing compositional transactional shared state. Some of the really nice traits of STM are that transactions compose, and it raises the abstraction level from lock-based concurrency. - -Akka's Transactors combine Actors and STM to provide the best of the Actor model (concurrency and asynchronous event-based programming) and STM (compositional transactional shared state) by providing transactional, compositional, asynchronous, event-based message flows. - -If you need Durability then you should not use one of the in-memory data structures but one of the persistent ones. - -Generally, the STM is not needed very often when working with Akka. Some use-cases (that we can think of) are: - -- When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. -- When you want to share a datastructure across actors. -- When you need to use the persistence modules. 
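To make the second use-case above ("share a datastructure across actors") concrete, here is a minimal sketch that uses the pre-2.0 ``akka.stm`` API from the STM page; the ``Registrar`` and ``Auditor`` actors and their messages are illustrative names only, not part of the Akka API.

.. code-block:: scala

   import akka.actor._
   import akka.stm._

   case class User(name: String)
   case class Register(name: String)
   case object Size

   // Both actors close over the same TransactionalMap; the STM makes
   // their concurrent reads and updates safe without explicit locks.
   class Registrar(users: TransactionalMap[String, User]) extends Actor {
     def receive = {
       case Register(name) => atomic { users += name -> User(name) }
     }
   }

   class Auditor(users: TransactionalMap[String, User]) extends Actor {
     def receive = {
       case Size => self.reply(atomic { users.size })
     }
   }

   val users = TransactionalMap[String, User]
   val registrar = Actor.actorOf(new Registrar(users))
   val auditor = Actor.actorOf(new Auditor(users))

   registrar ! Register("bill")
   (auditor ? Size).as[Int] // eventually Some(1)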
- -Actors and STM -^^^^^^^^^^^^^^ - -You can combine Actors and STM in several ways. An Actor may use STM internally so that particular changes are guaranteed to be atomic. Actors may also share transactional datastructures as the STM provides safe shared state across threads. - -It's also possible to coordinate transactions across Actors or threads so that either the transactions in a set all commit successfully or they all fail. This is the focus of Transactors and the explicit support for coordinated transactions in this section. - - -Coordinated transactions ------------------------- - -Akka provides an explicit mechanism for coordinating transactions across Actors. Under the hood it uses a ``CountDownCommitBarrier``, similar to a CountDownLatch. - -Here is an example of coordinating two simple counter Actors so that they both increment together in coordinated transactions. If one of them was to fail to increment, the other would also fail. - -.. code-block:: scala - - import akka.transactor.Coordinated - import akka.stm.Ref - import akka.actor.{Actor, ActorRef} - - case class Increment(friend: Option[ActorRef] = None) - case object GetCount - - class Counter extends Actor { - val count = Ref(0) - - def receive = { - case coordinated @ Coordinated(Increment(friend)) => { - friend foreach (_ ! coordinated(Increment())) - coordinated atomic { - count alter (_ + 1) - } - } - case GetCount => self.reply(count.get) - } - } - - val counter1 = Actor.actorOf[Counter] - val counter2 = Actor.actorOf[Counter] - - counter1 ! Coordinated(Increment(Some(counter2))) - - ... - - (counter1 ? GetCount).as[Int] // Some(1) - - counter1.stop() - counter2.stop() - -To start a new coordinated transaction that you will also participate in, just create a ``Coordinated`` object: - -.. code-block:: scala - - val coordinated = Coordinated() - -To start a coordinated transaction that you won't participate in yourself you can create a ``Coordinated`` object with a message and send it directly to an actor. The recipient of the message will be the first member of the coordination set: - -.. code-block:: scala - - actor ! Coordinated(Message) - -To receive a coordinated message in an actor simply match it in a case statement: - -.. code-block:: scala - - def receive = { - case coordinated @ Coordinated(Message) => ... - } - -To include another actor in the same coordinated transaction that you've created or received, use the apply method on that object. This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. - -.. code-block:: scala - - actor ! coordinated(Message) - -To enter the coordinated transaction use the atomic method of the coordinated object: - -.. code-block:: scala - - coordinated atomic { - // do something in transaction ... - } - -The coordinated transaction will wait for the other transactions before committing. If any of the coordinated transactions fail then they all fail. - - -Transactor ----------- - -Transactors are actors that provide a general pattern for coordinating transactions, using the explicit coordination described above. - -Here's an example of a simple transactor that will join a coordinated transaction: - -.. code-block:: scala - - import akka.transactor.Transactor - import akka.stm.Ref - - case object Increment - - class Counter extends Transactor { - val count = Ref(0) - - override def atomically = { - case Increment => count alter (_ + 1) - } - } - -You could send this Counter transactor a ``Coordinated(Increment)`` message. 
If you were to send it just an ``Increment`` message it will create its own ``Coordinated`` (but in this particular case wouldn't be coordinating transactions with any other transactors). - -To coordinate with other transactors override the ``coordinate`` method. The ``coordinate`` method maps a message to a set of ``SendTo`` objects, pairs of ``ActorRef`` and a message. You can use the ``include`` and ``sendTo`` methods to easily coordinate with other transactors. The ``include`` method will send on the same message that was received to other transactors. The ``sendTo`` method allows you to specify both the actor to send to, and the message to send. - -Example of coordinating an increment: - -.. code-block:: scala - - import akka.transactor.Transactor - import akka.stm.Ref - import akka.actor.ActorRef - - case object Increment - - class FriendlyCounter(friend: ActorRef) extends Transactor { - val count = Ref(0) - - override def coordinate = { - case Increment => include(friend) - } - - override def atomically = { - case Increment => count alter (_ + 1) - } - } - -Using ``include`` to include more than one transactor: - -.. code-block:: scala - - override def coordinate = { - case Message => include(actor1, actor2, actor3) - } - -Using ``sendTo`` to coordinate transactions but pass-on a different message than the one that was received: - -.. code-block:: scala - - override def coordinate = { - case Message => sendTo(someActor -> SomeOtherMessage) - case SomeMessage => sendTo(actor1 -> Message1, actor2 -> Message2) - } - -To execute directly before or after the coordinated transaction, override the ``before`` and ``after`` methods. These methods also expect partial functions like the receive method. They do not execute within the transaction. - -To completely bypass coordinated transactions override the ``normally`` method. Any message matched by ``normally`` will not be matched by the other methods, and will not be involved in coordinated transactions. In this method you can implement normal actor behavior, or use the normal STM atomic for local transactions. - - -Coordinating Typed Actors -------------------------- - -It's also possible to use coordinated transactions with typed actors. You can explicitly pass around ``Coordinated`` objects, or use built-in support with the ``@Coordinated`` annotation and the ``Coordination.coordinate`` method. - -To specify a method should use coordinated transactions add the ``@Coordinated`` annotation. **Note**: the ``@Coordinated`` annotation only works with methods that return Unit (one-way methods). - -.. code-block:: scala - - trait Counter { - @Coordinated def increment() - def get: Int - } - -To coordinate transactions use a ``coordinate`` block: - -.. code-block:: scala - - coordinate { - counter1.increment() - counter2.increment() - } - -Here's an example of using ``@Coordinated`` with a TypedActor to coordinate increments. - -.. code-block:: scala - - import akka.actor.TypedActor - import akka.stm.Ref - import akka.transactor.annotation.Coordinated - import akka.transactor.Coordination._ - - trait Counter { - @Coordinated def increment() - def get: Int - } - - class CounterImpl extends TypedActor with Counter { - val ref = Ref(0) - def increment() { ref alter (_ + 1) } - def get = ref.get - } - - ... 
- - val counter1 = TypedActor.newInstance(classOf[Counter], classOf[CounterImpl]) - val counter2 = TypedActor.newInstance(classOf[Counter], classOf[CounterImpl]) - - coordinate { - counter1.increment() - counter2.increment() - } - - TypedActor.stop(counter1) - TypedActor.stop(counter2) - -The ``coordinate`` block will wait for the transactions to complete. If you do not want to wait then you can specify this explicitly: - -.. code-block:: scala - - coordinate(wait = false) { - counter1.increment() - counter2.increment() - } - +Documentation of Akka Transactors has not been migrated to Akka 2.0-SNAPSHOT yet. \ No newline at end of file From f4b8e9cfa68000a599139603c8842cd13aff1e52 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 12 Dec 2011 14:23:34 +0100 Subject: [PATCH 27/27] DOC: added Henrik to team list --- akka-docs/dev/team.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/akka-docs/dev/team.rst b/akka-docs/dev/team.rst index 8f636ddafa..36e0cd1339 100644 --- a/akka-docs/dev/team.rst +++ b/akka-docs/dev/team.rst @@ -26,4 +26,5 @@ Scott Clasen Committer Roland Kuhn Committer Patrik Nordwall Committer patrik DOT nordwall AT gmail DOT com Derek Williams Committer derek AT nebvin DOT ca +Henrik Engström Committer =================== ========================== ==================================== \ No newline at end of file