diff --git a/.gitignore b/.gitignore index c0fa0f10b4..28bd0c884d 100755 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,6 @@ run-codefellow multiverse.log .eprj .*.swp +akka-docs/_build/ +akka-tutorials/akka-tutorial-first/project/boot/ +akka-tutorials/akka-tutorial-first/project/plugins/project/ \ No newline at end of file diff --git a/akka-actor-tests/src/main/scala/akka/testing/TestBarrier.scala b/akka-actor-tests/src/main/scala/akka/testing/TestBarrier.scala new file mode 100644 index 0000000000..650ef7de79 --- /dev/null +++ b/akka-actor-tests/src/main/scala/akka/testing/TestBarrier.scala @@ -0,0 +1,40 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.testing + +import akka.util.Duration +import java.util.concurrent.{CyclicBarrier, TimeUnit, TimeoutException} + + +class TestBarrierTimeoutException(message: String) extends RuntimeException(message) + +/** + * A cyclic barrier wrapper for use in testing. + * It always uses a timeout when waiting and timeouts are specified as durations. + * Timeouts will always throw an exception. The default timeout is 5 seconds. + * Timeouts are multiplied by the testing time factor for Jenkins builds. 
+ */ +object TestBarrier { + val DefaultTimeout = Duration(5, TimeUnit.SECONDS) + + def apply(count: Int) = new TestBarrier(count) +} + +class TestBarrier(count: Int) { + private val barrier = new CyclicBarrier(count) + + def await(): Unit = await(TestBarrier.DefaultTimeout) + + def await(timeout: Duration): Unit = { + try { + barrier.await(Testing.testTime(timeout.toNanos), TimeUnit.NANOSECONDS) + } catch { + case e: TimeoutException => + throw new TestBarrierTimeoutException("Timeout of %s and time factor of %s" format (timeout.toString, Testing.timeFactor)) + } + } + + def reset = barrier.reset +} diff --git a/akka-actor-tests/src/main/scala/akka/testing/TestLatch.scala b/akka-actor-tests/src/main/scala/akka/testing/TestLatch.scala new file mode 100644 index 0000000000..aeefa04dd7 --- /dev/null +++ b/akka-actor-tests/src/main/scala/akka/testing/TestLatch.scala @@ -0,0 +1,55 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.testing + +import akka.util.Duration +import java.util.concurrent.{CountDownLatch, TimeUnit} + + +class TestLatchTimeoutException(message: String) extends RuntimeException(message) +class TestLatchNoTimeoutException(message: String) extends RuntimeException(message) + +/** + * A count down latch wrapper for use in testing. + * It always uses a timeout when waiting and timeouts are specified as durations. + * There's a default timeout of 5 seconds and the default count is 1. + * Timeouts will always throw an exception (no need to wrap in assert in tests). + * Timeouts are multiplied by the testing time factor for Jenkins builds. 
+ */ +object TestLatch { + val DefaultTimeout = Duration(5, TimeUnit.SECONDS) + + def apply(count: Int = 1) = new TestLatch(count) +} + +class TestLatch(count: Int = 1) { + private var latch = new CountDownLatch(count) + + def countDown() = latch.countDown() + + def open() = countDown() + + def await(): Boolean = await(TestLatch.DefaultTimeout) + + def await(timeout: Duration): Boolean = { + val opened = latch.await(Testing.testTime(timeout.toNanos), TimeUnit.NANOSECONDS) + if (!opened) throw new TestLatchTimeoutException( + "Timeout of %s with time factor of %s" format (timeout.toString, Testing.timeFactor)) + opened + } + + /** + * Timeout is expected. Throws exception if latch is opened before timeout. + */ + def awaitTimeout(timeout: Duration = TestLatch.DefaultTimeout) = { + val opened = latch.await(Testing.testTime(timeout.toNanos), TimeUnit.NANOSECONDS) + if (opened) throw new TestLatchNoTimeoutException( + "Latch opened before timeout of %s with time factor of %s" format (timeout.toString, Testing.timeFactor)) + opened + } + + def reset() = latch = new CountDownLatch(count) +} + diff --git a/akka-actor-tests/src/main/scala/akka/testing/Testing.scala b/akka-actor-tests/src/main/scala/akka/testing/Testing.scala new file mode 100644 index 0000000000..98733c5434 --- /dev/null +++ b/akka-actor-tests/src/main/scala/akka/testing/Testing.scala @@ -0,0 +1,33 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.testing + +import akka.util.Duration + +/** + * Multiplying numbers used in test timeouts by a factor, set by system property. + * Useful for Jenkins builds (where the machine may need more time). 
+ */ +object Testing { + val timeFactor: Double = { + val factor = System.getProperty("akka.test.timefactor", "1.0") + try { + factor.toDouble + } catch { + case e: java.lang.NumberFormatException => 1.0 + } + } + + def testTime(t: Int): Int = (timeFactor * t).toInt + def testTime(t: Long): Long = (timeFactor * t).toLong + def testTime(t: Float): Float = (timeFactor * t).toFloat + def testTime(t: Double): Double = timeFactor * t + + def testSeconds(duration: Duration) = testTime(duration.toSeconds) + def testMillis(duration: Duration) = testTime(duration.toMillis) + def testNanos(duration: Duration) = testTime(duration.toNanos) + + def sleepFor(duration: Duration) = Thread.sleep(testTime(duration.toMillis)) +} diff --git a/akka-actor/src/test/java/akka/actor/JavaAPI.java b/akka-actor-tests/src/test/java/akka/actor/JavaAPI.java similarity index 100% rename from akka-actor/src/test/java/akka/actor/JavaAPI.java rename to akka-actor-tests/src/test/java/akka/actor/JavaAPI.java diff --git a/akka-actor/src/test/java/akka/actor/JavaAPITestActor.java b/akka-actor-tests/src/test/java/akka/actor/JavaAPITestActor.java similarity index 100% rename from akka-actor/src/test/java/akka/actor/JavaAPITestActor.java rename to akka-actor-tests/src/test/java/akka/actor/JavaAPITestActor.java diff --git a/akka-actor/src/test/java/akka/config/SupervisionConfig.java b/akka-actor-tests/src/test/java/akka/config/SupervisionConfig.java similarity index 71% rename from akka-actor/src/test/java/akka/config/SupervisionConfig.java rename to akka-actor-tests/src/test/java/akka/config/SupervisionConfig.java index fd71c86bf1..97605a4a79 100644 --- a/akka-actor/src/test/java/akka/config/SupervisionConfig.java +++ b/akka-actor-tests/src/test/java/akka/config/SupervisionConfig.java @@ -9,13 +9,15 @@ import java.util.List; import static akka.config.Supervision.*; public class SupervisionConfig { - /*Just some sample code to demonstrate the declarative supervision configuration for Java */ + /*Just some 
sample code to demonstrate the declarative supervision configuration for Java */ + @SuppressWarnings("unchecked") public SupervisorConfig createSupervisorConfig(List toSupervise) { ArrayList targets = new ArrayList(toSupervise.size()); for(ActorRef ref : toSupervise) { targets.add(new Supervise(ref, permanent(), true)); } - return new SupervisorConfig(new AllForOneStrategy(new Class[] { Exception.class },50,1000), targets.toArray(new Server[0])); + + return new SupervisorConfig(new AllForOneStrategy(new Class[] { Exception.class }, 50, 1000), targets.toArray(new Server[targets.size()])); } } diff --git a/akka-actor/src/test/java/akka/dispatch/JavaFutureTests.java b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java similarity index 100% rename from akka-actor/src/test/java/akka/dispatch/JavaFutureTests.java rename to akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java diff --git a/akka-actor/src/test/java/akka/japi/JavaAPITestBase.java b/akka-actor-tests/src/test/java/akka/japi/JavaAPITestBase.java similarity index 100% rename from akka-actor/src/test/java/akka/japi/JavaAPITestBase.java rename to akka-actor-tests/src/test/java/akka/japi/JavaAPITestBase.java diff --git a/akka-actor/src/test/scala/akka/Messages.scala b/akka-actor-tests/src/test/scala/akka/Messages.scala similarity index 100% rename from akka-actor/src/test/scala/akka/Messages.scala rename to akka-actor-tests/src/test/scala/akka/Messages.scala diff --git a/akka-actor-tests/src/test/scala/akka/actor/actor/ActorFireForgetRequestReplySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/ActorFireForgetRequestReplySpec.scala new file mode 100644 index 0000000000..f76c328573 --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/actor/actor/ActorFireForgetRequestReplySpec.scala @@ -0,0 +1,97 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.actor + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers +import 
org.scalatest.BeforeAndAfterEach + +import akka.testing._ +import akka.testing.Testing.sleepFor +import akka.util.duration._ + +import Actor._ +import akka.config.Supervision._ +import akka.dispatch.Dispatchers + + +object ActorFireForgetRequestReplySpec { + + class ReplyActor extends Actor { + def receive = { + case "Send" => + self.reply("Reply") + case "SendImplicit" => + self.sender.get ! "ReplyImplicit" + } + } + + class CrashingTemporaryActor extends Actor { + self.lifeCycle = Temporary + + def receive = { + case "Die" => + state.finished.await + throw new Exception("Expected exception") + } + } + + class SenderActor(replyActor: ActorRef) extends Actor { + def receive = { + case "Init" => + replyActor ! "Send" + case "Reply" => { + state.s = "Reply" + state.finished.await + } + case "InitImplicit" => replyActor ! "SendImplicit" + case "ReplyImplicit" => { + state.s = "ReplyImplicit" + state.finished.await + } + } + } + + object state { + var s = "NIL" + val finished = TestBarrier(2) + } +} + +class ActorFireForgetRequestReplySpec extends WordSpec with MustMatchers with BeforeAndAfterEach { + import ActorFireForgetRequestReplySpec._ + + override def beforeEach() = { + state.finished.reset + } + + "An Actor" must { + + "reply to bang message using reply" in { + val replyActor = actorOf[ReplyActor].start() + val senderActor = actorOf(new SenderActor(replyActor)).start() + senderActor ! "Init" + state.finished.await + state.s must be ("Reply") + } + + "reply to bang message using implicit sender" in { + val replyActor = actorOf[ReplyActor].start() + val senderActor = actorOf(new SenderActor(replyActor)).start() + senderActor ! "InitImplicit" + state.finished.await + state.s must be ("ReplyImplicit") + } + + "should shutdown crashed temporary actor" in { + val actor = actorOf[CrashingTemporaryActor].start() + actor.isRunning must be (true) + actor ! 
"Die" + state.finished.await + sleepFor(1 second) + actor.isShutdown must be (true) + } + } +} diff --git a/akka-actor-tests/src/test/scala/akka/actor/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/ActorRefSpec.scala new file mode 100644 index 0000000000..6c4374809e --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/actor/actor/ActorRefSpec.scala @@ -0,0 +1,177 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.actor + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers + +import akka.testing._ +import akka.util.duration._ +import akka.testing.Testing.sleepFor +import akka.config.Supervision.{OneForOneStrategy} +import akka.actor._ +import akka.dispatch.Future +import java.util.concurrent.{TimeUnit, CountDownLatch} + +object ActorRefSpec { + + val latch = TestLatch(4) + + class ReplyActor extends Actor { + var replyTo: Channel[Any] = null + + def receive = { + case "complexRequest" => { + replyTo = self.channel + val worker = Actor.actorOf[WorkerActor].start() + worker ! "work" + } + case "complexRequest2" => + val worker = Actor.actorOf[WorkerActor].start() + worker ! self.channel + case "workDone" => replyTo ! "complexReply" + case "simpleRequest" => self.reply("simpleReply") + } + } + + class WorkerActor() extends Actor { + def receive = { + case "work" => { + work + self.reply("workDone") + self.stop() + } + case replyTo: Channel[Any] => { + work + replyTo ! "complexReply" + } + } + + private def work { + sleepFor(1 second) + } + } + + class SenderActor(replyActor: ActorRef) extends Actor { + + def receive = { + case "complex" => replyActor ! "complexRequest" + case "complex2" => replyActor ! "complexRequest2" + case "simple" => replyActor ! 
"simpleRequest" + case "complexReply" => { + latch.countDown() + } + case "simpleReply" => { + latch.countDown() + } + } + } +} + +class ActorRefSpec extends WordSpec with MustMatchers { + import ActorRefSpec._ + + "An ActorRef" must { + + "not allow Actors to be created outside of an actorOf" in { + intercept[akka.actor.ActorInitializationException] { + new Actor { def receive = { case _ => } } + fail("shouldn't get here") + } + + intercept[akka.actor.ActorInitializationException] { + val a = Actor.actorOf(new Actor { + val nested = new Actor { def receive = { case _ => } } + def receive = { case _ => } + }).start() + fail("shouldn't get here") + } + } + + "support nested actorOfs" in { + val a = Actor.actorOf(new Actor { + val nested = Actor.actorOf(new Actor { def receive = { case _ => } }).start() + def receive = { case _ => self reply nested } + }).start() + + val nested = (a !! "any").get.asInstanceOf[ActorRef] + a must not be null + nested must not be null + (a ne nested) must be === true + } + + "support reply via channel" in { + val serverRef = Actor.actorOf[ReplyActor].start() + val clientRef = Actor.actorOf(new SenderActor(serverRef)).start() + + clientRef ! "complex" + clientRef ! "simple" + clientRef ! "simple" + clientRef ! "simple" + + latch.await + + latch.reset + + clientRef ! "complex2" + clientRef ! "simple" + clientRef ! "simple" + clientRef ! "simple" + + latch.await + + clientRef.stop() + serverRef.stop() + } + + "stop when sent a poison pill" in { + val ref = Actor.actorOf( + new Actor { + def receive = { + case 5 => self reply_? "five" + case null => self reply_? "null" + } + } + ).start() + + val ffive: Future[String] = ref !!! 5 + val fnull: Future[String] = ref !!! null + + intercept[ActorKilledException] { + ref !! 
PoisonPill + fail("shouldn't get here") + } + + ffive.resultOrException.get must be ("five") + fnull.resultOrException.get must be ("null") + + ref.isRunning must be (false) + ref.isShutdown must be (true) + } + + "restart when Kill:ed" in { + val latch = new CountDownLatch(2) + + val boss = Actor.actorOf(new Actor{ + self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), scala.Some(2), scala.Some(1000)) + + val ref = Actor.actorOf( + new Actor { + def receive = { case _ => } + override def preRestart(reason: Throwable) = latch.countDown() + override def postRestart(reason: Throwable) = latch.countDown() + } + ).start() + + self link ref + + protected def receive = { case "sendKill" => ref ! Kill } + }).start() + + boss ! "sendKill" + latch.await(5, TimeUnit.SECONDS) must be === true + } + } +} diff --git a/akka-actor/src/test/scala/akka/actor/actor/Bench.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/Bench.scala similarity index 97% rename from akka-actor/src/test/scala/akka/actor/actor/Bench.scala rename to akka-actor-tests/src/test/scala/akka/actor/actor/Bench.scala index f043f5c92e..f018de635c 100644 --- a/akka-actor/src/test/scala/akka/actor/actor/Bench.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/actor/Bench.scala @@ -28,7 +28,7 @@ object Chameneos { class Chameneo(var mall: ActorRef, var colour: Colour, cid: Int) extends Actor { var meetings = 0 - self.start + self.start() mall ! 
Meet(self, colour) def receive = { @@ -88,7 +88,7 @@ object Chameneos { sumMeetings += i if (numFaded == numChameneos) { Chameneos.end = System.currentTimeMillis - self.stop + self.stop() } case msg @ Meet(a, c) => @@ -110,7 +110,7 @@ object Chameneos { def run { // System.setProperty("akka.config", "akka.conf") Chameneos.start = System.currentTimeMillis - actorOf(new Mall(1000000, 4)).start + actorOf(new Mall(1000000, 4)).start() Thread.sleep(10000) println("Elapsed: " + (end - start)) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/FSMActorSpec.scala new file mode 100644 index 0000000000..9489c1e64f --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/actor/actor/FSMActorSpec.scala @@ -0,0 +1,148 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.actor + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers + +import akka.testing._ + +import FSM._ +import akka.util.Duration +import akka.util.duration._ + + +object FSMActorSpec { + + val unlockedLatch = TestLatch() + val lockedLatch = TestLatch() + val unhandledLatch = TestLatch() + val terminatedLatch = TestLatch() + val transitionLatch = TestLatch() + val initialStateLatch = TestLatch() + val transitionCallBackLatch = TestLatch() + + sealed trait LockState + case object Locked extends LockState + case object Open extends LockState + + class Lock(code: String, timeout: Duration) extends Actor with FSM[LockState, CodeState] { + + startWith(Locked, CodeState("", code)) + + when(Locked) { + case Event(digit: Char, CodeState(soFar, code)) => { + soFar + digit match { + case incomplete if incomplete.length < code.length => + stay using CodeState(incomplete, code) + case codeTry if (codeTry == code) => { + doUnlock + goto(Open) using CodeState("", code) forMax timeout + } + case wrong => { + stay using CodeState("", code) + } + } + } + case Event("hello", _) => stay replying "world" 
+ case Event("bye", _) => stop(Shutdown) + } + + when(Open) { + case Event(StateTimeout, _) => { + doLock + goto(Locked) + } + } + + whenUnhandled { + case Event(_, stateData) => { + unhandledLatch.open + stay + } + } + + onTransition { + case Locked -> Open => transitionLatch.open + } + + // verify that old-style does still compile + onTransition (transitionHandler _) + + def transitionHandler(from: LockState, to: LockState) = { + // dummy + } + + onTermination { + case StopEvent(Shutdown, Locked, _) => + // stop is called from lockstate with shutdown as reason... + terminatedLatch.open + } + + // initialize the lock + initialize + + private def doLock() { + lockedLatch.open + } + + private def doUnlock = { + unlockedLatch.open + } + } + + case class CodeState(soFar: String, code: String) +} + +class FSMActorSpec extends WordSpec with MustMatchers { + import FSMActorSpec._ + + "An FSM Actor" must { + + "unlock the lock" in { + + // lock that locked after being open for 1 sec + val lock = Actor.actorOf(new Lock("33221", 1 second)).start() + + val transitionTester = Actor.actorOf(new Actor { def receive = { + case Transition(_, _, _) => transitionCallBackLatch.open + case CurrentState(_, Locked) => initialStateLatch.open + }}).start() + + lock ! SubscribeTransitionCallBack(transitionTester) + initialStateLatch.await + + lock ! '3' + lock ! '3' + lock ! '2' + lock ! '2' + lock ! '1' + + unlockedLatch.await + transitionLatch.await + transitionCallBackLatch.await + lockedLatch.await + + lock ! "not_handled" + unhandledLatch.await + + val answerLatch = TestLatch() + object Hello + object Bye + val tester = Actor.actorOf(new Actor { + protected def receive = { + case Hello => lock ! "hello" + case "world" => answerLatch.open + case Bye => lock ! "bye" + } + }).start() + tester ! Hello + answerLatch.await + + tester ! 
Bye + terminatedLatch.await + } + } +} diff --git a/akka-actor/src/test/scala/akka/actor/actor/FSMTimingSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/FSMTimingSpec.scala similarity index 94% rename from akka-actor/src/test/scala/akka/actor/actor/FSMTimingSpec.scala rename to akka-actor-tests/src/test/scala/akka/actor/actor/FSMTimingSpec.scala index a59785ab7a..606ac280b7 100644 --- a/akka-actor/src/test/scala/akka/actor/actor/FSMTimingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/actor/FSMTimingSpec.scala @@ -1,20 +1,17 @@ package akka.actor -import akka.testkit.TestKit -import akka.util.duration._ - import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers -class FSMTimingSpec - extends WordSpec - with MustMatchers - with TestKit { +import akka.testkit.TestKit +import akka.util.duration._ + +class FSMTimingSpec extends WordSpec with MustMatchers with TestKit { import FSMTimingSpec._ import FSM._ - val fsm = Actor.actorOf(new StateMachine(testActor)).start + val fsm = Actor.actorOf(new StateMachine(testActor)).start() fsm ! 
SubscribeTransitionCallBack(testActor) expectMsg(200 millis, CurrentState(fsm, Initial)) @@ -140,4 +137,3 @@ object FSMTimingSpec { } -// vim: set ts=2 sw=2 et: diff --git a/akka-actor/src/test/scala/akka/actor/actor/ForwardActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/ForwardActorSpec.scala similarity index 50% rename from akka-actor/src/test/scala/akka/actor/actor/ForwardActorSpec.scala rename to akka-actor-tests/src/test/scala/akka/actor/actor/ForwardActorSpec.scala index 3a1efe1fe8..29d9fc5e10 100644 --- a/akka-actor/src/test/scala/akka/actor/actor/ForwardActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/actor/ForwardActorSpec.scala @@ -1,22 +1,29 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + package akka.actor -import java.util.concurrent.{TimeUnit, CountDownLatch} -import org.scalatest.junit.JUnitSuite -import org.junit.Test +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers + +import akka.testing._ +import akka.util.duration._ import Actor._ + object ForwardActorSpec { object ForwardState { var sender: Option[ActorRef] = None } class ReceiverActor extends Actor { - val latch = new CountDownLatch(1) + val latch = TestLatch() def receive = { case "SendBang" => { ForwardState.sender = self.sender - latch.countDown + latch.countDown() } case "SendBangBang" => self.reply("SendBangBang") } @@ -25,7 +32,7 @@ object ForwardActorSpec { class ForwardActor extends Actor { val receiverActor = actorOf[ReceiverActor] - receiverActor.start + receiverActor.start() def receive = { case "SendBang" => receiverActor.forward("SendBang") case "SendBangBang" => receiverActor.forward("SendBangBang") @@ -34,7 +41,7 @@ object ForwardActorSpec { class BangSenderActor extends Actor { val forwardActor = actorOf[ForwardActor] - forwardActor.start + forwardActor.start() forwardActor ! 
"SendBang" def receive = { case _ => {} @@ -42,11 +49,11 @@ object ForwardActorSpec { } class BangBangSenderActor extends Actor { - val latch = new CountDownLatch(1) + val latch = TestLatch() val forwardActor = actorOf[ForwardActor] - forwardActor.start + forwardActor.start() (forwardActor !! "SendBangBang") match { - case Some(_) => latch.countDown + case Some(_) => latch.countDown() case None => {} } def receive = { @@ -55,27 +62,27 @@ object ForwardActorSpec { } } -class ForwardActorSpec extends JUnitSuite { +class ForwardActorSpec extends WordSpec with MustMatchers { import ForwardActorSpec._ - @Test - def shouldForwardActorReferenceWhenInvokingForwardOnBang { - val senderActor = actorOf[BangSenderActor] - val latch = senderActor.actor.asInstanceOf[BangSenderActor] + "A Forward Actor" must { + "forward actor reference when invoking forward on bang" in { + val senderActor = actorOf[BangSenderActor] + val latch = senderActor.actor.asInstanceOf[BangSenderActor] .forwardActor.actor.asInstanceOf[ForwardActor] .receiverActor.actor.asInstanceOf[ReceiverActor] .latch - senderActor.start - assert(latch.await(1L, TimeUnit.SECONDS)) - assert(ForwardState.sender ne null) - assert(senderActor.toString === ForwardState.sender.get.toString) - } + senderActor.start() + latch.await + ForwardState.sender must not be (null) + senderActor.toString must be (ForwardState.sender.get.toString) + } - @Test - def shouldForwardActorReferenceWhenInvokingForwardOnBangBang { - val senderActor = actorOf[BangBangSenderActor] - senderActor.start - val latch = senderActor.actor.asInstanceOf[BangBangSenderActor].latch - assert(latch.await(1L, TimeUnit.SECONDS)) + "forward actor reference when invoking forward on bang bang" in { + val senderActor = actorOf[BangBangSenderActor] + senderActor.start() + val latch = senderActor.actor.asInstanceOf[BangBangSenderActor].latch + latch.await + } } } diff --git a/akka-actor/src/test/scala/akka/actor/actor/HotSwapSpec.scala 
b/akka-actor-tests/src/test/scala/akka/actor/actor/HotSwapSpec.scala similarity index 88% rename from akka-actor/src/test/scala/akka/actor/actor/HotSwapSpec.scala rename to akka-actor-tests/src/test/scala/akka/actor/actor/HotSwapSpec.scala index 011141c746..6cb1da93e8 100644 --- a/akka-actor/src/test/scala/akka/actor/actor/HotSwapSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/actor/HotSwapSpec.scala @@ -1,21 +1,27 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + package akka.actor import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers + +import akka.testing._ + import Actor._ -import java.util.concurrent.CyclicBarrier class HotSwapSpec extends WordSpec with MustMatchers { - "An Actor" should { + "An Actor" must { "be able to hotswap its behavior with HotSwap(..)" in { - val barrier = new CyclicBarrier(2) + val barrier = TestBarrier(2) @volatile var _log = "" val a = actorOf( new Actor { def receive = { case _ => _log += "default" } - }).start + }).start() a ! HotSwap( self => { case _ => _log += "swapped" @@ -27,7 +33,7 @@ class HotSwapSpec extends WordSpec with MustMatchers { } "be able to hotswap its behavior with become(..)" in { - val barrier = new CyclicBarrier(2) + val barrier = TestBarrier(2) @volatile var _log = "" val a = actorOf(new Actor { def receive = { @@ -40,7 +46,7 @@ class HotSwapSpec extends WordSpec with MustMatchers { barrier.await }) } - }).start + }).start() a ! "init" barrier.await @@ -55,7 +61,7 @@ class HotSwapSpec extends WordSpec with MustMatchers { } "be able to revert hotswap its behavior with RevertHotSwap(..)" in { - val barrier = new CyclicBarrier(2) + val barrier = TestBarrier(2) @volatile var _log = "" val a = actorOf( new Actor { def receive = { @@ -63,7 +69,7 @@ class HotSwapSpec extends WordSpec with MustMatchers { _log += "init" barrier.await } - }).start + }).start() a ! 
"init" barrier.await @@ -100,7 +106,7 @@ class HotSwapSpec extends WordSpec with MustMatchers { } "be able to revert hotswap its behavior with unbecome" in { - val barrier = new CyclicBarrier(2) + val barrier = TestBarrier(2) @volatile var _log = "" val a = actorOf(new Actor { def receive = { @@ -113,11 +119,11 @@ class HotSwapSpec extends WordSpec with MustMatchers { _log += "swapped" barrier.await case "revert" => - unbecome + unbecome() }) barrier.await } - }).start + }).start() a ! "init" barrier.await diff --git a/akka-actor-tests/src/test/scala/akka/actor/actor/ReceiveTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/ReceiveTimeoutSpec.scala new file mode 100644 index 0000000000..00298e10c0 --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/actor/actor/ReceiveTimeoutSpec.scala @@ -0,0 +1,120 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.actor + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers + +import akka.testing._ +import akka.util.duration._ + +import Actor._ +import java.util.concurrent.atomic.AtomicInteger + + +class ReceiveTimeoutSpec extends WordSpec with MustMatchers { + import Actor._ + + "An actor with receive timeout" must { + + "get timeout" in { + val timeoutLatch = TestLatch() + + val timeoutActor = actorOf(new Actor { + self.receiveTimeout = Some(500L) + + protected def receive = { + case ReceiveTimeout => timeoutLatch.open + } + }).start() + + timeoutLatch.await + timeoutActor.stop() + } + + "get timeout when swapped" in { + val timeoutLatch = TestLatch() + + val timeoutActor = actorOf(new Actor { + self.receiveTimeout = Some(500L) + + protected def receive = { + case ReceiveTimeout => timeoutLatch.open + } + }).start() + + timeoutLatch.await + + val swappedLatch = TestLatch() + + timeoutActor ! 
HotSwap(self => { + case ReceiveTimeout => swappedLatch.open + }) + + swappedLatch.await + timeoutActor.stop() + } + + "reschedule timeout after regular receive" in { + val timeoutLatch = TestLatch() + case object Tick + + val timeoutActor = actorOf(new Actor { + self.receiveTimeout = Some(500L) + + protected def receive = { + case Tick => () + case ReceiveTimeout => timeoutLatch.open + } + }).start() + + timeoutActor ! Tick + + timeoutLatch.await + timeoutActor.stop() + } + + "be able to turn off timeout if desired" in { + val count = new AtomicInteger(0) + val timeoutLatch = TestLatch() + case object Tick + + val timeoutActor = actorOf(new Actor { + self.receiveTimeout = Some(500L) + + protected def receive = { + case Tick => () + case ReceiveTimeout => + count.incrementAndGet + timeoutLatch.open + self.receiveTimeout = None + } + }).start() + + timeoutActor ! Tick + + timeoutLatch.await + count.get must be (1) + timeoutActor.stop() + } + + "not receive timeout message when not specified" in { + val timeoutLatch = TestLatch() + + val timeoutActor = actorOf(new Actor { + protected def receive = { + case ReceiveTimeout => timeoutLatch.open + } + }).start() + + timeoutLatch.awaitTimeout(1 second) // timeout expected + timeoutActor.stop() + } + + "have ReceiveTimeout eq to Actors ReceiveTimeout" in { + akka.actor.Actors.receiveTimeout() must be theSameInstanceAs (ReceiveTimeout) + } + } +} diff --git a/akka-actor/src/test/scala/akka/actor/supervisor/RestartStrategySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/supervisor/RestartStrategySpec.scala similarity index 96% rename from akka-actor/src/test/scala/akka/actor/supervisor/RestartStrategySpec.scala rename to akka-actor-tests/src/test/scala/akka/actor/supervisor/RestartStrategySpec.scala index 741cd7a49e..f2a3103d08 100644 --- a/akka-actor/src/test/scala/akka/actor/supervisor/RestartStrategySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/supervisor/RestartStrategySpec.scala @@ -25,7 +25,7 @@ 
class RestartStrategySpec extends JUnitSuite { val boss = actorOf(new Actor{ self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), Some(2), Some(1000)) protected def receive = { case _ => () } - }).start + }).start() val restartLatch = new StandardLatch val secondRestartLatch = new StandardLatch @@ -36,7 +36,7 @@ class RestartStrategySpec extends JUnitSuite { val slave = actorOf(new Actor{ protected def receive = { - case Ping => countDownLatch.countDown + case Ping => countDownLatch.countDown() case Crash => throw new Exception("Crashing...") } override def postRestart(reason: Throwable) = { @@ -80,7 +80,7 @@ class RestartStrategySpec extends JUnitSuite { val boss = actorOf(new Actor{ self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), None, None) protected def receive = { case _ => () } - }).start + }).start() val countDownLatch = new CountDownLatch(100) @@ -91,7 +91,7 @@ class RestartStrategySpec extends JUnitSuite { } override def postRestart(reason: Throwable) = { - countDownLatch.countDown + countDownLatch.countDown() } }) @@ -107,7 +107,7 @@ class RestartStrategySpec extends JUnitSuite { val boss = actorOf(new Actor{ self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), Some(2), Some(500)) protected def receive = { case _ => () } - }).start + }).start() val restartLatch = new StandardLatch val secondRestartLatch = new StandardLatch @@ -168,7 +168,7 @@ class RestartStrategySpec extends JUnitSuite { val boss = actorOf(new Actor{ self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), Some(2), None) protected def receive = { case _ => () } - }).start + }).start() val restartLatch = new StandardLatch val secondRestartLatch = new StandardLatch @@ -179,7 +179,7 @@ class RestartStrategySpec extends JUnitSuite { val slave = actorOf(new Actor{ protected def receive = { - case Ping => countDownLatch.countDown + case Ping => countDownLatch.countDown() case Crash => throw new Exception("Crashing...") } override def 
postRestart(reason: Throwable) = { @@ -230,12 +230,12 @@ class RestartStrategySpec extends JUnitSuite { protected def receive = { case m:MaximumNumberOfRestartsWithinTimeRangeReached => maxNoOfRestartsLatch.open } - }).start + }).start() val slave = actorOf(new Actor{ protected def receive = { - case Ping => countDownLatch.countDown + case Ping => countDownLatch.countDown() case Crash => throw new Exception("Crashing...") } diff --git a/akka-actor/src/test/scala/akka/actor/supervisor/SupervisorHierarchySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorHierarchySpec.scala similarity index 97% rename from akka-actor/src/test/scala/akka/actor/supervisor/SupervisorHierarchySpec.scala rename to akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorHierarchySpec.scala index 0fee4b77b5..529d2ef208 100644 --- a/akka-actor/src/test/scala/akka/actor/supervisor/SupervisorHierarchySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorHierarchySpec.scala @@ -17,7 +17,7 @@ object SupervisorHierarchySpec { class CountDownActor(countDown: CountDownLatch) extends Actor { protected def receive = { case _ => () } - override def postRestart(reason: Throwable) = countDown.countDown + override def postRestart(reason: Throwable) = countDown.countDown() } class CrasherActor extends Actor { @@ -40,7 +40,7 @@ class SupervisorHierarchySpec extends JUnitSuite { self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), 5, 1000) protected def receive = { case _ => () } - }).start + }).start() val manager = actorOf(new CountDownActor(countDown)) boss.startLink(manager) @@ -65,9 +65,9 @@ class SupervisorHierarchySpec extends JUnitSuite { self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), 1, 5000) protected def receive = { case MaximumNumberOfRestartsWithinTimeRangeReached(_, _, _, _) => - countDown.countDown + countDown.countDown() } - }).start + }).start() boss.startLink(crasher) crasher ! 
Exit(crasher, new FireWorkerException("Fire the worker!")) diff --git a/akka-actor/src/test/scala/akka/actor/supervisor/SupervisorMiscSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorMiscSpec.scala similarity index 95% rename from akka-actor/src/test/scala/akka/actor/supervisor/SupervisorMiscSpec.scala rename to akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorMiscSpec.scala index 78547b4d19..5a812c3f8a 100644 --- a/akka-actor/src/test/scala/akka/actor/supervisor/SupervisorMiscSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorMiscSpec.scala @@ -17,43 +17,43 @@ class SupervisorMiscSpec extends WordSpec with MustMatchers { val actor1 = Actor.actorOf(new Actor { self.dispatcher = Dispatchers.newThreadBasedDispatcher(self) - override def postRestart(cause: Throwable) {countDownLatch.countDown} + override def postRestart(cause: Throwable) {countDownLatch.countDown()} protected def receive = { case "kill" => throw new Exception("killed") case _ => println("received unknown message") } - }).start + }).start() val actor2 = Actor.actorOf(new Actor { self.dispatcher = Dispatchers.newThreadBasedDispatcher(self) - override def postRestart(cause: Throwable) {countDownLatch.countDown} + override def postRestart(cause: Throwable) {countDownLatch.countDown()} protected def receive = { case "kill" => throw new Exception("killed") case _ => println("received unknown message") } - }).start + }).start() val actor3 = Actor.actorOf(new Actor { self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("test").build - override def postRestart(cause: Throwable) {countDownLatch.countDown} + override def postRestart(cause: Throwable) {countDownLatch.countDown()} protected def receive = { case "kill" => throw new Exception("killed") case _ => println("received unknown message") } - }).start + }).start() val actor4 = Actor.actorOf(new Actor { self.dispatcher = Dispatchers.newThreadBasedDispatcher(self) - 
override def postRestart(cause: Throwable) {countDownLatch.countDown} + override def postRestart(cause: Throwable) {countDownLatch.countDown()} protected def receive = { case "kill" => throw new Exception("killed") case _ => println("received unknown message") } - }).start + }).start() val sup = Supervisor( SupervisorConfig( diff --git a/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorSpec.scala new file mode 100644 index 0000000000..253570f576 --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorSpec.scala @@ -0,0 +1,387 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.actor + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers +import org.scalatest.BeforeAndAfterEach + +import akka.testing._ +import akka.testing.Testing.{testMillis, sleepFor} +import akka.util.duration._ +import akka.config.Supervision._ +import akka.{Die, Ping} +import Actor._ + +import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.LinkedBlockingQueue + + +object SupervisorSpec { + val Timeout = 5 seconds + val TimeoutMillis = testMillis(Timeout).toInt + + // ===================================================== + // Message logs + // ===================================================== + + val PingMessage = "ping" + val PongMessage = "pong" + val ExceptionMessage = "Expected exception; to test fault-tolerance" + + var messageLog = new LinkedBlockingQueue[String] + + def messageLogPoll = messageLog.poll(Timeout.length, Timeout.unit) + + // ===================================================== + // Actors + // ===================================================== + + class PingPongActor extends Actor { + def receive = { + case Ping => + messageLog.put(PingMessage) + self.reply_?(PongMessage) + case Die => + throw new RuntimeException(ExceptionMessage) + } + + override def postRestart(reason: 
Throwable) { + messageLog.put(reason.getMessage) + } + } + + class TemporaryActor extends PingPongActor { + self.lifeCycle = Temporary + } + + class Master extends Actor { + self.faultHandler = OneForOneStrategy(List(classOf[Exception]), 5, testMillis(1 second).toInt) + + val temp = self.spawnLink[TemporaryActor] + + override def receive = { + case Die => temp !! (Die, TimeoutMillis) + } + } + + // ===================================================== + // Creating actors and supervisors + // ===================================================== + + def temporaryActorAllForOne = { + val temporaryActor = actorOf[TemporaryActor].start() + + val supervisor = Supervisor( + SupervisorConfig( + AllForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis), + Supervise( + temporaryActor, + Temporary) + :: Nil)) + + (temporaryActor, supervisor) + } + + def singleActorAllForOne = { + val pingpong = actorOf[PingPongActor].start() + + val supervisor = Supervisor( + SupervisorConfig( + AllForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis), + Supervise( + pingpong, + Permanent) + :: Nil)) + + (pingpong, supervisor) + } + + def singleActorOneForOne = { + val pingpong = actorOf[PingPongActor].start() + + val supervisor = Supervisor( + SupervisorConfig( + OneForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis), + Supervise( + pingpong, + Permanent) + :: Nil)) + + (pingpong, supervisor) + } + + def multipleActorsAllForOne = { + val pingpong1 = actorOf[PingPongActor].start() + val pingpong2 = actorOf[PingPongActor].start() + val pingpong3 = actorOf[PingPongActor].start() + + val supervisor = Supervisor( + SupervisorConfig( + AllForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis), + Supervise( + pingpong1, + Permanent) + :: + Supervise( + pingpong2, + Permanent) + :: + Supervise( + pingpong3, + Permanent) + :: Nil)) + + (pingpong1, pingpong2, pingpong3, supervisor) + } + + def multipleActorsOneForOne = { + val pingpong1 = actorOf[PingPongActor].start() + val 
pingpong2 = actorOf[PingPongActor].start() + val pingpong3 = actorOf[PingPongActor].start() + + val supervisor = Supervisor( + SupervisorConfig( + OneForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis), + Supervise( + pingpong1, + Permanent) + :: + Supervise( + pingpong2, + Permanent) + :: + Supervise( + pingpong3, + Permanent) + :: Nil)) + + (pingpong1, pingpong2, pingpong3, supervisor) + } + + def nestedSupervisorsAllForOne = { + val pingpong1 = actorOf[PingPongActor] + val pingpong2 = actorOf[PingPongActor] + val pingpong3 = actorOf[PingPongActor] + + val supervisor = Supervisor( + SupervisorConfig( + AllForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis), + Supervise( + pingpong1, + Permanent) + :: + SupervisorConfig( + AllForOneStrategy(Nil, 3, TimeoutMillis), + Supervise( + pingpong2, + Permanent) + :: + Supervise( + pingpong3, + Permanent) + :: Nil) + :: Nil)) + + (pingpong1, pingpong2, pingpong3, supervisor) + } +} + +class SupervisorSpec extends WordSpec with MustMatchers with BeforeAndAfterEach { + import SupervisorSpec._ + + override def beforeEach() = { + messageLog.clear + } + + def ping(pingPongActor: ActorRef) = { + (pingPongActor !! (Ping, TimeoutMillis)).getOrElse("nil") must be (PongMessage) + messageLogPoll must be (PingMessage) + } + + def kill(pingPongActor: ActorRef) = { + intercept[RuntimeException] { pingPongActor !! (Die, TimeoutMillis) } + messageLogPoll must be (ExceptionMessage) + } + + "A supervisor" must { + + "not restart programmatically linked temporary actor" in { + val master = actorOf[Master].start() + + intercept[RuntimeException] { + master !! (Die, TimeoutMillis) + } + + sleepFor(1 second) + messageLog.size must be (0) + } + + "not restart temporary actor" in { + val (temporaryActor, supervisor) = temporaryActorAllForOne + + intercept[RuntimeException] { + temporaryActor !! 
(Die, TimeoutMillis) + } + + sleepFor(1 second) + messageLog.size must be (0) + } + + "start server for nested supervisor hierarchy" in { + val (actor1, actor2, actor3, supervisor) = nestedSupervisorsAllForOne + ping(actor1) + } + + "kill single actor OneForOne" in { + val (actor, supervisor) = singleActorOneForOne + kill(actor) + } + + "call-kill-call single actor OneForOne" in { + val (actor, supervisor) = singleActorOneForOne + ping(actor) + kill(actor) + ping(actor) + } + + "kill single actor AllForOne" in { + val (actor, supervisor) = singleActorAllForOne + kill(actor) + } + + "call-kill-call single actor AllForOne" in { + val (actor, supervisor) = singleActorAllForOne + ping(actor) + kill(actor) + ping(actor) + } + + "kill multiple actors OneForOne 1" in { + val (actor1, actor2, actor3, supervisor) = multipleActorsOneForOne + kill(actor1) + } + + "kill multiple actors OneForOne 2" in { + val (actor1, actor2, actor3, supervisor) = multipleActorsOneForOne + kill(actor3) + } + + "call-kill-call multiple actors OneForOne" in { + val (actor1, actor2, actor3, supervisor) = multipleActorsOneForOne + + ping(actor1) + ping(actor2) + ping(actor3) + + kill(actor2) + + ping(actor1) + ping(actor2) + ping(actor3) + } + + "kill multiple actors AllForOne" in { + val (actor1, actor2, actor3, supervisor) = multipleActorsAllForOne + + kill(actor2) + + // and two more exception messages + messageLogPoll must be (ExceptionMessage) + messageLogPoll must be (ExceptionMessage) + } + + "call-kill-call multiple actors AllForOne" in { + val (actor1, actor2, actor3, supervisor) = multipleActorsAllForOne + + ping(actor1) + ping(actor2) + ping(actor3) + + kill(actor2) + + // and two more exception messages + messageLogPoll must be (ExceptionMessage) + messageLogPoll must be (ExceptionMessage) + + ping(actor1) + ping(actor2) + ping(actor3) + } + + "one-way kill single actor OneForOne" in { + val (actor, supervisor) = singleActorOneForOne + + actor ! 
Die + messageLogPoll must be (ExceptionMessage) + } + + "one-way call-kill-call single actor OneForOne" in { + val (actor, supervisor) = singleActorOneForOne + + actor ! Ping + messageLogPoll must be (PingMessage) + + actor ! Die + messageLogPoll must be (ExceptionMessage) + + actor ! Ping + messageLogPoll must be (PingMessage) + } + + "restart killed actors in nested superviser hierarchy" in { + val (actor1, actor2, actor3, supervisor) = nestedSupervisorsAllForOne + + ping(actor1) + ping(actor2) + ping(actor3) + + kill(actor2) + + // and two more exception messages + messageLogPoll must be (ExceptionMessage) + messageLogPoll must be (ExceptionMessage) + + ping(actor1) + ping(actor2) + ping(actor3) + } + + "must attempt restart when exception during restart" in { + val inits = new AtomicInteger(0) + + val dyingActor = actorOf(new Actor { + self.lifeCycle = Permanent + inits.incrementAndGet + + if (inits.get % 2 == 0) throw new IllegalStateException("Don't wanna!") + + def receive = { + case Ping => self.reply_?(PongMessage) + case Die => throw new Exception("expected") + } + }) + + val supervisor = + Supervisor( + SupervisorConfig( + OneForOneStrategy(classOf[Exception] :: Nil, 3, 10000), + Supervise(dyingActor, Permanent) :: Nil)) + + intercept[Exception] { + dyingActor !! (Die, TimeoutMillis) + } + + // give time for restart + sleepFor(3 seconds) + + (dyingActor !! 
(Ping, TimeoutMillis)).getOrElse("nil") must be (PongMessage) + + inits.get must be (3) + + supervisor.shutdown + } + } +} diff --git a/akka-actor/src/test/scala/akka/actor/supervisor/SupervisorTreeSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorTreeSpec.scala similarity index 80% rename from akka-actor/src/test/scala/akka/actor/supervisor/SupervisorTreeSpec.scala rename to akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorTreeSpec.scala index cb694d7408..d298b2c930 100644 --- a/akka-actor/src/test/scala/akka/actor/supervisor/SupervisorTreeSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorTreeSpec.scala @@ -6,14 +6,16 @@ package akka.actor import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers +import akka.util.duration._ +import akka.testing.Testing.sleepFor import akka.dispatch.Dispatchers import akka.config.Supervision.{SupervisorConfig, OneForOneStrategy, Supervise, Permanent} import Actor._ -class SupervisorTreeSpec extends WordSpec with MustMatchers { - +class SupervisorTreeSpec extends WordSpec with MustMatchers { var log = "" case object Die + class Chainer(myId: String, a: Option[ActorRef] = None) extends Actor { self.id = myId self.lifeCycle = Permanent @@ -29,17 +31,17 @@ class SupervisorTreeSpec extends WordSpec with MustMatchers { } } - "In a 3 levels deep supervisor tree (linked in the constructor) we" should { + "In a 3 levels deep supervisor tree (linked in the constructor) we" must { "be able to kill the middle actor and see itself and its child restarted" in { log = "INIT" - val lastActor = actorOf(new Chainer("lastActor")).start - val middleActor = actorOf(new Chainer("middleActor", Some(lastActor))).start - val headActor = actorOf(new Chainer("headActor", Some(middleActor))).start + val lastActor = actorOf(new Chainer("lastActor")).start() + val middleActor = actorOf(new Chainer("middleActor", Some(lastActor))).start() + val headActor = actorOf(new 
Chainer("headActor", Some(middleActor))).start() middleActor ! Die - Thread.sleep(100) + sleepFor(500 millis) log must equal ("INITmiddleActorlastActor") } } diff --git a/akka-actor/src/test/scala/akka/actor/supervisor/Ticket669Spec.scala b/akka-actor-tests/src/test/scala/akka/actor/supervisor/Ticket669Spec.scala similarity index 86% rename from akka-actor/src/test/scala/akka/actor/supervisor/Ticket669Spec.scala rename to akka-actor-tests/src/test/scala/akka/actor/supervisor/Ticket669Spec.scala index 54c8179152..206d06d1c4 100644 --- a/akka-actor/src/test/scala/akka/actor/supervisor/Ticket669Spec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/supervisor/Ticket669Spec.scala @@ -14,12 +14,12 @@ import org.scalatest.matchers.MustMatchers class Ticket669Spec extends WordSpec with MustMatchers with BeforeAndAfterAll { import Ticket669Spec._ - override def afterAll = Actor.registry.shutdownAll + override def afterAll = Actor.registry.shutdownAll() "A supervised actor with lifecycle PERMANENT" should { "be able to reply on failure during preRestart" in { val latch = new CountDownLatch(1) - val sender = Actor.actorOf(new Sender(latch)).start + val sender = Actor.actorOf(new Sender(latch)).start() val supervised = Actor.actorOf[Supervised] val supervisor = Supervisor(SupervisorConfig( @@ -33,7 +33,7 @@ class Ticket669Spec extends WordSpec with MustMatchers with BeforeAndAfterAll { "be able to reply on failure during postStop" in { val latch = new CountDownLatch(1) - val sender = Actor.actorOf(new Sender(latch)).start + val sender = Actor.actorOf(new Sender(latch)).start() val supervised = Actor.actorOf[Supervised] val supervisor = Supervisor(SupervisorConfig( @@ -50,8 +50,8 @@ class Ticket669Spec extends WordSpec with MustMatchers with BeforeAndAfterAll { object Ticket669Spec { class Sender(latch: CountDownLatch) extends Actor { def receive = { - case "failure1" => latch.countDown - case "failure2" => latch.countDown + case "failure1" => latch.countDown() + case 
"failure2" => latch.countDown() case _ => { } } } diff --git a/akka-actor/src/test/scala/akka/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala similarity index 100% rename from akka-actor/src/test/scala/akka/config/ConfigSpec.scala rename to akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala diff --git a/akka-actor/src/test/scala/akka/dataflow/DataFlowSpec.scala b/akka-actor-tests/src/test/scala/akka/dataflow/DataFlowSpec.scala similarity index 95% rename from akka-actor/src/test/scala/akka/dataflow/DataFlowSpec.scala rename to akka-actor-tests/src/test/scala/akka/dataflow/DataFlowSpec.scala index f5a107f511..e0e0a09e6b 100644 --- a/akka-actor/src/test/scala/akka/dataflow/DataFlowSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dataflow/DataFlowSpec.scala @@ -29,7 +29,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll { thread { z << x() + y() result.set(z()) - latch.countDown + latch.countDown() } thread { x << 40 } thread { y << 2 } @@ -62,7 +62,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll { thread { z << y() result.set(z()) - latch.countDown + latch.countDown() } latch.await(10,TimeUnit.SECONDS) should equal (true) @@ -72,7 +72,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll { /* it("should be able to join streams") { import DataFlow._ - Actor.registry.shutdownAll + Actor.registry.shutdownAll() def ints(n: Int, max: Int, stream: DataFlowStream[Int]): Unit = if (n != max) { stream <<< n @@ -93,7 +93,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll { val t2 = thread { Thread.sleep(1000) result.set(producer.map(x => x * x).foldLeft(0)(_ + _)) - latch.countDown + latch.countDown() } latch.await(3,TimeUnit.SECONDS) should equal (true) @@ -123,7 +123,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll { val x = stream() if(result.addAndGet(x) == 166666500) - 
latch.countDown + latch.countDown() recurseSum(stream) } @@ -139,7 +139,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll { /* it("should be able to conditionally set variables") { import DataFlow._ - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val latch = new CountDownLatch(1) val x, y, z, v = new DataFlowVariable[Int] @@ -147,7 +147,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll { val main = thread { x << 1 z << Math.max(x(),y()) - latch.countDown + latch.countDown() } val setY = thread { diff --git a/akka-actor/src/test/scala/akka/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ActorModelSpec.scala similarity index 90% rename from akka-actor/src/test/scala/akka/dispatch/ActorModelSpec.scala rename to akka-actor-tests/src/test/scala/akka/dispatch/ActorModelSpec.scala index 55c2e001af..4e60ffcc96 100644 --- a/akka-actor/src/test/scala/akka/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ActorModelSpec.scala @@ -6,6 +6,7 @@ package akka.actor.dispatch import org.scalatest.junit.JUnitSuite import org.junit.Test import org.scalatest.Assertions._ +import akka.testing._ import akka.dispatch._ import akka.actor.{ActorRef, Actor} import akka.actor.Actor._ @@ -53,13 +54,13 @@ object ActorModelSpec { case Await(latch) => ack; latch.await(); busy.switchOff() case Meet(sign, wait) => ack; sign.countDown(); wait.await(); busy.switchOff() case Wait(time) => ack; Thread.sleep(time); busy.switchOff() - case WaitAck(time, l) => ack; Thread.sleep(time); l.countDown; busy.switchOff() + case WaitAck(time, l) => ack; Thread.sleep(time); l.countDown(); busy.switchOff() case Reply(msg) => ack; self.reply(msg); busy.switchOff() case Reply_?(msg) => ack; self.reply_?(msg); busy.switchOff() case Forward(to,msg) => ack; to.forward(msg); busy.switchOff() case CountDown(latch) => ack; latch.countDown(); busy.switchOff() case Increment(count) => ack; 
count.incrementAndGet(); busy.switchOff() - case CountDownNStop(l)=> ack; l.countDown; self.stop; busy.switchOff() + case CountDownNStop(l)=> ack; l.countDown(); self.stop(); busy.switchOff() case Restart => ack; busy.switchOff(); throw new Exception("Restart requested") } } @@ -201,9 +202,9 @@ abstract class ActorModelSpec extends JUnitSuite { implicit val dispatcher = newInterceptedDispatcher val a = newTestActor assertDispatcher(dispatcher)(starts = 0, stops = 0) - a.start + a.start() assertDispatcher(dispatcher)(starts = 1, stops = 0) - a.stop + a.stop() await(dispatcher.stops.get == 1)(withinMs = dispatcher.timeoutMs * 5) assertDispatcher(dispatcher)(starts = 1, stops = 1) assertRef(a,dispatcher)( @@ -221,19 +222,19 @@ abstract class ActorModelSpec extends JUnitSuite { implicit val dispatcher = newInterceptedDispatcher val a = newTestActor val start,oneAtATime = new CountDownLatch(1) - a.start + a.start() a ! CountDown(start) - assertCountDown(start,3000, "Should process first message within 3 seconds") + assertCountDown(start, Testing.testTime(3000), "Should process first message within 3 seconds") assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, msgsProcessed = 1) a ! Wait(1000) a ! CountDown(oneAtATime) // in case of serialization violation, restart would happen instead of count down - assertCountDown(oneAtATime,1500,"Processed message when allowed") + assertCountDown(oneAtATime, Testing.testTime(1500) ,"Processed message when allowed") assertRefDefaultZero(a)(registers = 1, msgsReceived = 3, msgsProcessed = 3) - a.stop + a.stop() assertRefDefaultZero(a)(registers = 1, unregisters = 1, msgsReceived = 3, msgsProcessed = 3) } @@ -241,55 +242,55 @@ abstract class ActorModelSpec extends JUnitSuite { implicit val dispatcher = newInterceptedDispatcher val a = newTestActor val counter = new CountDownLatch(200) - a.start + a.start() def start = spawn { for (i <- 1 to 20) { a ! 
WaitAck(1, counter) } } for (i <- 1 to 10) { start } - assertCountDown(counter, 3000, "Should process 200 messages") + assertCountDown(counter, Testing.testTime(3000), "Should process 200 messages") assertRefDefaultZero(a)(registers = 1, msgsReceived = 200, msgsProcessed = 200) - a.stop + a.stop() } def spawn(f : => Unit) = { val thread = new Thread { override def run { f } } - thread.start + thread.start() thread } @Test def dispatcherShouldProcessMessagesInParallel: Unit = { implicit val dispatcher = newInterceptedDispatcher - val a, b = newTestActor.start + val a, b = newTestActor.start() val aStart,aStop,bParallel = new CountDownLatch(1) a ! Meet(aStart,aStop) - assertCountDown(aStart,3000, "Should process first message within 3 seconds") + assertCountDown(aStart, Testing.testTime(3000), "Should process first message within 3 seconds") b ! CountDown(bParallel) - assertCountDown(bParallel, 3000, "Should process other actors in parallel") + assertCountDown(bParallel, Testing.testTime(3000), "Should process other actors in parallel") aStop.countDown() - a.stop - b.stop + a.stop() + b.stop() assertRefDefaultZero(a)(registers = 1, unregisters = 1, msgsReceived = 1, msgsProcessed = 1) assertRefDefaultZero(b)(registers = 1, unregisters = 1, msgsReceived = 1, msgsProcessed = 1) } @Test def dispatcherShouldSuspendAndResumeAFailingNonSupervisedPermanentActor { implicit val dispatcher = newInterceptedDispatcher - val a = newTestActor.start + val a = newTestActor.start() val done = new CountDownLatch(1) a ! Restart a ! 
CountDown(done) - assertCountDown(done, 3000, "Should be suspended+resumed and done with next message within 3 seconds") - a.stop + assertCountDown(done, Testing.testTime(3000), "Should be suspended+resumed and done with next message within 3 seconds") + a.stop() assertRefDefaultZero(a)(registers = 1,unregisters = 1, msgsReceived = 2, msgsProcessed = 2, suspensions = 1, resumes = 1) } @Test def dispatcherShouldNotProcessMessagesForASuspendedActor { implicit val dispatcher = newInterceptedDispatcher - val a = newTestActor.start + val a = newTestActor.start() val done = new CountDownLatch(1) dispatcher.suspend(a) a ! CountDown(done) @@ -297,11 +298,11 @@ abstract class ActorModelSpec extends JUnitSuite { assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, suspensions = 1) dispatcher.resume(a) - assertCountDown(done, 3000, "Should resume processing of messages when resumed") + assertCountDown(done, Testing.testTime(3000), "Should resume processing of messages when resumed") assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, msgsProcessed = 1, suspensions = 1, resumes = 1) - a.stop + a.stop() assertRefDefaultZero(a)(registers = 1,unregisters = 1, msgsReceived = 1, msgsProcessed = 1, suspensions = 1, resumes = 1) } @@ -312,9 +313,9 @@ abstract class ActorModelSpec extends JUnitSuite { def flood(num: Int) { val cachedMessage = CountDownNStop(new CountDownLatch(num)) (1 to num) foreach { - _ => newTestActor.start ! cachedMessage + _ => newTestActor.start() ! 
cachedMessage } - assertCountDown(cachedMessage.latch,10000, "Should process " + num + " countdowns") + assertCountDown(cachedMessage.latch, Testing.testTime(10000), "Should process " + num + " countdowns") } for(run <- 1 to 3) { flood(10000) diff --git a/akka-actor/src/test/scala/akka/dispatch/DispatchersSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/DispatchersSpec.scala similarity index 100% rename from akka-actor/src/test/scala/akka/dispatch/DispatchersSpec.scala rename to akka-actor-tests/src/test/scala/akka/dispatch/DispatchersSpec.scala diff --git a/akka-actor/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorSpec.scala similarity index 86% rename from akka-actor/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorSpec.scala rename to akka-actor-tests/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorSpec.scala index e9b34c17d3..8020c5acde 100644 --- a/akka-actor/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorSpec.scala @@ -25,7 +25,7 @@ object ExecutorBasedEventDrivenDispatcherActorSpec { class OneWayTestActor extends Actor { self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(self.uuid.toString).build def receive = { - case "OneWay" => OneWayTestActor.oneWay.countDown + case "OneWay" => OneWayTestActor.oneWay.countDown() } } } @@ -35,28 +35,28 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite { private val unit = TimeUnit.MILLISECONDS @Test def shouldSendOneWay = { - val actor = actorOf[OneWayTestActor].start + val actor = actorOf[OneWayTestActor].start() val result = actor ! 
"OneWay" assert(OneWayTestActor.oneWay.await(1, TimeUnit.SECONDS)) - actor.stop + actor.stop() } @Test def shouldSendReplySync = { - val actor = actorOf[TestActor].start + val actor = actorOf[TestActor].start() val result = (actor !! ("Hello", 10000)).as[String] assert("World" === result.get) - actor.stop + actor.stop() } @Test def shouldSendReplyAsync = { - val actor = actorOf[TestActor].start + val actor = actorOf[TestActor].start() val result = actor !! "Hello" assert("World" === result.get.asInstanceOf[String]) - actor.stop + actor.stop() } @Test def shouldSendReceiveException = { - val actor = actorOf[TestActor].start + val actor = actorOf[TestActor].start() try { actor !! "Failure" fail("Should have thrown an exception") @@ -64,7 +64,7 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite { case e => assert("Expected exception; to test fault-tolerance" === e.getMessage()) } - actor.stop + actor.stop() } @Test def shouldRespectThroughput { @@ -80,24 +80,24 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite { new Actor { self.dispatcher = throughputDispatcher def receive = { case "sabotage" => works.set(false) } - }).start + }).start() val slowOne = actorOf( new Actor { self.dispatcher = throughputDispatcher def receive = { case "hogexecutor" => start.await - case "ping" => if (works.get) latch.countDown + case "ping" => if (works.get) latch.countDown() } - }).start + }).start() slowOne ! "hogexecutor" (1 to 100) foreach { _ => slowOne ! "ping"} fastOne ! 
"sabotage" - start.countDown + start.countDown() val result = latch.await(3,TimeUnit.SECONDS) - fastOne.stop - slowOne.stop + fastOne.stop() + slowOne.stop() assert(result === true) } @@ -115,24 +115,24 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite { val fastOne = actorOf( new Actor { self.dispatcher = throughputDispatcher - def receive = { case "ping" => if(works.get) latch.countDown; self.stop } - }).start + def receive = { case "ping" => if(works.get) latch.countDown(); self.stop() } + }).start() val slowOne = actorOf( new Actor { self.dispatcher = throughputDispatcher def receive = { - case "hogexecutor" => ready.countDown; start.await - case "ping" => works.set(false); self.stop + case "hogexecutor" => ready.countDown(); start.await + case "ping" => works.set(false); self.stop() } - }).start + }).start() slowOne ! "hogexecutor" slowOne ! "ping" fastOne ! "ping" assert(ready.await(2,TimeUnit.SECONDS) === true) Thread.sleep(deadlineMs+10) // wait just a bit more than the deadline - start.countDown + start.countDown() assert(latch.await(2,TimeUnit.SECONDS) === true) } } diff --git a/akka-actor/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorsSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorsSpec.scala similarity index 86% rename from akka-actor/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorsSpec.scala rename to akka-actor-tests/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorsSpec.scala index 66a02e0d33..dfdaf9794d 100644 --- a/akka-actor/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorsSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcherActorsSpec.scala @@ -19,7 +19,7 @@ class ExecutorBasedEventDrivenDispatcherActorsSpec extends JUnitSuite with MustM def receive = { case x: Int => { Thread.sleep(50) // slow actor - finishedCounter.countDown + 
finishedCounter.countDown() } } } @@ -29,7 +29,7 @@ class ExecutorBasedEventDrivenDispatcherActorsSpec extends JUnitSuite with MustM def receive = { case x: Int => { - finishedCounter.countDown + finishedCounter.countDown() } } } @@ -37,8 +37,8 @@ class ExecutorBasedEventDrivenDispatcherActorsSpec extends JUnitSuite with MustM @Test def slowActorShouldntBlockFastActor { val sFinished = new CountDownLatch(50) val fFinished = new CountDownLatch(10) - val s = actorOf(new SlowActor(sFinished)).start - val f = actorOf(new FastActor(fFinished)).start + val s = actorOf(new SlowActor(sFinished)).start() + val f = actorOf(new FastActor(fFinished)).start() // send a lot of stuff to s for (i <- 1 to 50) { @@ -55,7 +55,7 @@ class ExecutorBasedEventDrivenDispatcherActorsSpec extends JUnitSuite with MustM assert(sFinished.getCount > 0) sFinished.await assert(sFinished.getCount === 0) - f.stop - s.stop + f.stop() + s.stop() } } diff --git a/akka-actor/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcherSpec.scala similarity index 94% rename from akka-actor/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcherSpec.scala rename to akka-actor-tests/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcherSpec.scala index 2085ed66a0..833a027c85 100644 --- a/akka-actor/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcherSpec.scala @@ -25,7 +25,7 @@ object ExecutorBasedEventDrivenWorkStealingDispatcherSpec { case x: Int => { Thread.sleep(delay) invocationCount += 1 - finishedCounter.countDown + finishedCounter.countDown() } } } @@ -58,8 +58,8 @@ class ExecutorBasedEventDrivenWorkStealingDispatcherSpec extends JUnitSuite with @Test def fastActorShouldStealWorkFromSlowActor { val 
finishedCounter = new CountDownLatch(110) - val slow = actorOf(new DelayableActor("slow", 50, finishedCounter)).start - val fast = actorOf(new DelayableActor("fast", 10, finishedCounter)).start + val slow = actorOf(new DelayableActor("slow", 50, finishedCounter)).start() + val fast = actorOf(new DelayableActor("fast", 10, finishedCounter)).start() var sentToFast = 0 @@ -90,17 +90,17 @@ class ExecutorBasedEventDrivenWorkStealingDispatcherSpec extends JUnitSuite with fast.actor.asInstanceOf[DelayableActor].invocationCount must be > sentToFast fast.actor.asInstanceOf[DelayableActor].invocationCount must be > (slow.actor.asInstanceOf[DelayableActor].invocationCount) - slow.stop - fast.stop + slow.stop() + fast.stop() } @Test def canNotUseActorsOfDifferentTypesInSameDispatcher(): Unit = { val first = actorOf[FirstActor] val second = actorOf[SecondActor] - first.start + first.start() intercept[IllegalActorStateException] { - second.start + second.start() } } @@ -108,9 +108,9 @@ class ExecutorBasedEventDrivenWorkStealingDispatcherSpec extends JUnitSuite with val parent = actorOf[ParentActor] val child = actorOf[ChildActor] - parent.start + parent.start() intercept[IllegalActorStateException] { - child.start + child.start() } } } diff --git a/akka-actor/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala similarity index 86% rename from akka-actor/src/test/scala/akka/dispatch/FutureSpec.scala rename to akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index f99f5f5305..6fc96bb6d2 100644 --- a/akka-actor/src/test/scala/akka/dispatch/FutureSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -38,46 +38,46 @@ class FutureSpec extends JUnitSuite { @Test def shouldActorReplyResultThroughExplicitFuture { val actor = actorOf[TestActor] - actor.start + actor.start() val future = actor !!! 
"Hello" future.await assert(future.result.isDefined) assert("World" === future.result.get) - actor.stop + actor.stop() } @Test def shouldActorReplyExceptionThroughExplicitFuture { val actor = actorOf[TestActor] - actor.start + actor.start() val future = actor !!! "Failure" future.await assert(future.exception.isDefined) assert("Expected exception; to test fault-tolerance" === future.exception.get.getMessage) - actor.stop + actor.stop() } @Test def shouldFutureCompose { - val actor1 = actorOf[TestActor].start - val actor2 = actorOf(new Actor { def receive = { case s: String => self reply s.toUpperCase } } ).start + val actor1 = actorOf[TestActor].start() + val actor2 = actorOf(new Actor { def receive = { case s: String => self reply s.toUpperCase } } ).start() val future1 = actor1 !!! "Hello" flatMap ((s: String) => actor2 !!! s) val future2 = actor1 !!! "Hello" flatMap (actor2 !!! (_: String)) val future3 = actor1 !!! "Hello" flatMap (actor2 !!! (_: Int)) assert(Some(Right("WORLD")) === future1.await.value) assert(Some(Right("WORLD")) === future2.await.value) intercept[ClassCastException] { future3.await.resultOrException } - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldFutureComposePatternMatch { - val actor1 = actorOf[TestActor].start - val actor2 = actorOf(new Actor { def receive = { case s: String => self reply s.toUpperCase } } ).start + val actor1 = actorOf[TestActor].start() + val actor2 = actorOf(new Actor { def receive = { case s: String => self reply s.toUpperCase } } ).start() val future1 = actor1 !!! "Hello" collect { case (s: String) => s } flatMap (actor2 !!! _) val future2 = actor1 !!! "Hello" collect { case (n: Int) => n } flatMap (actor2 !!! 
_) assert(Some(Right("WORLD")) === future1.await.value) intercept[MatchError] { future2.await.resultOrException } - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldFutureForComprehension { @@ -86,23 +86,25 @@ class FutureSpec extends JUnitSuite { case s: String => self reply s.length case i: Int => self reply (i * 2).toString } - }).start + }).start() + + val future0 = actor !!! "Hello" val future1 = for { - a: Int <- actor !!! "Hello" // returns 5 + a: Int <- future0 // returns 5 b: String <- actor !!! a // returns "10" c: String <- actor !!! 7 // returns "14" } yield b + "-" + c val future2 = for { - a: Int <- actor !!! "Hello" + a: Int <- future0 b: Int <- actor !!! a c: String <- actor !!! 7 } yield b + "-" + c assert(Some(Right("10-14")) === future1.await.value) intercept[ClassCastException] { future2.await.resultOrException } - actor.stop + actor.stop() } @Test def shouldFutureForComprehensionPatternMatch { @@ -113,7 +115,7 @@ class FutureSpec extends JUnitSuite { case Req(s: String) => self reply Res(s.length) case Req(i: Int) => self reply Res((i * 2).toString) } - }).start + }).start() val future1 = for { a <- actor !!! Req("Hello") collect { case Res(x: Int) => x } @@ -129,61 +131,60 @@ class FutureSpec extends JUnitSuite { assert(Some(Right("10-14")) === future1.await.value) intercept[MatchError] { future2.await.resultOrException } - actor.stop + actor.stop() } - // FIXME: implement Futures.awaitEither, and uncomment these two tests @Test def shouldFutureAwaitEitherLeft = { - val actor1 = actorOf[TestActor].start - val actor2 = actorOf[TestActor].start + val actor1 = actorOf[TestActor].start() + val actor2 = actorOf[TestActor].start() val future1 = actor1 !!! "Hello" val future2 = actor2 !!! 
"NoReply" val result = Futures.awaitEither(future1, future2) assert(result.isDefined) assert("World" === result.get) - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldFutureAwaitEitherRight = { - val actor1 = actorOf[TestActor].start - val actor2 = actorOf[TestActor].start + val actor1 = actorOf[TestActor].start() + val actor2 = actorOf[TestActor].start() val future1 = actor1 !!! "NoReply" val future2 = actor2 !!! "Hello" val result = Futures.awaitEither(future1, future2) assert(result.isDefined) assert("World" === result.get) - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldFutureAwaitOneLeft = { - val actor1 = actorOf[TestActor].start - val actor2 = actorOf[TestActor].start + val actor1 = actorOf[TestActor].start() + val actor2 = actorOf[TestActor].start() val future1 = actor1 !!! "NoReply" val future2 = actor2 !!! "Hello" val result = Futures.awaitOne(List(future1, future2)) assert(result.result.isDefined) assert("World" === result.result.get) - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldFutureAwaitOneRight = { - val actor1 = actorOf[TestActor].start - val actor2 = actorOf[TestActor].start + val actor1 = actorOf[TestActor].start() + val actor2 = actorOf[TestActor].start() val future1 = actor1 !!! "Hello" val future2 = actor2 !!! "NoReply" val result = Futures.awaitOne(List(future1, future2)) assert(result.result.isDefined) assert("World" === result.result.get) - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldFutureAwaitAll = { - val actor1 = actorOf[TestActor].start - val actor2 = actorOf[TestActor].start + val actor1 = actorOf[TestActor].start() + val actor2 = actorOf[TestActor].start() val future1 = actor1 !!! "Hello" val future2 = actor2 !!! 
"Hello" Futures.awaitAll(List(future1, future2)) @@ -191,8 +192,8 @@ class FutureSpec extends JUnitSuite { assert("World" === future1.result.get) assert(future2.result.isDefined) assert("World" === future2.result.get) - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldFuturesAwaitMapHandleEmptySequence { @@ -201,7 +202,7 @@ class FutureSpec extends JUnitSuite { @Test def shouldFuturesAwaitMapHandleNonEmptySequence { val latches = (1 to 3) map (_ => new StandardLatch) - val actors = latches map (latch => actorOf(new TestDelayActor(latch)).start) + val actors = latches map (latch => actorOf(new TestDelayActor(latch)).start()) val futures = actors map (actor => (actor.!!![String]("Hello"))) latches foreach { _.open } @@ -212,7 +213,7 @@ class FutureSpec extends JUnitSuite { val actors = (1 to 10).toList map { _ => actorOf(new Actor { def receive = { case (add: Int, wait: Int) => Thread.sleep(wait); self reply_? add } - }).start + }).start() } def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 200 )) } assert(Futures.fold(0)(futures)(_ + _).awaitBlocking.result.get === 45) @@ -222,7 +223,7 @@ class FutureSpec extends JUnitSuite { val actors = (1 to 10).toList map { _ => actorOf(new Actor { def receive = { case (add: Int, wait: Int) => Thread.sleep(wait); self reply_? add } - }).start + }).start() } def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 200 )) } assert(futures.foldLeft(Future(0))((fr, fa) => for (r <- fr; a <- fa) yield (r + a)).awaitBlocking.result.get === 45) @@ -237,7 +238,7 @@ class FutureSpec extends JUnitSuite { if (add == 6) throw new IllegalArgumentException("shouldFoldResultsWithException: expected") self reply_? 
add } - }).start + }).start() } def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 100 )) } assert(Futures.fold(0)(futures)(_ + _).awaitBlocking.exception.get.getMessage === "shouldFoldResultsWithException: expected") @@ -251,7 +252,7 @@ class FutureSpec extends JUnitSuite { val actors = (1 to 10).toList map { _ => actorOf(new Actor { def receive = { case (add: Int, wait: Int) => Thread.sleep(wait); self reply_? add } - }).start + }).start() } def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 200 )) } assert(Futures.reduce(futures)(_ + _).awaitBlocking.result.get === 45) @@ -266,7 +267,7 @@ class FutureSpec extends JUnitSuite { if (add == 6) throw new IllegalArgumentException("shouldFoldResultsWithException: expected") self reply_? add } - }).start + }).start() } def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 100 )) } assert(Futures.reduce(futures)(_ + _).awaitBlocking.exception.get.getMessage === "shouldFoldResultsWithException: expected") @@ -282,7 +283,7 @@ class FutureSpec extends JUnitSuite { val actors = (1 to 10).toList map { _ => actorOf(new Actor { def receive = { case (add: Int, wait: Boolean, latch: StandardLatch) => if (wait) latch.await; self reply_? add } - }).start + }).start() } def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx >= 5, latch)) } @@ -298,10 +299,28 @@ class FutureSpec extends JUnitSuite { @Test def receiveShouldExecuteOnComplete { val latch = new StandardLatch - val actor = actorOf[TestActor].start + val actor = actorOf[TestActor].start() actor !!! 
"Hello" receive { case "World" => latch.open } assert(latch.tryAwait(5, TimeUnit.SECONDS)) - actor.stop + actor.stop() + } + + @Test def shouldTraverseFutures { + val oddActor = actorOf(new Actor { + var counter = 1 + def receive = { + case 'GetNext => + self reply counter + counter += 2 + } + }).start() + + val oddFutures: List[Future[Int]] = List.fill(100)(oddActor !!! 'GetNext) + assert(Futures.sequence(oddFutures).get.sum === 10000) + oddActor.stop() + + val list = (1 to 100).toList + assert(Futures.traverse(list)(x => Future(x * 2 - 1)).get.sum === 10000) } @Test def shouldHandleThrowables { diff --git a/akka-actor/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala similarity index 97% rename from akka-actor/src/test/scala/akka/dispatch/MailboxConfigSpec.scala rename to akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala index 7a469868a4..9ddbfdc332 100644 --- a/akka-actor/src/test/scala/akka/dispatch/MailboxConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala @@ -95,7 +95,7 @@ abstract class MailboxSpec extends case e: Throwable => result.completeWithException(e) } }) - t.start + t.start() result } @@ -173,11 +173,7 @@ class DefaultMailboxSpec extends MailboxSpec { } class PriorityMailboxSpec extends MailboxSpec { - val comparator = new java.util.Comparator[MessageInvocation] { - def compare(a: MessageInvocation, b: MessageInvocation): Int = { - a.## - b.## - } - } + val comparator = PriorityGenerator(_.##) lazy val name = "The priority mailbox implementation" def factory = { case UnboundedMailbox(blockDequeue) => diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala new file mode 100644 index 0000000000..f256715b8c --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala @@ -0,0 
+1,51 @@ +package akka.dispatch + +import akka.actor.Actor._ +import akka.actor.Actor +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers +import java.util.concurrent.CountDownLatch + +class PriorityDispatcherSpec extends WordSpec with MustMatchers { + + "A PriorityExecutorBasedEventDrivenDispatcher" must { + "Order it's messages according to the specified comparator using an unbounded mailbox" in { + testOrdering(UnboundedMailbox(false)) + } + + "Order it's messages according to the specified comparator using a bounded mailbox" in { + testOrdering(BoundedMailbox(false,1000)) + } + } + + def testOrdering(mboxType: MailboxType) { + val dispatcher = new PriorityExecutorBasedEventDrivenDispatcher("Test", + PriorityGenerator({ + case i: Int => i //Reverse order + case 'Result => Int.MaxValue + }: Any => Int), + throughput = 1, + mailboxType = mboxType + ) + + val actor = actorOf(new Actor { + self.dispatcher = dispatcher + var acc: List[Int] = Nil + + def receive = { + case i: Int => acc = i :: acc + case 'Result => self reply_? acc + } + }).start() + + dispatcher.suspend(actor) //Make sure the actor isn't treating any messages, let it buffer the incoming messages + + val msgs = (1 to 100).toList + for(m <- msgs) actor ! 
m + + dispatcher.resume(actor) //Signal the actor to start treating its message backlog + + actor.!!![List[Int]]('Result).await.result.get must be === (msgs.reverse) + + } + +} diff --git a/akka-actor/src/test/scala/akka/dispatch/ThreadBasedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ThreadBasedActorSpec.scala similarity index 85% rename from akka-actor/src/test/scala/akka/dispatch/ThreadBasedActorSpec.scala rename to akka-actor-tests/src/test/scala/akka/dispatch/ThreadBasedActorSpec.scala index eee135ebab..c6d6e2cb46 100644 --- a/akka-actor/src/test/scala/akka/dispatch/ThreadBasedActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ThreadBasedActorSpec.scala @@ -31,30 +31,30 @@ class ThreadBasedActorSpec extends JUnitSuite { val actor = actorOf(new Actor { self.dispatcher = Dispatchers.newThreadBasedDispatcher(self) def receive = { - case "OneWay" => oneWay.countDown + case "OneWay" => oneWay.countDown() } - }).start + }).start() val result = actor ! "OneWay" assert(oneWay.await(1, TimeUnit.SECONDS)) - actor.stop + actor.stop() } @Test def shouldSendReplySync = { - val actor = actorOf[TestActor].start + val actor = actorOf[TestActor].start() val result = (actor !! ("Hello", 10000)).as[String] assert("World" === result.get) - actor.stop + actor.stop() } @Test def shouldSendReplyAsync = { - val actor = actorOf[TestActor].start + val actor = actorOf[TestActor].start() val result = actor !! "Hello" assert("World" === result.get.asInstanceOf[String]) - actor.stop + actor.stop() } @Test def shouldSendReceiveException = { - val actor = actorOf[TestActor].start + val actor = actorOf[TestActor].start() try { actor !!
"Failure" fail("Should have thrown an exception") @@ -62,6 +62,6 @@ class ThreadBasedActorSpec extends JUnitSuite { case e => assert("Expected exception; to test fault-tolerance" === e.getMessage()) } - actor.stop + actor.stop() } } diff --git a/akka-actor/src/test/scala/akka/japi/JavaAPITest.scala b/akka-actor-tests/src/test/scala/akka/japi/JavaAPITest.scala similarity index 100% rename from akka-actor/src/test/scala/akka/japi/JavaAPITest.scala rename to akka-actor-tests/src/test/scala/akka/japi/JavaAPITest.scala diff --git a/akka-actor/src/test/scala/akka/misc/ActorRegistrySpec.scala b/akka-actor-tests/src/test/scala/akka/misc/ActorRegistrySpec.scala similarity index 82% rename from akka-actor/src/test/scala/akka/misc/ActorRegistrySpec.scala rename to akka-actor-tests/src/test/scala/akka/misc/ActorRegistrySpec.scala index 09a23dbc5c..13fb72f046 100644 --- a/akka-actor/src/test/scala/akka/misc/ActorRegistrySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/misc/ActorRegistrySpec.scala @@ -33,115 +33,115 @@ class ActorRegistrySpec extends JUnitSuite { import ActorRegistrySpec._ @Test def shouldGetActorByIdFromActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor = actorOf[TestActor] - actor.start + actor.start() val actors = Actor.registry.actorsFor("MyID") assert(actors.size === 1) assert(actors.head.actor.isInstanceOf[TestActor]) assert(actors.head.id === "MyID") - actor.stop + actor.stop() } @Test def shouldGetActorByUUIDFromActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor = actorOf[TestActor] val uuid = actor.uuid - actor.start + actor.start() val actorOrNone = Actor.registry.actorFor(uuid) assert(actorOrNone.isDefined) assert(actorOrNone.get.uuid === uuid) - actor.stop + actor.stop() } @Test def shouldGetActorByClassFromActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor = actorOf[TestActor] - actor.start + actor.start() val actors = 
Actor.registry.actorsFor(classOf[TestActor]) assert(actors.size === 1) assert(actors.head.actor.isInstanceOf[TestActor]) assert(actors.head.id === "MyID") - actor.stop + actor.stop() } @Test def shouldGetActorByManifestFromActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor = actorOf[TestActor] - actor.start + actor.start() val actors = Actor.registry.actorsFor[TestActor] assert(actors.size === 1) assert(actors.head.actor.isInstanceOf[TestActor]) assert(actors.head.id === "MyID") - actor.stop + actor.stop() } @Test def shouldFindThingsFromActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor = actorOf[TestActor] - actor.start + actor.start() val found = Actor.registry.find({ case a: ActorRef if a.actor.isInstanceOf[TestActor] => a }) assert(found.isDefined) assert(found.get.actor.isInstanceOf[TestActor]) assert(found.get.id === "MyID") - actor.stop + actor.stop() } @Test def shouldGetActorsByIdFromActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor1 = actorOf[TestActor] - actor1.start + actor1.start() val actor2 = actorOf[TestActor] - actor2.start + actor2.start() val actors = Actor.registry.actorsFor("MyID") assert(actors.size === 2) assert(actors.head.actor.isInstanceOf[TestActor]) assert(actors.head.id === "MyID") assert(actors.last.actor.isInstanceOf[TestActor]) assert(actors.last.id === "MyID") - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldGetActorsByClassFromActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor1 = actorOf[TestActor] - actor1.start + actor1.start() val actor2 = actorOf[TestActor] - actor2.start + actor2.start() val actors = Actor.registry.actorsFor(classOf[TestActor]) assert(actors.size === 2) assert(actors.head.actor.isInstanceOf[TestActor]) assert(actors.head.id === "MyID") assert(actors.last.actor.isInstanceOf[TestActor]) assert(actors.last.id === "MyID") - actor1.stop - 
actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldGetActorsByManifestFromActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor1 = actorOf[TestActor] - actor1.start + actor1.start() val actor2 = actorOf[TestActor] - actor2.start + actor2.start() val actors = Actor.registry.actorsFor[TestActor] assert(actors.size === 2) assert(actors.head.actor.isInstanceOf[TestActor]) assert(actors.head.id === "MyID") assert(actors.last.actor.isInstanceOf[TestActor]) assert(actors.last.id === "MyID") - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldGetActorsByMessageFromActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor1 = actorOf[TestActor] - actor1.start + actor1.start() val actor2 = actorOf[TestActor2] - actor2.start + actor2.start() val actorsForAcotrTestActor = Actor.registry.actorsFor[TestActor] assert(actorsForAcotrTestActor.size === 1) @@ -159,55 +159,55 @@ class ActorRegistrySpec extends JUnitSuite { val actorsForMessagePing = Actor.registry.actorsFor[Actor]("ping") assert(actorsForMessagePing.size === 2) - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldGetAllActorsFromActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor1 = actorOf[TestActor] - actor1.start + actor1.start() val actor2 = actorOf[TestActor] - actor2.start + actor2.start() val actors = Actor.registry.actors assert(actors.size === 2) assert(actors.head.actor.isInstanceOf[TestActor]) assert(actors.head.id === "MyID") assert(actors.last.actor.isInstanceOf[TestActor]) assert(actors.last.id === "MyID") - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldGetResponseByAllActorsInActorRegistryWhenInvokingForeach { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor1 = actorOf[TestActor] - actor1.start + actor1.start() val actor2 = actorOf[TestActor] - actor2.start + actor2.start() record = "" 
Actor.registry.foreach(actor => actor !! "ping") assert(record === "pongpong") - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } @Test def shouldShutdownAllActorsInActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor1 = actorOf[TestActor] - actor1.start + actor1.start() val actor2 = actorOf[TestActor] - actor2.start - Actor.registry.shutdownAll + actor2.start() + Actor.registry.shutdownAll() assert(Actor.registry.actors.size === 0) } @Test def shouldRemoveUnregisterActorInActorRegistry { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() val actor1 = actorOf[TestActor] - actor1.start + actor1.start() val actor2 = actorOf[TestActor] - actor2.start + actor2.start() assert(Actor.registry.actors.size === 2) Actor.registry.unregister(actor1) assert(Actor.registry.actors.size === 1) @@ -216,7 +216,7 @@ class ActorRegistrySpec extends JUnitSuite { } @Test def shouldBeAbleToRegisterActorsConcurrently { - Actor.registry.shutdownAll + Actor.registry.shutdownAll() def mkTestActors = for(i <- (1 to 10).toList;j <- 1 to 3000) yield actorOf( new Actor { self.id = i.toString @@ -227,11 +227,11 @@ class ActorRegistrySpec extends JUnitSuite { val barrier = new CyclicBarrier(3) def mkThread(actors: Iterable[ActorRef]) = new Thread { - this.start + this.start() override def run { barrier.await - actors foreach { _.start } - latch.countDown + actors foreach { _.start() } + latch.countDown() } } val a1,a2,a3 = mkTestActors diff --git a/akka-actor/src/test/scala/akka/misc/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/misc/SchedulerSpec.scala similarity index 85% rename from akka-actor/src/test/scala/akka/misc/SchedulerSpec.scala rename to akka-actor-tests/src/test/scala/akka/misc/SchedulerSpec.scala index 79b09d49d1..3afb9096fc 100644 --- a/akka-actor/src/test/scala/akka/misc/SchedulerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/misc/SchedulerSpec.scala @@ -12,7 +12,7 @@ class SchedulerSpec extends JUnitSuite { 
def withCleanEndState(action: => Unit) { action Scheduler.restart - Actor.registry.shutdownAll + Actor.registry.shutdownAll() } @@ -21,8 +21,8 @@ class SchedulerSpec extends JUnitSuite { case object Tick val countDownLatch = new CountDownLatch(3) val tickActor = actorOf(new Actor { - def receive = { case Tick => countDownLatch.countDown } - }).start + def receive = { case Tick => countDownLatch.countDown() } + }).start() // run every 50 millisec Scheduler.schedule(tickActor, Tick, 0, 50, TimeUnit.MILLISECONDS) @@ -31,7 +31,7 @@ class SchedulerSpec extends JUnitSuite { val countDownLatch2 = new CountDownLatch(3) - Scheduler.schedule( () => countDownLatch2.countDown, 0, 50, TimeUnit.MILLISECONDS) + Scheduler.schedule( () => countDownLatch2.countDown(), 0, 50, TimeUnit.MILLISECONDS) // after max 1 second it should be executed at least the 3 times already assert(countDownLatch2.await(1, TimeUnit.SECONDS)) @@ -41,11 +41,11 @@ class SchedulerSpec extends JUnitSuite { case object Tick val countDownLatch = new CountDownLatch(3) val tickActor = actorOf(new Actor { - def receive = { case Tick => countDownLatch.countDown } - }).start + def receive = { case Tick => countDownLatch.countDown() } + }).start() // run every 50 millisec Scheduler.scheduleOnce(tickActor, Tick, 50, TimeUnit.MILLISECONDS) - Scheduler.scheduleOnce( () => countDownLatch.countDown, 50, TimeUnit.MILLISECONDS) + Scheduler.scheduleOnce( () => countDownLatch.countDown(), 50, TimeUnit.MILLISECONDS) // after 1 second the wait should fail assert(countDownLatch.await(1, TimeUnit.SECONDS) == false) @@ -60,8 +60,8 @@ class SchedulerSpec extends JUnitSuite { object Ping val ticks = new CountDownLatch(1000) val actor = actorOf(new Actor { - def receive = { case Ping => ticks.countDown } - }).start + def receive = { case Ping => ticks.countDown() } + }).start() val numActors = Actor.registry.actors.length (1 to 1000).foreach( _ => Scheduler.scheduleOnce(actor,Ping,1,TimeUnit.MILLISECONDS) ) 
assert(ticks.await(10,TimeUnit.SECONDS)) @@ -76,8 +76,8 @@ class SchedulerSpec extends JUnitSuite { val ticks = new CountDownLatch(1) val actor = actorOf(new Actor { - def receive = { case Ping => ticks.countDown } - }).start + def receive = { case Ping => ticks.countDown() } + }).start() (1 to 10).foreach { i => val future = Scheduler.scheduleOnce(actor,Ping,1,TimeUnit.SECONDS) @@ -101,7 +101,7 @@ class SchedulerSpec extends JUnitSuite { self.lifeCycle = Permanent def receive = { - case Ping => pingLatch.countDown + case Ping => pingLatch.countDown() case Crash => throw new Exception("CRASH") } diff --git a/akka-actor/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala similarity index 92% rename from akka-actor/src/test/scala/akka/routing/RoutingSpec.scala rename to akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 09e618e24c..d79bd0651e 100644 --- a/akka-actor/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -25,18 +25,18 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers case `testMsg1` => self.reply(3) case `testMsg2` => self.reply(7) } - } ).start + } ).start() val t2 = actorOf( new Actor() { def receive = { case `testMsg3` => self.reply(11) } - }).start + }).start() val d = dispatcherActor { case `testMsg1`|`testMsg2` => t1 case `testMsg3` => t2 - }.start + }.start() val result = for { a <- (d !! 
(testMsg1, 5000)).as[Int] @@ -47,14 +47,14 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers result.isDefined must be (true) result.get must be(21) - for(a <- List(t1,t2,d)) a.stop + for(a <- List(t1,t2,d)) a.stop() } @Test def testLogger = { val msgs = new java.util.concurrent.ConcurrentSkipListSet[Any] val latch = new CountDownLatch(2) - val t1 = actorOf(new Actor { def receive = { case _ => } }).start - val l = loggerActor(t1,(x) => { msgs.add(x); latch.countDown }).start + val t1 = actorOf(new Actor { def receive = { case _ => } }).start() + val l = loggerActor(t1,(x) => { msgs.add(x); latch.countDown() }).start() val foo : Any = "foo" val bar : Any = "bar" l ! foo @@ -62,8 +62,8 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers val done = latch.await(5,TimeUnit.SECONDS) done must be (true) msgs must ( have size (2) and contain (foo) and contain (bar) ) - t1.stop - l.stop + t1.stop() + l.stop() } @Test def testSmallestMailboxFirstDispatcher = { @@ -74,23 +74,23 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers case x => Thread.sleep(50) // slow actor t1ProcessedCount.incrementAndGet - latch.countDown + latch.countDown() } - }).start + }).start() val t2ProcessedCount = new AtomicInteger(0) val t2 = actorOf(new Actor { def receive = { case x => t2ProcessedCount.incrementAndGet - latch.countDown + latch.countDown() } - }).start + }).start() val d = loadBalancerActor(new SmallestMailboxFirstIterator(t1 :: t2 :: Nil)) for (i <- 1 to 500) d ! 
i val done = latch.await(10,TimeUnit.SECONDS) done must be (true) t1ProcessedCount.get must be < (t2ProcessedCount.get) // because t1 is much slower and thus has a bigger mailbox all the time - for(a <- List(t1,t2,d)) a.stop + for(a <- List(t1,t2,d)) a.stop() } @Test def testListener = { @@ -102,16 +102,16 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers case "foo" => gossip("bar") } }) - i.start + i.start() def newListener = actorOf(new Actor { def receive = { case "bar" => num.incrementAndGet - latch.countDown - case "foo" => foreachListener.countDown + latch.countDown() + case "foo" => foreachListener.countDown() } - }).start + }).start() val a1 = newListener val a2 = newListener @@ -129,7 +129,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers num.get must be (2) val withListeners = foreachListener.await(5,TimeUnit.SECONDS) withListeners must be (true) - for(a <- List(i,a1,a2,a3)) a.stop + for(a <- List(i,a1,a2,a3)) a.stop() } @Test def testIsDefinedAt = { @@ -142,28 +142,28 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers case `testMsg1` => self.reply(3) case `testMsg2` => self.reply(7) } - } ).start + } ).start() val t2 = actorOf( new Actor() { def receive = { case `testMsg1` => self.reply(3) case `testMsg2` => self.reply(7) } - } ).start + } ).start() val t3 = actorOf( new Actor() { def receive = { case `testMsg1` => self.reply(3) case `testMsg2` => self.reply(7) } - } ).start + } ).start() val t4 = actorOf( new Actor() { def receive = { case `testMsg1` => self.reply(3) case `testMsg2` => self.reply(7) } - } ).start + } ).start() val d1 = loadBalancerActor(new SmallestMailboxFirstIterator(t1 :: t2 :: Nil)) val d2 = loadBalancerActor(new CyclicIterator[ActorRef](t3 :: t4 :: Nil)) @@ -177,7 +177,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers d2.isDefinedAt(testMsg1) must be (true) d2.isDefinedAt(testMsg3) must be (false) - 
for(a <- List(t1,t2,d1,d2)) a.stop + for(a <- List(t1,t2,d1,d2)) a.stop() } // Actor Pool Capacity Tests @@ -196,7 +196,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers def receive = { case _ => counter.incrementAndGet - latch.countDown + latch.countDown() self reply_? "success" } }) @@ -211,11 +211,11 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers val successes = new CountDownLatch(2) implicit val successCounterActor = Some(actorOf(new Actor { def receive = { - case "success" => successes.countDown + case "success" => successes.countDown() } - }).start) + }).start()) - val pool = actorOf(new TestPool).start + val pool = actorOf(new TestPool).start() pool ! "a" pool ! "b" @@ -253,14 +253,14 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers } } }) - }).start + }).start() try { (for(count <- 1 to 500) yield actorPool.!!![String]("Test", 20000)) foreach { _.await.resultOrException.get must be ("Response") } } finally { - actorPool.stop + actorPool.stop() } } @@ -283,7 +283,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers case n:Int => Thread.sleep(n) counter.incrementAndGet - latch.countDown + latch.countDown() } }) @@ -299,7 +299,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers // // first message should create the minimum number of delgates // - val pool = actorOf(new TestPool).start + val pool = actorOf(new TestPool).start() pool ! 1 (pool !! 
ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be (2) @@ -356,7 +356,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers case n:Int => Thread.sleep(n) counter.incrementAndGet - latch.countDown + latch.countDown() } }) @@ -370,7 +370,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers def receive = _route } - val pool = actorOf(new TestPool).start + val pool = actorOf(new TestPool).start() var loops = 0 def loop(t:Int) = { @@ -421,7 +421,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers def receive = { case _ => delegates put(self.uuid.toString, "") - latch.countDown + latch.countDown() } }) @@ -433,7 +433,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers def receive = _route } - val pool1 = actorOf(new TestPool1).start + val pool1 = actorOf(new TestPool1).start() pool1 ! "a" pool1 ! "b" var done = latch.await(1,TimeUnit.SECONDS) @@ -450,7 +450,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers def receive = { case _ => delegates put(self.uuid.toString, "") - latch.countDown + latch.countDown() } }) @@ -465,10 +465,10 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers latch = new CountDownLatch(2) delegates clear - val pool2 = actorOf(new TestPool2).start + val pool2 = actorOf(new TestPool2).start() pool2 ! "a" pool2 ! 
"b" - done = latch.await(1,TimeUnit.SECONDS) + done = latch.await(1, TimeUnit.SECONDS) done must be (true) delegates.size must be (2) pool2 stop @@ -494,7 +494,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers def receive = { case n:Int => Thread.sleep(n) - latch.countDown + latch.countDown() } }) @@ -514,7 +514,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers // // put some pressure on the pool // - val pool = actorOf(new TestPool).start + val pool = actorOf(new TestPool).start() for (m <- 0 to 10) pool ! 250 Thread.sleep(5) val z = (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size diff --git a/akka-testkit/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala b/akka-actor-tests/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala similarity index 92% rename from akka-testkit/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala rename to akka-actor-tests/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala index 8b52fb3fc4..993f92259d 100644 --- a/akka-testkit/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala @@ -21,11 +21,11 @@ class CallingThreadDispatcherModelSpec extends ActorModelSpec { def flood(num: Int) { val cachedMessage = CountDownNStop(new CountDownLatch(num)) - val keeper = newTestActor.start + val keeper = newTestActor.start() (1 to num) foreach { - _ => newTestActor.start ! cachedMessage + _ => newTestActor.start() ! 
cachedMessage } - keeper.stop + keeper.stop() assertCountDown(cachedMessage.latch,10000, "Should process " + num + " countdowns") } for(run <- 1 to 3) { diff --git a/akka-actor/src/test/scala/akka/ticket/Ticket001Spec.scala b/akka-actor-tests/src/test/scala/akka/ticket/Ticket001Spec.scala similarity index 91% rename from akka-actor/src/test/scala/akka/ticket/Ticket001Spec.scala rename to akka-actor-tests/src/test/scala/akka/ticket/Ticket001Spec.scala index d4de2675fb..e1a862e03c 100644 --- a/akka-actor/src/test/scala/akka/ticket/Ticket001Spec.scala +++ b/akka-actor-tests/src/test/scala/akka/ticket/Ticket001Spec.scala @@ -5,7 +5,7 @@ import org.scalatest.matchers.MustMatchers class Ticket001Spec extends WordSpec with MustMatchers { - "An XXX" should { + "An XXX" must { "do YYY" in { 1 must be (1) } diff --git a/akka-actor/src/test/scala/akka/ticket/Ticket703Spec.scala b/akka-actor-tests/src/test/scala/akka/ticket/Ticket703Spec.scala similarity index 98% rename from akka-actor/src/test/scala/akka/ticket/Ticket703Spec.scala rename to akka-actor-tests/src/test/scala/akka/ticket/Ticket703Spec.scala index 48dddfe634..3648d0ab45 100644 --- a/akka-actor/src/test/scala/akka/ticket/Ticket703Spec.scala +++ b/akka-actor-tests/src/test/scala/akka/ticket/Ticket703Spec.scala @@ -27,7 +27,7 @@ class Ticket703Spec extends WordSpec with MustMatchers { self.reply_?("Response") } }) - }).start + }).start() (actorPool.!!![String]("Ping", 7000)).await.result must be === Some("Response") } } diff --git a/akka-actor/src/main/java/akka/actor/Actors.java b/akka-actor/src/main/java/akka/actor/Actors.java index 86b1484d05..a5ec9f37dc 100644 --- a/akka-actor/src/main/java/akka/actor/Actors.java +++ b/akka-actor/src/main/java/akka/actor/Actors.java @@ -70,7 +70,6 @@ public class Actors { return Actor$.MODULE$.actorOf(type); } - /** * The message that is sent when an Actor gets a receive timeout. *
@@ -83,4 +82,27 @@ public class Actors {
     public final static ReceiveTimeout$ receiveTimeout() {
         return ReceiveTimeout$.MODULE$;
     }
-}
\ No newline at end of file
+
+    /**
+     * The message that when sent to an Actor kills it by throwing an exception.
+     * 
+     *  actor.sendOneWay(kill());
+     * 
+ * @return the single instance of Kill + */ + public final static Kill$ kill() { + return Kill$.MODULE$; + } + + + /** + * The message that when sent to an Actor shuts it down by calling 'stop'. + *
+     *  actor.sendOneWay(poisonPill());
+     * 
+ * @return the single instance of PoisonPill + */ + public final static PoisonPill$ poisonPill() { + return PoisonPill$.MODULE$; + } +} diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index ff4fe8fb7d..748df1ced0 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -5,31 +5,33 @@ package akka import akka.actor.newUuid - -import java.io.{StringWriter, PrintWriter} import java.net.{InetAddress, UnknownHostException} /** * Akka base Exception. Each Exception gets: *
    - *
  • a UUID for tracking purposes
  • - *
  • a message including exception name, uuid, original message and the stacktrace
  • - *
  • a method 'log' that will log the exception once and only once
  • + *
  • a uuid for tracking purposes
  • + *
  • toString that includes exception name, message, uuid, and the stacktrace
  • *
* * @author Jonas Bonér */ -@serializable abstract class AkkaException(message: String = "") extends RuntimeException(message) { - import AkkaException._ - val exceptionName = getClass.getName +class AkkaException(message: String = "") extends RuntimeException(message) with Serializable { + val uuid = "%s_%s".format(AkkaException.hostname, newUuid) - val uuid = "%s_%s".format(hostname, newUuid) + override lazy val toString = { + val name = getClass.getName + val trace = stackTraceToString + "%s: %s\n[%s]\n%s".format(name, message, uuid, trace) + } - override val toString = "%s\n\t[%s]\n\t%s\n\t%s".format(exceptionName, uuid, message, { - val sw = new StringWriter - printStackTrace(new PrintWriter(sw)) - sw.toString - }) + def stackTraceToString = { + val trace = getStackTrace + val sb = new StringBuffer + for (i <- 0 until trace.length) + sb.append("\tat %s\n" format trace(i)) + sb.toString + } } object AkkaException { diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 882331b177..ee3c48374f 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -6,15 +6,10 @@ package akka.actor import akka.dispatch._ import akka.config.Config._ -import akka.config.Supervision._ -import akka.config.ConfigurationException import akka.util.Helpers.{narrow, narrowSilently} import akka.util.ListenerManagement import akka.AkkaException -import java.util.concurrent.TimeUnit -import java.net.InetSocketAddress - import scala.reflect.BeanProperty import akka.util. {ReflectiveAccess, Duration} import akka.remoteinterface.RemoteSupport @@ -23,14 +18,14 @@ import akka.japi. 
{Creator, Procedure} /** * Life-cycle messages for the Actors */ -@serializable sealed trait LifeCycleMessage +sealed trait LifeCycleMessage extends Serializable /* Marker trait to show which Messages are automatically handled by Akka */ sealed trait AutoReceivedMessage { self: LifeCycleMessage => } -case class HotSwap(code: ActorRef => Actor.Receive, discardOld: Boolean = true) +case class HotSwap(code: ActorRef => Actor.Receive, discardOld: Boolean = true) extends AutoReceivedMessage with LifeCycleMessage { - + /** * Java API */ @@ -61,6 +56,8 @@ case class UnlinkAndStop(child: ActorRef) extends AutoReceivedMessage with LifeC case object PoisonPill extends AutoReceivedMessage with LifeCycleMessage +case object Kill extends AutoReceivedMessage with LifeCycleMessage + case object ReceiveTimeout extends LifeCycleMessage case class MaximumNumberOfRestartsWithinTimeRangeReached( @@ -75,6 +72,7 @@ class IllegalActorStateException private[akka](message: String) extends AkkaEx class ActorKilledException private[akka](message: String) extends AkkaException(message) class ActorInitializationException private[akka](message: String) extends AkkaException(message) class ActorTimeoutException private[akka](message: String) extends AkkaException(message) +class InvalidMessageException private[akka](message: String) extends AkkaException(message) /** * This message is thrown by default when an Actors behavior doesn't match a message @@ -90,7 +88,7 @@ case class UnhandledMessageException(msg: Any, ref: ActorRef) extends Exception * @author Jonas Bonér */ object Actor extends ListenerManagement { - + /** * Add shutdown cleanups */ @@ -128,19 +126,19 @@ object Actor extends ListenerManagement { type Receive = PartialFunction[Any, Unit] private[actor] val actorRefInCreation = new scala.util.DynamicVariable[Option[ActorRef]](None) - + /** * Creates an ActorRef out of the Actor with type T. *
    *   import Actor._
    *   val actor = actorOf[MyActor]
-   *   actor.start
+   *   actor.start()
    *   actor ! message
-   *   actor.stop
+   *   actor.stop()
    * 
* You can create and start the actor in one statement like this: *
-   *   val actor = actorOf[MyActor].start
+   *   val actor = actorOf[MyActor].start()
    * 
*/ def actorOf[T <: Actor : Manifest]: ActorRef = actorOf(manifest[T].erasure.asInstanceOf[Class[_ <: Actor]]) @@ -150,13 +148,13 @@ object Actor extends ListenerManagement { *
    *   import Actor._
    *   val actor = actorOf(classOf[MyActor])
-   *   actor.start
+   *   actor.start()
    *   actor ! message
-   *   actor.stop
+   *   actor.stop()
    * 
* You can create and start the actor in one statement like this: *
-   *   val actor = actorOf(classOf[MyActor]).start
+   *   val actor = actorOf(classOf[MyActor]).start()
    * 
*/ def actorOf(clazz: Class[_ <: Actor]): ActorRef = new LocalActorRef(() => { @@ -178,13 +176,13 @@ object Actor extends ListenerManagement { *
    *   import Actor._
    *   val actor = actorOf(new MyActor)
-   *   actor.start
+   *   actor.start()
    *   actor ! message
-   *   actor.stop
+   *   actor.stop()
    * 
* You can create and start the actor in one statement like this: *
-   *   val actor = actorOf(new MyActor).start
+   *   val actor = actorOf(new MyActor).start()
    * 
*/ def actorOf(factory: => Actor): ActorRef = new LocalActorRef(() => factory, None) @@ -219,9 +217,9 @@ object Actor extends ListenerManagement { actorOf(new Actor() { self.dispatcher = dispatcher def receive = { - case Spawn => try { body } finally { self.stop } + case Spawn => try { body } finally { self.stop() } } - }).start ! Spawn + }).start() ! Spawn } /** * Implicitly converts the given Option[Any] to a AnyOptionAsTypedOption which offers the method as[T] @@ -276,9 +274,6 @@ object Actor extends ListenerManagement { * } *
* - *

- * The Actor trait also has a 'log' member field that can be used for logging within the Actor. - * * @author Jonas Bonér */ trait Actor { @@ -303,6 +298,7 @@ trait Actor { "\n\tEither use:" + "\n\t\t'val actor = Actor.actorOf[MyActor]', or" + "\n\t\t'val actor = Actor.actorOf(new MyActor(..))'") + Actor.actorRefInCreation.value = None optRef.asInstanceOf[Some[ActorRef]].get.id = getClass.getName //FIXME: Is this needed? optRef.asInstanceOf[Some[ActorRef]] } @@ -352,7 +348,7 @@ trait Actor { *

* Example code: *

-   *   def receive =  {
+   *   def receive = {
    *     case Ping =>
    *       println("got a 'Ping' message")
    *       self.reply("pong")
@@ -370,14 +366,14 @@ trait Actor {
   /**
    * User overridable callback.
    * 

- * Is called when an Actor is started by invoking 'actor.start'. + * Is called when an Actor is started by invoking 'actor.start()'. */ def preStart {} /** * User overridable callback. *

- * Is called when 'actor.stop' is invoked. + * Is called when 'actor.stop()' is invoked. */ def postStop {} @@ -426,7 +422,7 @@ trait Actor { * If "discardOld" is true, an unbecome will be issued prior to pushing the new behavior to the stack */ def become(behavior: Receive, discardOld: Boolean = true) { - if (discardOld) unbecome + if (discardOld) unbecome() self.hotswap = self.hotswap.push(behavior) } @@ -443,8 +439,10 @@ trait Actor { // ========================================= private[akka] final def apply(msg: Any) = { + if (msg.isInstanceOf[AnyRef] && (msg.asInstanceOf[AnyRef] eq null)) + throw new InvalidMessageException("Message from [" + self.sender + "] to [" + self.toString + "] is null") val behaviorStack = self.hotswap - msg match { //FIXME Add check for currentMessage eq null throw new BadUSerException? + msg match { case l: AutoReceivedMessage => autoReceiveMessage(l) case msg if behaviorStack.nonEmpty && behaviorStack.head.isDefinedAt(msg) => behaviorStack.head.apply(msg) @@ -456,15 +454,16 @@ trait Actor { private final def autoReceiveMessage(msg: AutoReceivedMessage): Unit = msg match { case HotSwap(code, discardOld) => become(code(self), discardOld) - case RevertHotSwap => unbecome + case RevertHotSwap => unbecome() case Exit(dead, reason) => self.handleTrapExit(dead, reason) case Link(child) => self.link(child) case Unlink(child) => self.unlink(child) - case UnlinkAndStop(child) => self.unlink(child); child.stop + case UnlinkAndStop(child) => self.unlink(child); child.stop() case Restart(reason) => throw reason + case Kill => throw new ActorKilledException("Kill") case PoisonPill => val f = self.senderFuture - self.stop + self.stop() if (f.isDefined) f.get.completeWithException(new ActorKilledException("PoisonPill")) } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index df29edd650..71f916b6e8 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ 
b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -6,18 +6,14 @@ package akka.actor import akka.event.EventHandler import akka.dispatch._ -import akka.config.Config._ import akka.config.Supervision._ -import akka.AkkaException import akka.util._ import ReflectiveAccess._ import java.net.InetSocketAddress -import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} -import java.util.concurrent.locks.ReentrantLock +import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.{ ScheduledFuture, ConcurrentHashMap, TimeUnit } import java.util.{ Map => JMap } -import java.lang.reflect.Field import scala.reflect.BeanProperty import scala.collection.immutable.Stack @@ -36,19 +32,20 @@ private[akka] object ActorRefInternals { } /** - * Abstraction for unification of sender and senderFuture for later reply + * Abstraction for unification of sender and senderFuture for later reply. + * Can be stored away and used at a later point in time. */ abstract class Channel[T] { - + /** - * Sends the specified message to the channel - * Scala API + * Scala API.

+ * Sends the specified message to the channel. */ def !(msg: T): Unit /** - * Sends the specified message to the channel - * Java API + * Java API.

+ * Sends the specified message to the channel. */ def sendOneWay(msg: T): Unit = this.!(msg) } @@ -63,14 +60,14 @@ abstract class Channel[T] { * import Actor._ * * val actor = actorOf[MyActor] - * actor.start + * actor.start() * actor ! message - * actor.stop + * actor.stop() *

* * You can also create and start actors like this: *
- *   val actor = actorOf[MyActor].start
+ *   val actor = actorOf[MyActor].start()
  * 
* * Here is an example on how to create an actor with a non-default constructor. @@ -78,9 +75,9 @@ abstract class Channel[T] { * import Actor._ * * val actor = actorOf(new MyActor(...)) - * actor.start + * actor.start() * actor ! message - * actor.stop + * actor.stop() * * * @author Jonas Bonér @@ -129,7 +126,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal var receiveTimeout: Option[Long] = None /** - * Akka Java API + * Akka Java API.

* Defines the default timeout for an initial receive invocation. * When specified, the receive function should be able to handle a 'ReceiveTimeout' message. */ @@ -137,7 +134,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal def getReceiveTimeout(): Option[Long] = receiveTimeout /** - * Akka Java API + * Akka Java API.

* A faultHandler defines what should be done when a linked actor signals an error. *

* Can be one of: @@ -154,7 +151,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal /** - * Akka Java API + * Akka Java API.

* A lifeCycle defines whether the actor will be stopped on error (Temporary) or if it can be restarted (Permanent) *

* Can be one of: @@ -172,7 +169,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal def getLifeCycle(): LifeCycle /** - * Akka Java API + * Akka Java API.

* The default dispatcher is the Dispatchers.globalExecutorBasedEventDrivenDispatcher. * This means that all actors will share the same event-driven executor based dispatcher. *

@@ -192,7 +189,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal def homeAddress: Option[InetSocketAddress] /** - * Java API + * Java API.

*/ def getHomeAddress(): InetSocketAddress = homeAddress getOrElse null @@ -220,14 +217,14 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal def uuid = _uuid /** - * Akka Java API + * Akka Java API.

* The reference sender Actor of the last received message. * Is defined if the message was sent from another Actor, else None. */ def getSender(): Option[ActorRef] = sender /** - * Akka Java API + * Akka Java API.

* The reference sender future of the last received message. * Is defined if the message was sent with sent with '!!' or '!!!', else None. */ @@ -267,7 +264,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal protected[akka] def uuid_=(uid: Uuid) = _uuid = uid /** - * Akka Java API + * Akka Java API.

* Sends a one-way asynchronous message. E.g. fire-and-forget semantics. *

*

@@ -278,7 +275,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
   def sendOneWay(message: AnyRef): Unit = sendOneWay(message, null)
 
   /**
-   * Akka Java API
+   * Akka Java API. 

* Sends a one-way asynchronous message. E.g. fire-and-forget semantics. *

* Allows you to pass along the sender of the messag. @@ -291,21 +288,21 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal def sendOneWay(message: AnyRef, sender: ActorRef): Unit = this.!(message)(Option(sender)) /** - * Akka Java API + * Akka Java API.

* @see sendRequestReply(message: AnyRef, timeout: Long, sender: ActorRef) * Uses the defualt timeout of the Actor (setTimeout()) and omits the sender reference */ def sendRequestReply(message: AnyRef): AnyRef = sendRequestReply(message, timeout, null) /** - * Akka Java API + * Akka Java API.

* @see sendRequestReply(message: AnyRef, timeout: Long, sender: ActorRef) * Uses the defualt timeout of the Actor (setTimeout()) */ def sendRequestReply(message: AnyRef, sender: ActorRef): AnyRef = sendRequestReply(message, timeout, sender) /** - * Akka Java API + * Akka Java API.

* Sends a message asynchronously and waits on a future for a reply message under the hood. *

* It waits on the reply either until it receives it or until the timeout expires @@ -329,21 +326,21 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal } /** - * Akka Java API + * Akka Java API.

* @see sendRequestReplyFuture(message: AnyRef, sender: ActorRef): Future[_] * Uses the Actors default timeout (setTimeout()) and omits the sender */ - def sendRequestReplyFuture(message: AnyRef): Future[_] = sendRequestReplyFuture(message, timeout, null) + def sendRequestReplyFuture[T <: AnyRef](message: AnyRef): Future[T] = sendRequestReplyFuture(message, timeout, null).asInstanceOf[Future[T]] /** - * Akka Java API + * Akka Java API.

* @see sendRequestReplyFuture(message: AnyRef, sender: ActorRef): Future[_] * Uses the Actors default timeout (setTimeout()) */ - def sendRequestReplyFuture(message: AnyRef, sender: ActorRef): Future[_] = sendRequestReplyFuture(message, timeout, sender) + def sendRequestReplyFuture[T <: AnyRef](message: AnyRef, sender: ActorRef): Future[T] = sendRequestReplyFuture(message, timeout, sender).asInstanceOf[Future[T]] /** - * Akka Java API + * Akka Java API.

* Sends a message asynchronously returns a future holding the eventual reply message. *

* NOTE: @@ -353,10 +350,10 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal * If you are sending messages using sendRequestReplyFuture then you have to use getContext().reply(..) * to send a reply message to the original sender. If not then the sender will block until the timeout expires. */ - def sendRequestReplyFuture(message: AnyRef, timeout: Long, sender: ActorRef): Future[_] = !!!(message, timeout)(Option(sender)) + def sendRequestReplyFuture[T <: AnyRef](message: AnyRef, timeout: Long, sender: ActorRef): Future[T] = !!!(message, timeout)(Option(sender)).asInstanceOf[Future[T]] /** - * Akka Java API + * Akka Java API.

* Forwards the message specified to this actor and preserves the original sender of the message */ def forward(message: AnyRef, sender: ActorRef): Unit = @@ -364,7 +361,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal else forward(message)(Some(sender)) /** - * Akka Java API + * Akka Java API.

* Use getContext().replyUnsafe(..) to reply with a message to the original sender of the message currently * being processed. *

@@ -373,7 +370,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal def replyUnsafe(message: AnyRef) = reply(message) /** - * Akka Java API + * Akka Java API.

* Use getContext().replySafe(..) to reply with a message to the original sender of the message currently * being processed. *

@@ -387,7 +384,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal def actorClass: Class[_ <: Actor] /** - * Akka Java API + * Akka Java API.

* Returns the class for the Actor instance that is managed by the ActorRef. */ def getActorClass(): Class[_ <: Actor] = actorClass @@ -398,7 +395,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal def actorClassName: String /** - * Akka Java API + * Akka Java API.

* Returns the class name for the Actor instance that is managed by the ActorRef. */ def getActorClassName(): String = actorClassName @@ -454,6 +451,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal *

* To be invoked from within the actor itself. */ + @deprecated("Will be removed after 1.1, use Actor.actorOf instead") def spawn(clazz: Class[_ <: Actor]): ActorRef /** @@ -461,6 +459,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal *

* To be invoked from within the actor itself. */ + @deprecated("Will be removed after 1.1, client managed actors will be removed") def spawnRemote(clazz: Class[_ <: Actor], hostname: String, port: Int, timeout: Long): ActorRef /** @@ -468,6 +467,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal *

* To be invoked from within the actor itself. */ + @deprecated("Will be removed after 1.1, use Actor.remote.actorOf instead and then link on success") def spawnLink(clazz: Class[_ <: Actor]): ActorRef /** @@ -475,6 +475,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal *

* To be invoked from within the actor itself. */ + @deprecated("Will be removed after 1.1, client managed actors will be removed") def spawnLinkRemote(clazz: Class[_ <: Actor], hostname: String, port: Int, timeout: Long): ActorRef /** @@ -483,7 +484,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal def mailboxSize = dispatcher.mailboxSize(this) /** - * Akka Java API + * Akka Java API.

* Returns the mailbox size. */ def getMailboxSize(): Int = mailboxSize @@ -494,7 +495,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal def supervisor: Option[ActorRef] /** - * Akka Java API + * Akka Java API.

* Returns the supervisor, if there is one. */ def getSupervisor(): ActorRef = supervisor getOrElse null @@ -506,12 +507,36 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal def linkedActors: JMap[Uuid, ActorRef] /** - * Java API + * Java API.

* Returns an unmodifiable Java Map containing the linked actors, * please note that the backing map is thread-safe but not immutable */ def getLinkedActors(): JMap[Uuid, ActorRef] = linkedActors + /** + * Abstraction for unification of sender and senderFuture for later reply + */ + def channel: Channel[Any] = { + if (senderFuture.isDefined) { + new Channel[Any] { + val future = senderFuture.get + def !(msg: Any) = future completeWithResult msg + } + } else if (sender.isDefined) { + val someSelf = Some(this) + new Channel[Any] { + val client = sender.get + def !(msg: Any) = client.!(msg)(someSelf) + } + } else throw new IllegalActorStateException("No channel available") + } + + /** + * Java API.

+ * Abstraction for unification of sender and senderFuture for later reply + */ + def getChannel: Channel[Any] = channel + protected[akka] def invoke(messageHandle: MessageInvocation): Unit protected[akka] def postMessageToMailbox(message: Any, senderOption: Option[ActorRef]): Unit @@ -736,7 +761,7 @@ class LocalActorRef private[akka] ( */ def startLink(actorRef: ActorRef): Unit = guard.withGuard { link(actorRef) - actorRef.start + actorRef.start() } /** @@ -745,7 +770,7 @@ class LocalActorRef private[akka] ( * To be invoked from within the actor itself. */ def spawn(clazz: Class[_ <: Actor]): ActorRef = - Actor.actorOf(clazz).start + Actor.actorOf(clazz).start() /** * Atomically create (from actor class), start and make an actor remote. @@ -756,7 +781,7 @@ class LocalActorRef private[akka] ( ensureRemotingEnabled val ref = Actor.remote.actorOf(clazz, hostname, port) ref.timeout = timeout - ref.start + ref.start() } /** @@ -767,7 +792,7 @@ class LocalActorRef private[akka] ( def spawnLink(clazz: Class[_ <: Actor]): ActorRef = { val actor = spawn(clazz) link(actor) - actor.start + actor.start() actor } @@ -781,7 +806,7 @@ class LocalActorRef private[akka] ( val actor = Actor.remote.actorOf(clazz, hostname, port) actor.timeout = timeout link(actor) - actor.start + actor.start() actor } @@ -866,7 +891,7 @@ class LocalActorRef private[akka] ( case _ => if (_supervisor.isDefined) notifySupervisorWithMessage(Exit(this, reason)) - else dead.stop + else dead.stop() } } @@ -906,7 +931,6 @@ class LocalActorRef private[akka] ( failedActor match { case p: Proxyable => - //p.swapProxiedActor(freshActor) //TODO: broken failedActor.preRestart(reason) failedActor.postRestart(reason) case _ => @@ -940,10 +964,10 @@ class LocalActorRef private[akka] ( case _ => // either permanent or none where default is permanent val success = try { - performRestart + performRestart() true } catch { - case e => + case e => EventHandler.error(e, this, "Exception in restart of Actor 
[%s]".format(toString)) false // an error or exception here should trigger a retry } finally { @@ -994,13 +1018,18 @@ class LocalActorRef private[akka] ( // ========= PRIVATE FUNCTIONS ========= private[this] def newActor: Actor = { - val a = Actor.actorRefInCreation.withValue(Some(this)) { actorFactory() } - if (a eq null) throw new ActorInitializationException("Actor instance passed to ActorRef can not be 'null'") - a + try { + Actor.actorRefInCreation.value = Some(this) + val a = actorFactory() + if (a eq null) throw new ActorInitializationException("Actor instance passed to ActorRef can not be 'null'") + a + } finally { + Actor.actorRefInCreation.value = None + } } private def shutDownTemporaryActor(temporaryActor: ActorRef) { - temporaryActor.stop + temporaryActor.stop() _linkedActors.remove(temporaryActor.uuid) // remove the temporary actor // if last temporary actor is gone, then unlink me from supervisor if (_linkedActors.isEmpty) notifySupervisorWithMessage(UnlinkAndStop(this)) @@ -1009,7 +1038,7 @@ class LocalActorRef private[akka] ( private def handleExceptionInDispatch(reason: Throwable, message: Any) = { EventHandler.error(reason, this, message.toString) - + //Prevent any further messages to be processed until the actor has been restarted dispatcher.suspend(this) @@ -1032,7 +1061,7 @@ class LocalActorRef private[akka] ( { val i = _linkedActors.values.iterator while (i.hasNext) { - i.next.stop + i.next.stop() i.remove } } @@ -1121,9 +1150,9 @@ private[akka] case class RemoteActorRef private[akka] ( senderOption: Option[ActorRef], senderFuture: Option[CompletableFuture[T]]): CompletableFuture[T] = { val future = Actor.remote.send[T]( - message, senderOption, senderFuture, - homeAddress.get, timeout, - false, this, None, + message, senderOption, senderFuture, + homeAddress.get, timeout, + false, this, None, actorType, loader) if (future.isDefined) future.get else throw new IllegalActorStateException("Expected a future from remote call to actor " + 
toString) @@ -1201,8 +1230,8 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef => */ def id: String - def id_=(id: String): Unit - + def id_=(id: String): Unit + /** * User overridable callback/setting. *

@@ -1267,7 +1296,7 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef => def !(message: Any)(implicit sender: Option[ActorRef] = None): Unit = { if (isRunning) postMessageToMailbox(message, sender) else throw new ActorInitializationException( - "Actor has not been started, you need to invoke 'actor.start' before using it") + "Actor has not been started, you need to invoke 'actor.start()' before using it") } /** @@ -1298,7 +1327,7 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef => } future.resultOrException } else throw new ActorInitializationException( - "Actor has not been started, you need to invoke 'actor.start' before using it") + "Actor has not been started, you need to invoke 'actor.start()' before using it") } /** @@ -1313,7 +1342,7 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef => def !!![T](message: Any, timeout: Long = this.timeout)(implicit sender: Option[ActorRef] = None): Future[T] = { if (isRunning) postMessageToMailboxAndCreateFutureResultWithTimeout[T](message, timeout, sender, None) else throw new ActorInitializationException( - "Actor has not been started, you need to invoke 'actor.start' before using it") + "Actor has not been started, you need to invoke 'actor.start()' before using it") } /** @@ -1327,7 +1356,7 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef => postMessageToMailboxAndCreateFutureResultWithTimeout(message, timeout, sender.get.sender, sender.get.senderFuture) else postMessageToMailbox(message, sender.get.sender) - } else throw new ActorInitializationException("Actor has not been started, you need to invoke 'actor.start' before using it") + } else throw new ActorInitializationException("Actor has not been started, you need to invoke 'actor.start()' before using it") } /** @@ -1360,24 +1389,6 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef => } else false } - /** - * Abstraction for unification of sender and senderFuture for later reply - */ - def channel: 
Channel[Any] = { - if (senderFuture.isDefined) { - new Channel[Any] { - val future = senderFuture.get - def !(msg: Any) = future completeWithResult msg - } - } else if (sender.isDefined) { - val someSelf = Some(this) - new Channel[Any] { - val client = sender.get - def !(msg: Any) = client.!(msg)(someSelf) - } - } else throw new IllegalActorStateException("No channel available") - } - /** * Atomically create (from actor class) and start an actor. */ diff --git a/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala b/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala index 03bef32d25..27057ce8a9 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala @@ -261,9 +261,9 @@ private[actor] final class ActorRegistry private[actor] () extends ListenerManag val actorRef = elements.nextElement val proxy = typedActorFor(actorRef) if (proxy.isDefined) TypedActorModule.typedActorObjectInstance.get.stop(proxy.get) - else actorRef.stop + else actorRef.stop() } - } else foreach(_.stop) + } else foreach(_.stop()) if (Remote.isEnabled) { Actor.remote.clear //TODO: REVISIT: Should this be here? 
} diff --git a/akka-actor/src/main/scala/akka/actor/BootableActorLoaderService.scala b/akka-actor/src/main/scala/akka/actor/BootableActorLoaderService.scala index 4b96f9ab5d..48c1127f84 100644 --- a/akka-actor/src/main/scala/akka/actor/BootableActorLoaderService.scala +++ b/akka-actor/src/main/scala/akka/actor/BootableActorLoaderService.scala @@ -56,6 +56,6 @@ trait BootableActorLoaderService extends Bootable { abstract override def onUnload = { super.onUnload - Actor.registry.shutdownAll + Actor.registry.shutdownAll() } } diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 046685f22d..37752b1373 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -42,6 +42,14 @@ object FSM { } } + /* + * This extractor is just convenience for matching a (S, S) pair, including a + * reminder what the new state is. + */ + object -> { + def unapply[S](in : (S, S)) = Some(in) + } + /* * With these implicits in scope, you can write "5 seconds" anywhere a * Duration or Option[Duration] is expected. This is conveniently true @@ -90,6 +98,23 @@ object FSM { * Each of the above also supports the method replying(AnyRef) for * sending a reply before changing state. * + * While changing state, custom handlers may be invoked which are registered + * using onTransition. This is meant to enable concentrating + * different concerns in different places; you may choose to use + * when for describing the properties of a state, including of + * course initiating transitions, but you can describe the transitions using + * onTransition to avoid having to duplicate that code among + * multiple paths which lead to a transition: + * + *

+ * onTransition {
+ *   case Active -> _ => cancelTimer("activeTimer")
+ * }
+ * 
+ * + * Multiple such blocks are supported and all of them will be called, not only + * the first matching one. + * * Another feature is that other actors may subscribe for transition events by * sending a SubscribeTransitionCallback message to this actor; * use UnsubscribeTransitionCallback before stopping the other @@ -119,7 +144,7 @@ trait FSM[S, D] { type StateFunction = scala.PartialFunction[Event[D], State] type Timeout = Option[Duration] - type TransitionHandler = (S, S) => Unit + type TransitionHandler = PartialFunction[(S, S), Unit] /* DSL */ @@ -239,12 +264,43 @@ trait FSM[S, D] { /** * Set handler which is called upon each state transition, i.e. not when - * staying in the same state. + * staying in the same state. This may use the pair extractor defined in the + * FSM companion object like so: + * + *
+   * onTransition {
+   *   case Old -> New => doSomething
+   * }
+   * 
+ * + * It is also possible to supply a 2-ary function object: + * + *
+   * onTransition(handler _)
+   *
+   * private def handler(from: S, to: S) { ... }
+   * 
+ * + * The underscore is unfortunately necessary to enable the nicer syntax shown + * above (it uses the implicit conversion total2pf under the hood). + * + * Multiple handlers may be installed, and every one of them will be + * called, not only the first one matching. */ - protected final def onTransition(transitionHandler: TransitionHandler) = { - transitionEvent = transitionHandler + protected final def onTransition(transitionHandler: TransitionHandler) { + transitionEvent :+= transitionHandler } + /** + * Convenience wrapper for using a total function instead of a partial + * function literal. To be used with onTransition. + */ + implicit protected final def total2pf(transitionHandler: (S, S) => Unit) = + new PartialFunction[(S, S), Unit] { + def isDefinedAt(in : (S, S)) = true + def apply(in : (S, S)) { transitionHandler(in._1, in._2) } + } + /** * Set handler which is called upon termination of this FSM actor. */ @@ -300,7 +356,10 @@ trait FSM[S, D] { case StopEvent(reason, _, _) => } - private var transitionEvent: TransitionHandler = (from, to) => { + private var transitionEvent: List[TransitionHandler] = Nil + private def handleTransition(prev : S, next : S) { + val tuple = (prev, next) + for (te <- transitionEvent) { if (te.isDefinedAt(tuple)) te(tuple) } } override final protected def receive: Receive = { @@ -351,7 +410,7 @@ trait FSM[S, D] { terminate(Failure("Next state %s does not exist".format(nextState.stateName))) } else { if (currentState.stateName != nextState.stateName) { - transitionEvent.apply(currentState.stateName, nextState.stateName) + handleTransition(currentState.stateName, nextState.stateName) if (!transitionCallBackList.isEmpty) { val transition = Transition(self, currentState.stateName, nextState.stateName) transitionCallBackList.foreach(_ ! 
transition) @@ -374,7 +433,7 @@ trait FSM[S, D] { private def terminate(reason: Reason) = { terminateEvent.apply(StopEvent(reason, currentState.stateName, currentState.stateData)) - self.stop + self.stop() } case class Event[D](event: Any, stateData: D) diff --git a/akka-actor/src/main/scala/akka/actor/Supervisor.scala b/akka-actor/src/main/scala/akka/actor/Supervisor.scala index bb08bcdf80..22abafaccc 100644 --- a/akka-actor/src/main/scala/akka/actor/Supervisor.scala +++ b/akka-actor/src/main/scala/akka/actor/Supervisor.scala @@ -106,7 +106,7 @@ sealed class Supervisor(handler: FaultHandlingStrategy) { private val _childActors = new ConcurrentHashMap[String, List[ActorRef]] private val _childSupervisors = new CopyOnWriteArrayList[Supervisor] - private[akka] val supervisor = actorOf(new SupervisorActor(handler)).start + private[akka] val supervisor = actorOf(new SupervisorActor(handler)).start() def uuid = supervisor.uuid @@ -114,7 +114,7 @@ sealed class Supervisor(handler: FaultHandlingStrategy) { this } - def shutdown(): Unit = supervisor.stop + def shutdown(): Unit = supervisor.stop() def link(child: ActorRef) = supervisor.link(child) @@ -131,7 +131,7 @@ sealed class Supervisor(handler: FaultHandlingStrategy) { servers.map(server => server match { case Supervise(actorRef, lifeCycle, registerAsRemoteService) => - actorRef.start + actorRef.start() val className = actorRef.actor.getClass.getName val currentActors = { val list = _childActors.get(className) @@ -163,7 +163,7 @@ final class SupervisorActor private[akka] (handler: FaultHandlingStrategy) exten val i = self.linkedActors.values.iterator while(i.hasNext) { val ref = i.next - ref.stop + ref.stop() self.unlink(ref) } } diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala index bf2208d960..77500d4059 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala @@ -4,14 
+4,8 @@ package akka.actor -import akka.dispatch._ -import akka.config.Supervision._ import akka.japi.{Creator, Procedure} -import java.net.InetSocketAddress - -import scala.reflect.BeanProperty - /** * Subclass this abstract class to create a MDB-style untyped actor. *

@@ -62,11 +56,21 @@ import scala.reflect.BeanProperty */ abstract class UntypedActor extends Actor { + /** + * To be implemented by concrete UntypedActor. Defines the message handler. + */ + @throws(classOf[Exception]) + def onReceive(message: Any): Unit + + /** + * Returns the 'self' reference with the API. + */ def getContext(): ActorRef = self - final protected def receive = { - case msg => onReceive(msg) - } + /** + * Returns the 'self' reference with the API. + */ + def context(): ActorRef = self /** * Java API for become @@ -79,8 +83,47 @@ abstract class UntypedActor extends Actor { def become(behavior: Procedure[Any], discardOld: Boolean): Unit = super.become({ case msg => behavior.apply(msg) }, discardOld) - @throws(classOf[Exception]) - def onReceive(message: Any): Unit + /** + * User overridable callback. + *

+ * Is called when an Actor is started by invoking 'actor.start()'. + */ + override def preStart {} + + /** + * User overridable callback. + *

+ * Is called when 'actor.stop()' is invoked. + */ + override def postStop {} + + /** + * User overridable callback. + *

+ * Is called on a crashed Actor right BEFORE it is restarted to allow clean up of resources before Actor is terminated. + */ + override def preRestart(reason: Throwable) {} + + /** + * User overridable callback. + *

+ * Is called right AFTER restart on the newly created Actor to allow reinitialization after an Actor crash. + */ + override def postRestart(reason: Throwable) {} + + /** + * User overridable callback. + *

+ * Is called when a message isn't handled by the current behavior of the actor + * by default it throws an UnhandledMessageException + */ + override def unhandled(msg: Any) { + throw new UnhandledMessageException(msg, self) + } + + final protected def receive = { + case msg => onReceive(msg) + } } /** @@ -88,4 +131,4 @@ abstract class UntypedActor extends Actor { * * @author Jonas Bonér */ -trait UntypedActorFactory extends Creator[Actor] \ No newline at end of file +trait UntypedActorFactory extends Creator[Actor] diff --git a/akka-actor/src/main/scala/akka/Implicits.scala b/akka-actor/src/main/scala/akka/actor/package.scala similarity index 94% rename from akka-actor/src/main/scala/akka/Implicits.scala rename to akka-actor/src/main/scala/akka/actor/package.scala index 6370e1c2fd..0a781649eb 100644 --- a/akka-actor/src/main/scala/akka/Implicits.scala +++ b/akka-actor/src/main/scala/akka/actor/package.scala @@ -14,7 +14,10 @@ package object actor { ref.asInstanceOf[ActorRef] type Uuid = com.eaio.uuid.UUID + def newUuid(): Uuid = new Uuid() - def uuidFrom(time: Long, clockSeqAndNode: Long): Uuid = new Uuid(time,clockSeqAndNode) + + def uuidFrom(time: Long, clockSeqAndNode: Long): Uuid = new Uuid(time, clockSeqAndNode) + def uuidFrom(uuid: String): Uuid = new Uuid(uuid) } diff --git a/akka-actor/src/main/scala/akka/dataflow/DataFlow.scala b/akka-actor/src/main/scala/akka/dataflow/DataFlow.scala index 72fbbaaeb2..7ac900333d 100644 --- a/akka-actor/src/main/scala/akka/dataflow/DataFlow.scala +++ b/akka-actor/src/main/scala/akka/dataflow/DataFlow.scala @@ -40,19 +40,19 @@ object DataFlow { * Executes the supplied function in another thread. */ def thread[A <: AnyRef, R <: AnyRef](body: A => R) = - actorOf(new ReactiveEventBasedThread(body)).start + actorOf(new ReactiveEventBasedThread(body)).start() /** * JavaAPI. * Executes the supplied Function in another thread. 
*/ def thread[A <: AnyRef, R <: AnyRef](body: Function[A,R]) = - actorOf(new ReactiveEventBasedThread(body.apply)).start + actorOf(new ReactiveEventBasedThread(body.apply)).start() private class ReactiveEventBasedThread[A <: AnyRef, T <: AnyRef](body: A => T) extends Actor { def receive = { - case Exit => self.stop + case Exit => self.stop() case message => self.reply(body(message.asInstanceOf[A])) } } @@ -84,7 +84,7 @@ object DataFlow { dataFlow.blockedReaders.poll ! s } else throw new DataFlowVariableException( "Attempt to change data flow variable (from [" + dataFlow.value.get + "] to [" + v + "])") - case Exit => self.stop + case Exit => self.stop() } } @@ -97,11 +97,11 @@ object DataFlow { case None => readerFuture = self.senderFuture } case Set(v:T) => readerFuture.map(_ completeWithResult v) - case Exit => self.stop + case Exit => self.stop() } } - private[this] val in = actorOf(new In(this)).start + private[this] val in = actorOf(new In(this)).start() /** * Sets the value of this variable (if unset) with the value of the supplied variable. 
@@ -143,7 +143,7 @@ object DataFlow { */ def apply(): T = { value.get getOrElse { - val out = actorOf(new Out(this)).start + val out = actorOf(new Out(this)).start() val result = try { blockedReaders offer out diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 5ee900a222..7c52e716f2 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -54,7 +54,7 @@ object Dispatchers { val MAILBOX_PUSH_TIME_OUT = Duration(config.getInt("akka.actor.default-dispatcher.mailbox-push-timeout-time", 10), TIME_UNIT) val THROUGHPUT_DEADLINE_TIME = Duration(config.getInt("akka.actor.throughput-deadline-time",-1), TIME_UNIT) val THROUGHPUT_DEADLINE_TIME_MILLIS = THROUGHPUT_DEADLINE_TIME.toMillis.toInt - val MAILBOX_TYPE: MailboxType = if (MAILBOX_CAPACITY < 0) UnboundedMailbox() else BoundedMailbox() + val MAILBOX_TYPE: MailboxType = if (MAILBOX_CAPACITY < 1) UnboundedMailbox() else BoundedMailbox() lazy val defaultGlobalDispatcher = { config.getSection("akka.actor.default-dispatcher").flatMap(from).getOrElse(globalExecutorBasedEventDrivenDispatcher) diff --git a/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcher.scala index 28c07c6af6..261a4c8170 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcher.scala @@ -116,18 +116,18 @@ class ExecutorBasedEventDrivenDispatcher( override def mailboxSize(actorRef: ActorRef) = getMailbox(actorRef).size def createMailbox(actorRef: ActorRef): AnyRef = mailboxType match { - case b: UnboundedMailbox if b.blocking => - new DefaultUnboundedMessageQueue(true) with ExecutableMailbox { - final def dispatcher = ExecutorBasedEventDrivenDispatcher.this + case b: 
UnboundedMailbox => + if (b.blocking) { + new DefaultUnboundedMessageQueue(true) with ExecutableMailbox { + final def dispatcher = ExecutorBasedEventDrivenDispatcher.this + } + } else { //If we have an unbounded, non-blocking mailbox, we can go lockless + new ConcurrentLinkedQueue[MessageInvocation] with MessageQueue with ExecutableMailbox { + final def dispatcher = ExecutorBasedEventDrivenDispatcher.this + final def enqueue(m: MessageInvocation) = this.add(m) + final def dequeue(): MessageInvocation = this.poll() + } } - - case b: UnboundedMailbox if !b.blocking => //If we have an unbounded, non-blocking mailbox, we can go lockless - new ConcurrentLinkedQueue[MessageInvocation] with MessageQueue with ExecutableMailbox { - final def dispatcher = ExecutorBasedEventDrivenDispatcher.this - final def enqueue(m: MessageInvocation) = this.add(m) - final def dequeue(): MessageInvocation = this.poll() - } - case b: BoundedMailbox => new DefaultBoundedMessageQueue(b.capacity, b.pushTimeOut, b.blocking) with ExecutableMailbox { final def dispatcher = ExecutorBasedEventDrivenDispatcher.this @@ -229,9 +229,31 @@ trait ExecutableMailbox extends Runnable { self: MessageQueue => } } +object PriorityGenerator { + /** + * Creates a PriorityGenerator that uses the supplied function as priority generator + */ + def apply(priorityFunction: Any => Int): PriorityGenerator = new PriorityGenerator { + def gen(message: Any): Int = priorityFunction(message) + } +} + +/** + * A PriorityGenerator is a convenience API to create a Comparator that orders the messages of a + * PriorityExecutorBasedEventDrivenDispatcher + */ +abstract class PriorityGenerator extends java.util.Comparator[MessageInvocation] { + def gen(message: Any): Int + + final def compare(thisMessage: MessageInvocation, thatMessage: MessageInvocation): Int = + gen(thisMessage.message) - gen(thatMessage.message) +} + /** * A version of ExecutorBasedEventDrivenDispatcher that gives all actors registered to it a priority mailbox, * 
prioritized according to the supplied comparator. + * + * The dispatcher will process the messages with the _lowest_ priority first. */ class PriorityExecutorBasedEventDrivenDispatcher( name: String, @@ -242,10 +264,10 @@ class PriorityExecutorBasedEventDrivenDispatcher( config: ThreadPoolConfig = ThreadPoolConfig() ) extends ExecutorBasedEventDrivenDispatcher(name, throughput, throughputDeadlineTime, mailboxType, config) with PriorityMailbox { - def this(name: String, comparator: java.util.Comparator[MessageInvocation], throughput: Int, throughputDeadlineTime: Int, mailboxType: UnboundedMailbox) = + def this(name: String, comparator: java.util.Comparator[MessageInvocation], throughput: Int, throughputDeadlineTime: Int, mailboxType: MailboxType) = this(name, comparator, throughput, throughputDeadlineTime, mailboxType,ThreadPoolConfig()) // Needed for Java API usage - def this(name: String, comparator: java.util.Comparator[MessageInvocation], throughput: Int, mailboxType: UnboundedMailbox) = + def this(name: String, comparator: java.util.Comparator[MessageInvocation], throughput: Int, mailboxType: MailboxType) = this(name, comparator, throughput, Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS, mailboxType) // Needed for Java API usage def this(name: String, comparator: java.util.Comparator[MessageInvocation], throughput: Int) = @@ -258,6 +280,15 @@ class PriorityExecutorBasedEventDrivenDispatcher( this(name, comparator, Dispatchers.THROUGHPUT, Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS, Dispatchers.MAILBOX_TYPE) // Needed for Java API usage } + +/** + * Can be used to give an ExecutorBasedEventDrivenDispatcher's actors priority-enabled mailboxes + * + * Usage: + * new ExecutorBasedEventDrivenDispatcher(...) with PriorityMailbox { + * val comparator = ...comparator that determines mailbox priority ordering... 
+ * } + */ trait PriorityMailbox { self: ExecutorBasedEventDrivenDispatcher => def comparator: java.util.Comparator[MessageInvocation] diff --git a/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcher.scala index 451cdf8b80..f2f63a3ff4 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcher.scala @@ -78,12 +78,12 @@ class ExecutorBasedEventDrivenWorkStealingDispatcher( override private[akka] def dispatch(invocation: MessageInvocation) = { val mbox = getMailbox(invocation.receiver) - if (mbox.dispatcherLock.locked && attemptDonationOf(invocation, mbox)) { + /*if (!mbox.isEmpty && attemptDonationOf(invocation, mbox)) { //We were busy and we got to donate the message to some other lucky guy, we're done here - } else { + } else {*/ mbox enqueue invocation registerForExecution(mbox) - } + //} } override private[akka] def reRegisterForExecution(mbox: MessageQueue with ExecutableMailbox): Unit = { @@ -110,13 +110,13 @@ class ExecutorBasedEventDrivenWorkStealingDispatcher( /** * Returns true if the donation succeeded or false otherwise */ - protected def attemptDonationOf(message: MessageInvocation, donorMbox: MessageQueue with ExecutableMailbox): Boolean = { + /*protected def attemptDonationOf(message: MessageInvocation, donorMbox: MessageQueue with ExecutableMailbox): Boolean = { val actors = members // copy to prevent concurrent modifications having any impact doFindDonorRecipient(donorMbox, actors, System.identityHashCode(message) % actors.size) match { case null => false case recipient => donate(message, recipient) } - } + }*/ /** * Rewrites the message and adds that message to the recipients mailbox diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala 
b/akka-actor/src/main/scala/akka/dispatch/Future.scala index ba0b7b83ba..1f86613b47 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -57,17 +57,21 @@ object Futures { } /** - * Java API + * Java API. * Returns a Future to the result of the first future in the list that is completed */ def firstCompletedOf[T <: AnyRef](futures: java.lang.Iterable[Future[T]], timeout: Long): Future[T] = - firstCompletedOf(scala.collection.JavaConversions.asScalaIterable(futures),timeout) + firstCompletedOf(scala.collection.JavaConversions.iterableAsScalaIterable(futures),timeout) /** * A non-blocking fold over the specified futures. * The fold is performed on the thread where the last future is completed, * the result will be the first failure of any of the futures, or any failure in the actual fold, * or the result of the fold. + * Example: + *

+   *   val result = Futures.fold(0)(futures)(_ + _).await.result
+   * 
*/ def fold[T,R](zero: R, timeout: Long = Actor.TIMEOUT)(futures: Iterable[Future[T]])(foldFun: (R, T) => R): Future[R] = { if(futures.isEmpty) { @@ -83,7 +87,7 @@ object Futures { results add r.b if (results.size == allDone) { //Only one thread can get here try { - result completeWithResult scala.collection.JavaConversions.asScalaIterable(results).foldLeft(zero)(foldFun) + result completeWithResult scala.collection.JavaConversions.collectionAsScalaIterable(results).foldLeft(zero)(foldFun) } catch { case e: Exception => EventHandler.error(e, this, e.getMessage) @@ -111,10 +115,14 @@ object Futures { * or the result of the fold. */ def fold[T <: AnyRef, R <: AnyRef](zero: R, timeout: Long, futures: java.lang.Iterable[Future[T]], fun: akka.japi.Function2[R, T, R]): Future[R] = - fold(zero, timeout)(scala.collection.JavaConversions.asScalaIterable(futures))( fun.apply _ ) + fold(zero, timeout)(scala.collection.JavaConversions.iterableAsScalaIterable(futures))( fun.apply _ ) /** * Initiates a fold over the supplied futures where the fold-zero is the result value of the Future that's completed first + * Example: + *
+   *   val result = Futures.reduce(futures)(_ + _).await.result
+   * 
*/ def reduce[T, R >: T](futures: Iterable[Future[T]], timeout: Long = Actor.TIMEOUT)(op: (R,T) => T): Future[R] = { if (futures.isEmpty) @@ -138,27 +146,40 @@ object Futures { } /** - * Java API + * Java API. * Initiates a fold over the supplied futures where the fold-zero is the result value of the Future that's completed first */ def reduce[T <: AnyRef, R >: T](futures: java.lang.Iterable[Future[T]], timeout: Long, fun: akka.japi.Function2[R, T, T]): Future[R] = - reduce(scala.collection.JavaConversions.asScalaIterable(futures), timeout)(fun.apply _) + reduce(scala.collection.JavaConversions.iterableAsScalaIterable(futures), timeout)(fun.apply _) import scala.collection.mutable.Builder import scala.collection.generic.CanBuildFrom + /** + * Simple version of Futures.traverse. Transforms a Traversable[Future[A]] into a Future[Traversable[A]]. + * Useful for reducing many Futures into a single Future. + */ def sequence[A, M[_] <: Traversable[_]](in: M[Future[A]], timeout: Long = Actor.TIMEOUT)(implicit cbf: CanBuildFrom[M[Future[A]], A, M[A]]): Future[M[A]] = in.foldLeft(new DefaultCompletableFuture[Builder[A, M[A]]](timeout).completeWithResult(cbf(in)): Future[Builder[A, M[A]]])((fr, fa) => for (r <- fr; a <- fa.asInstanceOf[Future[A]]) yield (r += a)).map(_.result) + /** + * Transforms a Traversable[A] into a Future[Traversable[B]] using the provided Function A => Future[B]. + * This is useful for performing a parallel map. For example, to apply a function to all items of a list + * in parallel: + *
+   * val myFutureList = Futures.traverse(myList)(x => Future(myFunc(x)))
+   * 
+ */ def traverse[A, B, M[_] <: Traversable[_]](in: M[A], timeout: Long = Actor.TIMEOUT)(fn: A => Future[B])(implicit cbf: CanBuildFrom[M[A], B, M[B]]): Future[M[B]] = in.foldLeft(new DefaultCompletableFuture[Builder[B, M[B]]](timeout).completeWithResult(cbf(in)): Future[Builder[B, M[B]]]) { (fr, a) => val fb = fn(a.asInstanceOf[A]) for (r <- fr; b <-fb) yield (r += b) }.map(_.result) - //Deprecations - - + // ===================================== + // Deprecations + // ===================================== + /** * (Blocking!) */ @@ -299,6 +320,12 @@ sealed trait Future[+T] { /** * When the future is compeleted with a valid result, apply the provided * PartialFunction to the result. + *
+   *   val result = future receive {
+   *     case Foo => "foo"
+   *     case Bar => "bar"
+   *   }.await.result
+   * 
*/ final def receive(pf: PartialFunction[Any, Unit]): Future[T] = onComplete { f => val optr = f.result @@ -313,6 +340,14 @@ sealed trait Future[+T] { * result of this Future if a match is found, or else return a MatchError. * If this Future is completed with an exception then the new Future will * also contain this exception. + * Example: + *
+   * val future1 = for {
+   *   a <- actor !!! Req("Hello") collect { case Res(x: Int)    => x }
+   *   b <- actor !!! Req(a)       collect { case Res(x: String) => x }
+   *   c <- actor !!! Req(7)       collect { case Res(x: String) => x }
+   * } yield b + "-" + c
+   * 
*/ final def collect[A](pf: PartialFunction[Any, A]): Future[A] = { val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS) @@ -343,6 +378,14 @@ sealed trait Future[+T] { * Creates a new Future by applying a function to the successful result of * this Future. If this Future is completed with an exception then the new * Future will also contain this exception. + * Example: + *
+   * val future1 = for {
+   *   a: Int    <- actor !!! "Hello" // returns 5
+   *   b: String <- actor !!! a       // returns "10"
+   *   c: String <- actor !!! 7       // returns "14"
+   * } yield b + "-" + c
+   * 
*/ final def map[A](f: T => A): Future[A] = { val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS) @@ -371,6 +414,14 @@ sealed trait Future[+T] { * this Future, and returns the result of the function as the new Future. * If this Future is completed with an exception then the new Future will * also contain this exception. + * Example: + *
+   * val future1 = for {
+   *   a: Int    <- actor !!! "Hello" // returns 5
+   *   b: String <- actor !!! a       // returns "10"
+   *   c: String <- actor !!! 7       // returns "14"
+   * } yield b + "-" + c
+   * 
*/ final def flatMap[A](f: T => Future[A]): Future[A] = { val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS) @@ -425,7 +476,7 @@ sealed trait Future[+T] { } /** - * Returns the current result, throws the exception is one has been raised, else returns None + * Returns the current result, throws the exception is one has been raised, else returns None */ final def resultOrException: Option[T] = { val v = value @@ -450,50 +501,50 @@ sealed trait Future[+T] { } /** - * Essentially this is the Promise (or write-side) of a Future (read-side) + * Essentially this is the Promise (or write-side) of a Future (read-side). */ trait CompletableFuture[T] extends Future[T] { /** - * Completes this Future with the specified result, if not already completed, - * returns this + * Completes this Future with the specified result, if not already completed. + * @return this */ - def complete(value: Either[Throwable, T]): CompletableFuture[T] + def complete(value: Either[Throwable, T]): Future[T] /** - * Completes this Future with the specified result, if not already completed, - * returns this + * Completes this Future with the specified result, if not already completed. + * @return this */ - final def completeWithResult(result: T): CompletableFuture[T] = complete(Right(result)) + final def completeWithResult(result: T): Future[T] = complete(Right(result)) /** - * Completes this Future with the specified exception, if not already completed, - * returns this + * Completes this Future with the specified exception, if not already completed. + * @return this */ - final def completeWithException(exception: Throwable): CompletableFuture[T] = complete(Left(exception)) + final def completeWithException(exception: Throwable): Future[T] = complete(Left(exception)) /** * Completes this Future with the specified other Future, when that Future is completed, - * unless this Future has already been completed - * returns this + * unless this Future has already been completed. 
+ * @return this. */ - final def completeWith(other: Future[T]): CompletableFuture[T] = { + final def completeWith(other: Future[T]): Future[T] = { other onComplete { f => complete(f.value.get) } this } /** - * Alias for complete(Right(value)) + * Alias for complete(Right(value)). */ - final def << (value: T): CompletableFuture[T] = complete(Right(value)) + final def << (value: T): Future[T] = complete(Right(value)) /** - * Alias for completeWith(other) + * Alias for completeWith(other). */ - final def << (other : Future[T]): CompletableFuture[T] = completeWith(other) + final def << (other : Future[T]): Future[T] = completeWith(other) } /** - * Based on code from the actorom actor framework by Sergio Bossa [http://code.google.com/p/actorom/]. + * The default concrete Future implementation. */ class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends CompletableFuture[T] { diff --git a/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala b/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala index b319d8ac87..374b3b4b45 100644 --- a/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala +++ b/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala @@ -143,7 +143,7 @@ trait MessageDispatcher { while (i.hasNext()) { val uuid = i.next() Actor.registry.actorFor(uuid) match { - case Some(actor) => actor.stop + case Some(actor) => actor.stop() case None => {} } } @@ -215,12 +215,15 @@ trait MessageDispatcher { * Trait to be used for hooking in new dispatchers into Dispatchers.fromConfig */ abstract class MessageDispatcherConfigurator { + /** + * Returns an instance of MessageDispatcher given a Configuration + */ def configure(config: Configuration): MessageDispatcher def mailboxType(config: Configuration): MailboxType = { val capacity = config.getInt("mailbox-capacity", Dispatchers.MAILBOX_CAPACITY) // FIXME how do we read in isBlocking for mailbox? Now set to 'false'. 
- if (capacity < 0) UnboundedMailbox() + if (capacity < 1) UnboundedMailbox() else BoundedMailbox(false, capacity, Duration(config.getInt("mailbox-push-timeout-time", Dispatchers.MAILBOX_PUSH_TIME_OUT.toMillis.toInt), TIME_UNIT)) } diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 31d5dca0eb..83c30f23e0 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -160,12 +160,11 @@ class MonitorableThreadFactory(val name: String) extends ThreadFactory { */ object MonitorableThread { val DEFAULT_NAME = "MonitorableThread" - val created = new AtomicInteger - val alive = new AtomicInteger - @volatile var debugLifecycle = false -} -// FIXME fix the issues with using the monitoring in MonitorableThread + // FIXME use MonitorableThread.created and MonitorableThread.alive in monitoring + val created = new AtomicInteger + val alive = new AtomicInteger +} /** * @author Jonas Bonér @@ -178,7 +177,6 @@ class MonitorableThread(runnable: Runnable, name: String) }) override def run = { - val debug = MonitorableThread.debugLifecycle try { MonitorableThread.alive.incrementAndGet super.run diff --git a/akka-actor/src/main/scala/akka/event/EventHandler.scala b/akka-actor/src/main/scala/akka/event/EventHandler.scala index d4fc55b0a9..9d53b57787 100644 --- a/akka-actor/src/main/scala/akka/event/EventHandler.scala +++ b/akka-actor/src/main/scala/akka/event/EventHandler.scala @@ -5,8 +5,6 @@ package akka.event import akka.actor._ -import Actor._ -import akka.dispatch._ import akka.config.Config._ import akka.config.ConfigurationException import akka.util.{ListenerManagement, ReflectiveAccess} @@ -25,7 +23,7 @@ import akka.AkkaException * case EventHandler.Warning(instance, message) => ... * case EventHandler.Info(instance, message) => ... * case EventHandler.Debug(instance, message) => ... 
- * case genericEvent => ... + * case genericEvent => ... * } * }) * @@ -35,17 +33,17 @@ import akka.AkkaException *
*

* However best is probably to register the listener in the 'akka.conf' - * configuration file. + * configuration file. *

* Log an error event: *

- * EventHandler.notify(EventHandler.Error(exception, this, message.toString))
+ * EventHandler.notify(EventHandler.Error(exception, this, message))
  * 
* Or use the direct methods (better performance): *
- * EventHandler.error(exception, this, message.toString)
+ * EventHandler.error(exception, this, message)
  * 
- * + * * @author Jonas Bonér */ object EventHandler extends ListenerManagement { @@ -61,11 +59,20 @@ object EventHandler extends ListenerManagement { sealed trait Event { @transient val thread: Thread = Thread.currentThread + val level: Int + } + case class Error(cause: Throwable, instance: AnyRef, message: Any = "") extends Event { + override val level = ErrorLevel + } + case class Warning(instance: AnyRef, message: Any = "") extends Event { + override val level = WarningLevel + } + case class Info(instance: AnyRef, message: Any = "") extends Event { + override val level = InfoLevel + } + case class Debug(instance: AnyRef, message: Any = "") extends Event { + override val level = DebugLevel } - case class Error(cause: Throwable, instance: AnyRef, message: String = "") extends Event - case class Warning(instance: AnyRef, message: String = "") extends Event - case class Info(instance: AnyRef, message: String = "") extends Event - case class Debug(instance: AnyRef, message: String = "") extends Event val error = "[ERROR] [%s] [%s] [%s] %s\n%s".intern val warning = "[WARN] [%s] [%s] [%s] %s".intern @@ -73,7 +80,7 @@ object EventHandler extends ListenerManagement { val debug = "[DEBUG] [%s] [%s] [%s] %s".intern val generic = "[GENERIC] [%s] [%s]".intern val ID = "event:handler".intern - + class EventHandlerException extends AkkaException lazy val EventHandlerDispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(ID).build @@ -87,32 +94,61 @@ object EventHandler extends ListenerManagement { "Configuration option 'akka.event-handler-level' is invalid [" + unknown + "]") } - def notify(event: => AnyRef) = notifyListeners(event) + def notify(event: Any) { + if (event.isInstanceOf[Event]) { + if (level >= event.asInstanceOf[Event].level) notifyListeners(event) + } else + notifyListeners(event) + } def notify[T <: Event : ClassManifest](event: => T) { if (level >= levelFor(classManifest[T].erasure.asInstanceOf[Class[_ <: Event]])) notifyListeners(event) } - def 
error(cause: Throwable, instance: AnyRef, message: => String) = { + def error(cause: Throwable, instance: AnyRef, message: => String) { if (level >= ErrorLevel) notifyListeners(Error(cause, instance, message)) } - def error(instance: AnyRef, message: => String) = { + def error(cause: Throwable, instance: AnyRef, message: Any) { + if (level >= ErrorLevel) notifyListeners(Error(cause, instance, message)) + } + + def error(instance: AnyRef, message: => String) { if (level >= ErrorLevel) notifyListeners(Error(new EventHandlerException, instance, message)) } - def warning(instance: AnyRef, message: => String) = { + def error(instance: AnyRef, message: Any) { + if (level >= ErrorLevel) notifyListeners(Error(new EventHandlerException, instance, message)) + } + + def warning(instance: AnyRef, message: => String) { if (level >= WarningLevel) notifyListeners(Warning(instance, message)) } - def info(instance: AnyRef, message: => String) = { + def warning(instance: AnyRef, message: Any) { + if (level >= WarningLevel) notifyListeners(Warning(instance, message)) + } + + def info(instance: AnyRef, message: => String) { if (level >= InfoLevel) notifyListeners(Info(instance, message)) } - def debug(instance: AnyRef, message: => String) = { + def info(instance: AnyRef, message: Any) { + if (level >= InfoLevel) notifyListeners(Info(instance, message)) + } + + def debug(instance: AnyRef, message: => String) { if (level >= DebugLevel) notifyListeners(Debug(instance, message)) } + def debug(instance: AnyRef, message: Any) { + if (level >= DebugLevel) notifyListeners(Debug(instance, message)) + } + + def isInfoEnabled = level >= InfoLevel + + def isDebugEnabled = level >= DebugLevel + def formattedTimestamp = DateFormat.getInstance.format(new Date) def stackTraceFor(e: Throwable) = { @@ -129,7 +165,7 @@ object EventHandler extends ListenerManagement { else if (eventClass.isInstanceOf[Debug]) DebugLevel else DebugLevel } - + class DefaultListener extends Actor { self.id = ID 
self.dispatcher = EventHandlerDispatcher @@ -165,10 +201,14 @@ object EventHandler extends ListenerManagement { } } - config.getList("akka.event-handlers") foreach { listenerName => + val defaultListeners = config.getList("akka.event-handlers") match { + case Nil => "akka.event.EventHandler$DefaultListener" :: Nil + case listeners => listeners + } + defaultListeners foreach { listenerName => try { - ReflectiveAccess.getClassFor[Actor](listenerName) map { - clazz => addListener(Actor.actorOf(clazz).start) + ReflectiveAccess.getClassFor[Actor](listenerName) map { clazz => + addListener(Actor.actorOf(clazz).start()) } } catch { case e: Exception => diff --git a/akka-actor/src/main/scala/akka/remoteinterface/RemoteEventHandler.scala b/akka-actor/src/main/scala/akka/remoteinterface/RemoteEventHandler.scala new file mode 100644 index 0000000000..c3ad4d9c79 --- /dev/null +++ b/akka-actor/src/main/scala/akka/remoteinterface/RemoteEventHandler.scala @@ -0,0 +1,44 @@ +package akka.remoteinterface + +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +import akka.actor.Actor +import akka.event.EventHandler + +/** + * Remote client and server event listener that pipes the events to the standard Akka EventHander. 
+ * + * @author Jonas Bonér + */ +class RemoteEventHandler extends Actor { + import EventHandler._ + + self.id = ID + self.dispatcher = EventHandlerDispatcher + + def receive = { + + // client + case RemoteClientError(cause, client, address) => EventHandler.error(cause, client, "RemoteClientError - Address[%s]" format address.toString) + case RemoteClientWriteFailed(request, cause, client, address) => EventHandler.error(cause, client, "RemoteClientWriteFailed - Request[%s] Address[%s]".format(address.toString)) + case RemoteClientDisconnected(client, address) => EventHandler.info(client, "RemoteClientDisconnected - Address[%s]" format address.toString) + case RemoteClientConnected(client, address) => EventHandler.info(client, "RemoteClientConnected - Address[%s]" format address.toString) + case RemoteClientStarted(client, address) => EventHandler.info(client, "RemoteClientStarted - Address[%s]" format address.toString) + case RemoteClientShutdown(client, address) => EventHandler.info(client, "RemoteClientShutdown - Address[%s]" format address.toString) + + // server + case RemoteServerError(cause, server) => EventHandler.error(cause, server, "RemoteServerError") + case RemoteServerWriteFailed(request, cause, server, clientAddress) => EventHandler.error(cause, server, "RemoteServerWriteFailed - Request[%s] Address[%s]" format (request, clientAddress.toString)) + case RemoteServerStarted(server) => EventHandler.info(server, "RemoteServerStarted") + case RemoteServerShutdown(server) => EventHandler.info(server, "RemoteServerShutdown") + case RemoteServerClientConnected(server, clientAddress) => EventHandler.info(server, "RemoteServerClientConnected - Address[%s]" format clientAddress.toString) + case RemoteServerClientDisconnected(server, clientAddress) => EventHandler.info(server, "RemoteServerClientDisconnected - Address[%s]" format clientAddress.toString) + case RemoteServerClientClosed(server, clientAddress) => EventHandler.info(server, "RemoteServerClientClosed - 
Address[%s]" format clientAddress.toString) + + case _ => //ignore other + } +} + + diff --git a/akka-actor/src/main/scala/akka/remoteinterface/RemoteInterface.scala b/akka-actor/src/main/scala/akka/remoteinterface/RemoteInterface.scala index 62dcc422ee..081b622f39 100644 --- a/akka-actor/src/main/scala/akka/remoteinterface/RemoteInterface.scala +++ b/akka-actor/src/main/scala/akka/remoteinterface/RemoteInterface.scala @@ -5,22 +5,23 @@ package akka.remoteinterface import akka.japi.Creator -import java.net.InetSocketAddress import akka.actor._ import akka.util._ import akka.dispatch.CompletableFuture -import akka.config.Config.{config, TIME_UNIT} -import java.util.concurrent.ConcurrentHashMap import akka.AkkaException -import reflect.BeanProperty + +import scala.reflect.BeanProperty + +import java.net.InetSocketAddress +import java.util.concurrent.ConcurrentHashMap +import java.io.{PrintWriter, PrintStream} trait RemoteModule { - val UUID_PREFIX = "uuid:" + val UUID_PREFIX = "uuid:".intern def optimizeLocalScoped_?(): Boolean //Apply optimizations for remote operations in local scope protected[akka] def notifyListeners(message: => Any): Unit - private[akka] def actors: ConcurrentHashMap[String, ActorRef] private[akka] def actorsByUuid: ConcurrentHashMap[String, ActorRef] private[akka] def actorsFactories: ConcurrentHashMap[String, () => ActorRef] @@ -28,7 +29,6 @@ trait RemoteModule { private[akka] def typedActorsByUuid: ConcurrentHashMap[String, AnyRef] private[akka] def typedActorsFactories: ConcurrentHashMap[String, () => AnyRef] - /** Lookup methods **/ private[akka] def findActorById(id: String) : ActorRef = actors.get(id) @@ -84,7 +84,6 @@ case class RemoteClientWriteFailed( @BeanProperty client: RemoteClientModule, @BeanProperty remoteAddress: InetSocketAddress) extends RemoteClientLifeCycleEvent - /** * Life-cycle events for RemoteServer. 
*/ @@ -114,38 +113,57 @@ case class RemoteServerWriteFailed( /** * Thrown for example when trying to send a message using a RemoteClient that is either not started or shut down. */ -class RemoteClientException private[akka] (message: String, - @BeanProperty val client: RemoteClientModule, - val remoteAddress: InetSocketAddress) extends AkkaException(message) +class RemoteClientException private[akka] ( + message: String, + @BeanProperty val client: RemoteClientModule, + val remoteAddress: InetSocketAddress) extends AkkaException(message) /** - * Returned when a remote exception cannot be instantiated or parsed + * Thrown when the remote server actor dispatching fails for some reason. */ -case class UnparsableException private[akka] (originalClassName: String, - originalMessage: String) extends AkkaException(originalMessage) +class RemoteServerException private[akka] (message: String) extends AkkaException(message) +/** + * Thrown when a remote exception sent over the wire cannot be loaded and instantiated + */ +case class CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException private[akka] (cause: Throwable, originalClassName: String, originalMessage: String) + extends AkkaException("\nParsingError[%s]\nOriginalException[%s]\nOriginalMessage[%s]" + .format(cause.toString, originalClassName, originalMessage)) { + override def printStackTrace = cause.printStackTrace + override def printStackTrace(printStream: PrintStream) = cause.printStackTrace(printStream) + override def printStackTrace(printWriter: PrintWriter) = cause.printStackTrace(printWriter) +} abstract class RemoteSupport extends ListenerManagement with RemoteServerModule with RemoteClientModule { + + lazy val eventHandler: ActorRef = { + val handler = Actor.actorOf[RemoteEventHandler].start() + // add the remote client and server listener that pipes the events to the event handler system + addListener(handler) + handler + } + def shutdown { + eventHandler.stop() + 
removeListener(eventHandler) this.shutdownClientModule this.shutdownServerModule clear } - /** * Creates a Client-managed ActorRef out of the Actor of the specified Class. * If the supplied host and port is identical of the configured local node, it will be a local actor *
    *   import Actor._
    *   val actor = actorOf(classOf[MyActor],"www.akka.io", 2552)
-   *   actor.start
+   *   actor.start()
    *   actor ! message
-   *   actor.stop
+   *   actor.stop()
    * 
* You can create and start the actor in one statement like this: *
-   *   val actor = actorOf(classOf[MyActor],"www.akka.io", 2552).start
+   *   val actor = actorOf(classOf[MyActor],"www.akka.io", 2552).start()
    * 
*/ @deprecated("Will be removed after 1.1") @@ -158,13 +176,13 @@ abstract class RemoteSupport extends ListenerManagement with RemoteServerModule *
    *   import Actor._
    *   val actor = actorOf(classOf[MyActor],"www.akka.io",2552)
-   *   actor.start
+   *   actor.start()
    *   actor ! message
-   *   actor.stop
+   *   actor.stop()
    * 
* You can create and start the actor in one statement like this: *
-   *   val actor = actorOf(classOf[MyActor],"www.akka.io",2552).start
+   *   val actor = actorOf(classOf[MyActor],"www.akka.io",2552).start()
    * 
*/ @deprecated("Will be removed after 1.1") @@ -186,13 +204,13 @@ abstract class RemoteSupport extends ListenerManagement with RemoteServerModule *
    *   import Actor._
    *   val actor = actorOf[MyActor]("www.akka.io",2552)
-   *   actor.start
+   *   actor.start()
    *   actor ! message
-   *   actor.stop
+   *   actor.stop()
    * 
* You can create and start the actor in one statement like this: *
-   *   val actor = actorOf[MyActor]("www.akka.io",2552).start
+   *   val actor = actorOf[MyActor]("www.akka.io",2552).start()
    * 
*/ @deprecated("Will be removed after 1.1") @@ -471,4 +489,4 @@ trait RemoteClientModule extends RemoteModule { self: RemoteModule => @deprecated("Will be removed after 1.1") private[akka] def unregisterClientManagedActor(hostname: String, port: Int, uuid: Uuid): Unit -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/routing/Iterators.scala b/akka-actor/src/main/scala/akka/routing/Iterators.scala index a01cc6fe2d..6172cf7ea6 100644 --- a/akka-actor/src/main/scala/akka/routing/Iterators.scala +++ b/akka-actor/src/main/scala/akka/routing/Iterators.scala @@ -6,19 +6,22 @@ package akka.routing import akka.actor.ActorRef import scala.collection.JavaConversions._ +import scala.collection.immutable.Seq /** * An Iterator that is either always empty or yields an infinite number of Ts. */ -trait InfiniteIterator[T] extends Iterator[T] +trait InfiniteIterator[T] extends Iterator[T] { + val items: Seq[T] +} /** * CyclicIterator is a round-robin style InfiniteIterator that cycles the supplied List. */ -class CyclicIterator[T](items: List[T]) extends InfiniteIterator[T] { +case class CyclicIterator[T](val items: Seq[T]) extends InfiniteIterator[T] { def this(items: java.util.List[T]) = this(items.toList) - @volatile private[this] var current: List[T] = items + @volatile private[this] var current: Seq[T] = items def hasNext = items != Nil @@ -29,14 +32,13 @@ class CyclicIterator[T](items: List[T]) extends InfiniteIterator[T] { } override def exists(f: T => Boolean): Boolean = items.exists(f) - } /** * This InfiniteIterator always returns the Actor that has the currently smallest mailbox * useful for work-stealing. 
*/ -class SmallestMailboxFirstIterator(items : List[ActorRef]) extends InfiniteIterator[ActorRef] { +case class SmallestMailboxFirstIterator(val items : Seq[ActorRef]) extends InfiniteIterator[ActorRef] { def this(items: java.util.List[ActorRef]) = this(items.toList) def hasNext = items != Nil diff --git a/akka-actor/src/main/scala/akka/routing/Routers.scala b/akka-actor/src/main/scala/akka/routing/Routers.scala index b0283ce77d..57511076e8 100644 --- a/akka-actor/src/main/scala/akka/routing/Routers.scala +++ b/akka-actor/src/main/scala/akka/routing/Routers.scala @@ -15,7 +15,11 @@ trait Dispatcher { this: Actor => protected def routes: PartialFunction[Any, ActorRef] + protected def broadcast(message: Any) {} + protected def dispatch: Receive = { + case Routing.Broadcast(message) => + broadcast(message) case a if routes.isDefinedAt(a) => if (isSenderDefined) routes(a).forward(transform(a))(someSelf) else routes(a).!(transform(a))(None) @@ -34,15 +38,19 @@ abstract class UntypedDispatcher extends UntypedActor { protected def route(msg: Any): ActorRef + protected def broadcast(message: Any) {} + private def isSenderDefined = self.senderFuture.isDefined || self.sender.isDefined @throws(classOf[Exception]) def onReceive(msg: Any): Unit = { - val r = route(msg) - if(r eq null) - throw new IllegalStateException("No route for " + msg + " defined!") - if (isSenderDefined) r.forward(transform(msg))(someSelf) - else r.!(transform(msg))(None) + if (msg.isInstanceOf[Routing.Broadcast]) broadcast(msg.asInstanceOf[Routing.Broadcast].message) + else { + val r = route(msg) + if (r eq null) throw new IllegalStateException("No route for " + msg + " defined!") + if (isSenderDefined) r.forward(transform(msg))(someSelf) + else r.!(transform(msg))(None) + } } } @@ -53,7 +61,11 @@ abstract class UntypedDispatcher extends UntypedActor { trait LoadBalancer extends Dispatcher { self: Actor => protected def seq: InfiniteIterator[ActorRef] - protected def routes = { case x if seq.hasNext => 
seq.next } + protected def routes = { + case x if seq.hasNext => seq.next + } + + override def broadcast(message: Any) = seq.items.foreach(_ ! message) override def isDefinedAt(msg: Any) = seq.exists( _.isDefinedAt(msg) ) } @@ -69,5 +81,7 @@ abstract class UntypedLoadBalancer extends UntypedDispatcher { if (seq.hasNext) seq.next else null + override def broadcast(message: Any) = seq.items.foreach(_ ! message) + override def isDefinedAt(msg: Any) = seq.exists( _.isDefinedAt(msg) ) } diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 1d43950f8b..d31653a2fb 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -9,6 +9,9 @@ import akka.actor.Actor._ object Routing { + sealed trait RoutingMessage + case class Broadcast(message: Any) extends RoutingMessage + type PF[A, B] = PartialFunction[A, B] /** @@ -31,26 +34,26 @@ object Routing { /** * Creates a LoadBalancer from the thunk-supplied InfiniteIterator. */ - def loadBalancerActor(actors: => InfiniteIterator[ActorRef]): ActorRef = + def loadBalancerActor(actors: => InfiniteIterator[ActorRef]): ActorRef = actorOf(new Actor with LoadBalancer { val seq = actors - }).start + }).start() /** * Creates a Dispatcher given a routing and a message-transforming function. */ - def dispatcherActor(routing: PF[Any, ActorRef], msgTransformer: (Any) => Any): ActorRef = + def dispatcherActor(routing: PF[Any, ActorRef], msgTransformer: (Any) => Any): ActorRef = actorOf(new Actor with Dispatcher { override def transform(msg: Any) = msgTransformer(msg) def routes = routing - }).start + }).start() /** * Creates a Dispatcher given a routing. 
*/ - def dispatcherActor(routing: PF[Any, ActorRef]): ActorRef = actorOf(new Actor with Dispatcher { + def dispatcherActor(routing: PF[Any, ActorRef]): ActorRef = actorOf(new Actor with Dispatcher { def routes = routing - }).start + }).start() /** * Creates an actor that pipes all incoming messages to diff --git a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala index 8c37845baf..ba4e508454 100644 --- a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala +++ b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala @@ -1,11 +1,15 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + package akka.util import java.util.concurrent.locks.ReentrantLock -import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.{ TimeUnit, BlockingQueue } import java.util.{ AbstractQueue, Queue, Collection, Iterator } -class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backing: Queue[E]) extends AbstractQueue[E] with BlockingQueue[E] { +class BoundedBlockingQueue[E <: AnyRef]( + val maxCapacity: Int, private val backing: Queue[E]) extends AbstractQueue[E] with BlockingQueue[E] { backing match { case null => throw new IllegalArgumentException("Backing Queue may not be null") @@ -32,7 +36,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin require(backing.offer(e)) notEmpty.signal() } finally { - lock.unlock() + lock.unlock() } } @@ -319,4 +323,4 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin lock.unlock() } } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index 743ce0fc4c..933e3cd9a9 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -37,7 +37,7 @@ object Duration { * Construct a Duration by 
parsing a String. In case of a format error, a * RuntimeException is thrown. See `unapply(String)` for more information. */ - def apply(s : String) : Duration = unapply(s) getOrElse error("format error") + def apply(s : String) : Duration = unapply(s) getOrElse sys.error("format error") /** * Deconstruct a Duration into length and unit if it is finite. @@ -77,7 +77,7 @@ object Duration { if ( ms ne null) Some(Duration(JDouble.parseDouble(length), MILLISECONDS)) else if (mus ne null) Some(Duration(JDouble.parseDouble(length), MICROSECONDS)) else if ( ns ne null) Some(Duration(JDouble.parseDouble(length), NANOSECONDS)) else - error("made some error in regex (should not be possible)") + sys.error("made some error in regex (should not be possible)") case REinf() => Some(Inf) case REminf() => Some(MinusInf) case _ => None @@ -317,26 +317,6 @@ class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration { override def hashCode = toNanos.asInstanceOf[Int] } -package object duration { - implicit def intToDurationInt(n: Int) = new DurationInt(n) - implicit def longToDurationLong(n: Long) = new DurationLong(n) - implicit def doubleToDurationDouble(d: Double) = new DurationDouble(d) - - implicit def pairIntToDuration(p : (Int, TimeUnit)) = Duration(p._1, p._2) - implicit def pairLongToDuration(p : (Long, TimeUnit)) = Duration(p._1, p._2) - implicit def durationToPair(d : Duration) = (d.length, d.unit) - - implicit def intMult(i : Int) = new { - def *(d : Duration) = d * i - } - implicit def longMult(l : Long) = new { - def *(d : Duration) = d * l - } - implicit def doubleMult(f : Double) = new { - def *(d : Duration) = d * f - } -} - class DurationInt(n: Int) { def nanoseconds = Duration(n, NANOSECONDS) def nanos = Duration(n, NANOSECONDS) diff --git a/akka-actor/src/main/scala/akka/util/ListenerManagement.scala b/akka-actor/src/main/scala/akka/util/ListenerManagement.scala index 916fac9c6a..777c048d70 100644 --- 
a/akka-actor/src/main/scala/akka/util/ListenerManagement.scala +++ b/akka-actor/src/main/scala/akka/util/ListenerManagement.scala @@ -27,7 +27,7 @@ trait ListenerManagement { * The listener is started by this method if manageLifeCycleOfListeners yields true. */ def addListener(listener: ActorRef) { - if (manageLifeCycleOfListeners) listener.start + if (manageLifeCycleOfListeners) listener.start() listeners add listener } @@ -37,7 +37,7 @@ trait ListenerManagement { */ def removeListener(listener: ActorRef) { listeners remove listener - if (manageLifeCycleOfListeners) listener.stop + if (manageLifeCycleOfListeners) listener.stop() } /* diff --git a/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala b/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala index 41d1106818..f4ceba6ebe 100644 --- a/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala +++ b/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala @@ -6,7 +6,6 @@ package akka.util import akka.dispatch.{Future, CompletableFuture, MessageInvocation} import akka.config.{Config, ModuleNotAvailableException} -import akka.AkkaException import java.net.InetSocketAddress import akka.remoteinterface.RemoteSupport @@ -45,13 +44,13 @@ object ReflectiveAccess { def ensureEnabled = if (!isEnabled) { val e = new ModuleNotAvailableException( "Can't load the remoting module, make sure that akka-remote.jar is on the classpath") - EventHandler.warning(this, e.toString) + EventHandler.debug(this, e.toString) throw e } val remoteSupportClass: Option[Class[_ <: RemoteSupport]] = getClassFor(TRANSPORT) - protected[akka] val defaultRemoteSupport: Option[() => RemoteSupport] = - remoteSupportClass map { remoteClass => + protected[akka] val defaultRemoteSupport: Option[() => RemoteSupport] = + remoteSupportClass map { remoteClass => () => createInstance[RemoteSupport]( remoteClass, Array[Class[_]](), @@ -59,7 +58,7 @@ object ReflectiveAccess { ) getOrElse { val e = new ModuleNotAvailableException( "Can't 
instantiate [%s] - make sure that akka-remote.jar is on the classpath".format(remoteClass.getName)) - EventHandler.warning(this, e.toString) + EventHandler.debug(this, e.toString) throw e } } @@ -135,7 +134,7 @@ object ReflectiveAccess { Some(ctor.newInstance(args: _*).asInstanceOf[T]) } catch { case e: Exception => - EventHandler.warning(this, e.toString) + EventHandler.debug(this, e.toString) None } @@ -154,7 +153,7 @@ object ReflectiveAccess { } } catch { case e: Exception => - EventHandler.warning(this, e.toString) + EventHandler.debug(this, e.toString) None } @@ -168,7 +167,7 @@ object ReflectiveAccess { } } catch { case e: ExceptionInInitializerError => - EventHandler.warning(this, e.toString) + EventHandler.debug(this, e.toString) throw e } @@ -176,23 +175,23 @@ object ReflectiveAccess { assert(fqn ne null) // First, use the specified CL - val first = try { - Option(classloader.loadClass(fqn).asInstanceOf[Class[T]]) - } catch { - case c: ClassNotFoundException => - EventHandler.warning(this, c.toString) - None - } + val first = try { + Option(classloader.loadClass(fqn).asInstanceOf[Class[T]]) + } catch { + case c: ClassNotFoundException => + EventHandler.debug(this, c.toString) + None + } if (first.isDefined) first - else { + else { // Second option is to use the ContextClassLoader - val second = try { - Option(Thread.currentThread.getContextClassLoader.loadClass(fqn).asInstanceOf[Class[T]]) - } catch { - case c: ClassNotFoundException => - EventHandler.warning(this, c.toString) - None + val second = try { + Option(Thread.currentThread.getContextClassLoader.loadClass(fqn).asInstanceOf[Class[T]]) + } catch { + case c: ClassNotFoundException => + EventHandler.debug(this, c.toString) + None } if (second.isDefined) second @@ -201,22 +200,22 @@ object ReflectiveAccess { // Don't try to use "loader" if we got the default "classloader" parameter if (classloader ne loader) Option(loader.loadClass(fqn).asInstanceOf[Class[T]]) else None - } catch { - case c: 
ClassNotFoundException => - EventHandler.warning(this, c.toString) - None + } catch { + case c: ClassNotFoundException => + EventHandler.debug(this, c.toString) + None } if (third.isDefined) third else { // Last option is Class.forName - try { + try { Option(Class.forName(fqn).asInstanceOf[Class[T]]) - } catch { - case c: ClassNotFoundException => - EventHandler.warning(this, c.toString) - None - } + } catch { + case c: ClassNotFoundException => + EventHandler.debug(this, c.toString) + None + } } } } diff --git a/akka-actor/src/main/scala/akka/util/package.scala b/akka-actor/src/main/scala/akka/util/package.scala new file mode 100644 index 0000000000..662c0d7393 --- /dev/null +++ b/akka-actor/src/main/scala/akka/util/package.scala @@ -0,0 +1,27 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.util + +import java.util.concurrent.TimeUnit + +package object duration { + implicit def intToDurationInt(n: Int) = new DurationInt(n) + implicit def longToDurationLong(n: Long) = new DurationLong(n) + implicit def doubleToDurationDouble(d: Double) = new DurationDouble(d) + + implicit def pairIntToDuration(p : (Int, TimeUnit)) = Duration(p._1, p._2) + implicit def pairLongToDuration(p : (Long, TimeUnit)) = Duration(p._1, p._2) + implicit def durationToPair(d : Duration) = (d.length, d.unit) + + implicit def intMult(i : Int) = new { + def *(d : Duration) = d * i + } + implicit def longMult(l : Long) = new { + def *(d : Duration) = d * l + } + implicit def doubleMult(f : Double) = new { + def *(d : Duration) = d * f + } +} diff --git a/akka-actor/src/test/scala/akka/Testing.scala b/akka-actor/src/test/scala/akka/Testing.scala deleted file mode 100644 index afc0c4a05a..0000000000 --- a/akka-actor/src/test/scala/akka/Testing.scala +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Copyright (C) 2009-2011 Scalable Solutions AB - */ - -package akka - -/** - * Multiplying numbers used in test timeouts by a factor, set by system property. 
- * Useful for Jenkins builds (where the machine may need more time). - */ -object Testing { - val timeFactor: Double = { - val factor = System.getProperty("akka.test.timefactor", "1.0") - try { - factor.toDouble - } catch { - case e: java.lang.NumberFormatException => 1.0 - } - } - - def time(t: Int): Int = (timeFactor * t).toInt - def time(t: Long): Long = (timeFactor * t).toLong - def time(t: Float): Float = (timeFactor * t).toFloat - def time(t: Double): Double = timeFactor * t -} diff --git a/akka-actor/src/test/scala/akka/actor/actor/ActorFireForgetRequestReplySpec.scala b/akka-actor/src/test/scala/akka/actor/actor/ActorFireForgetRequestReplySpec.scala deleted file mode 100644 index 1eef7f068c..0000000000 --- a/akka-actor/src/test/scala/akka/actor/actor/ActorFireForgetRequestReplySpec.scala +++ /dev/null @@ -1,93 +0,0 @@ -package akka.actor - -import java.util.concurrent.{TimeUnit, CyclicBarrier, TimeoutException} -import akka.config.Supervision._ -import org.scalatest.junit.JUnitSuite -import org.junit.Test - -import akka.dispatch.Dispatchers -import Actor._ - -import akka.Testing - -object ActorFireForgetRequestReplySpec { - class ReplyActor extends Actor { - - def receive = { - case "Send" => - self.reply("Reply") - case "SendImplicit" => - self.sender.get ! "ReplyImplicit" - } - } - - class CrashingTemporaryActor extends Actor { - self.lifeCycle = Temporary - - def receive = { - case "Die" => - state.finished.await - throw new Exception("Expected exception") - } - } - - class SenderActor(replyActor: ActorRef) extends Actor { - - def receive = { - case "Init" => - replyActor ! "Send" - case "Reply" => { - state.s = "Reply" - state.finished.await - } - case "InitImplicit" => replyActor ! 
"SendImplicit" - case "ReplyImplicit" => { - state.s = "ReplyImplicit" - state.finished.await - } - } - } - - object state { - var s = "NIL" - val finished = new CyclicBarrier(2) - } -} - -class ActorFireForgetRequestReplySpec extends JUnitSuite { - import ActorFireForgetRequestReplySpec._ - - @Test - def shouldReplyToBangMessageUsingReply = { - state.finished.reset - val replyActor = actorOf[ReplyActor].start - val senderActor = actorOf(new SenderActor(replyActor)).start - senderActor ! "Init" - try { state.finished.await(1L, TimeUnit.SECONDS) } - catch { case e: TimeoutException => fail("Never got the message") } - assert("Reply" === state.s) - } - - @Test - def shouldReplyToBangMessageUsingImplicitSender = { - state.finished.reset - val replyActor = actorOf[ReplyActor].start - val senderActor = actorOf(new SenderActor(replyActor)).start - senderActor ! "InitImplicit" - try { state.finished.await(1L, TimeUnit.SECONDS) } - catch { case e: TimeoutException => fail("Never got the message") } - assert("ReplyImplicit" === state.s) - } - - @Test - def shouldShutdownCrashedTemporaryActor = { - state.finished.reset - val actor = actorOf[CrashingTemporaryActor].start - assert(actor.isRunning) - actor ! 
"Die" - try { state.finished.await(10L, TimeUnit.SECONDS) } - catch { case e: TimeoutException => fail("Never got the message") } - Thread.sleep(Testing.time(500)) - assert(actor.isShutdown) - } -} diff --git a/akka-actor/src/test/scala/akka/actor/actor/ActorRefSpec.scala b/akka-actor/src/test/scala/akka/actor/actor/ActorRefSpec.scala deleted file mode 100644 index d5d6b28edc..0000000000 --- a/akka-actor/src/test/scala/akka/actor/actor/ActorRefSpec.scala +++ /dev/null @@ -1,125 +0,0 @@ -/** - * Copyright (C) 2009-2011 Scalable Solutions AB - */ - -package akka.actor - -import org.scalatest.Spec -import org.scalatest.matchers.ShouldMatchers -import org.scalatest.BeforeAndAfterAll -import org.scalatest.junit.JUnitRunner -import org.junit.runner.RunWith - -import akka.actor._ -import akka.dispatch.Future -import java.util.concurrent.{CountDownLatch, TimeUnit} - -object ActorRefSpec { - - var latch = new CountDownLatch(4) - - class ReplyActor extends Actor { - var replyTo: Channel[Any] = null - - def receive = { - case "complexRequest" => { - replyTo = self.channel - val worker = Actor.actorOf[WorkerActor].start - worker ! "work" - } - case "complexRequest2" => - val worker = Actor.actorOf[WorkerActor].start - worker ! self.channel - case "workDone" => replyTo ! "complexReply" - case "simpleRequest" => self.reply("simpleReply") - } - } - - class WorkerActor() extends Actor { - def receive = { - case "work" => { - work - self.reply("workDone") - self.stop - } - case replyTo: Channel[Any] => { - work - replyTo ! "complexReply" - } - } - - private def work { - Thread.sleep(1000) - } - } - - class SenderActor(replyActor: ActorRef) extends Actor { - - def receive = { - case "complex" => replyActor ! "complexRequest" - case "complex2" => replyActor ! "complexRequest2" - case "simple" => replyActor ! 
"simpleRequest" - case "complexReply" => { - latch.countDown - } - case "simpleReply" => { - latch.countDown - } - } - } -} - -@RunWith(classOf[JUnitRunner]) -class ActorRefSpec extends - Spec with - ShouldMatchers with - BeforeAndAfterAll { - - import ActorRefSpec._ - - describe("ActorRef") { - it("should support to reply via channel") { - val serverRef = Actor.actorOf[ReplyActor].start - val clientRef = Actor.actorOf(new SenderActor(serverRef)).start - - clientRef ! "complex" - clientRef ! "simple" - clientRef ! "simple" - clientRef ! "simple" - assert(latch.await(4L, TimeUnit.SECONDS)) - latch = new CountDownLatch(4) - clientRef ! "complex2" - clientRef ! "simple" - clientRef ! "simple" - clientRef ! "simple" - assert(latch.await(4L, TimeUnit.SECONDS)) - clientRef.stop - serverRef.stop - } - - it("should stop when sent a poison pill") { - val ref = Actor.actorOf( - new Actor { - def receive = { - case 5 => self reply_? "five" - case null => self reply_? "null" - } - } - ).start - - val ffive: Future[String] = ref !!! 5 - val fnull: Future[String] = ref !!! null - - intercept[ActorKilledException] { - ref !! 
PoisonPill - fail("shouldn't get here") - } - - assert(ffive.resultOrException.get == "five") - assert(fnull.resultOrException.get == "null") - - assert(ref.isRunning == false) - assert(ref.isShutdown == true) - } - } -} diff --git a/akka-actor/src/test/scala/akka/actor/actor/FSMActorSpec.scala b/akka-actor/src/test/scala/akka/actor/actor/FSMActorSpec.scala deleted file mode 100644 index 5213557048..0000000000 --- a/akka-actor/src/test/scala/akka/actor/actor/FSMActorSpec.scala +++ /dev/null @@ -1,144 +0,0 @@ -/** - * Copyright (C) 2009-2011 Scalable Solutions AB - */ - -package akka.actor - -import org.scalatest.junit.JUnitSuite -import org.junit.Test -import FSM._ - -import org.multiverse.api.latches.StandardLatch - -import java.util.concurrent.TimeUnit - -import akka.util.duration._ - -object FSMActorSpec { - - - val unlockedLatch = new StandardLatch - val lockedLatch = new StandardLatch - val unhandledLatch = new StandardLatch - val terminatedLatch = new StandardLatch - val transitionLatch = new StandardLatch - val initialStateLatch = new StandardLatch - val transitionCallBackLatch = new StandardLatch - - sealed trait LockState - case object Locked extends LockState - case object Open extends LockState - - class Lock(code: String, timeout: (Long, TimeUnit)) extends Actor with FSM[LockState, CodeState] { - - startWith(Locked, CodeState("", code)) - - when(Locked) { - case Event(digit: Char, CodeState(soFar, code)) => { - soFar + digit match { - case incomplete if incomplete.length < code.length => - stay using CodeState(incomplete, code) - case codeTry if (codeTry == code) => { - doUnlock - goto(Open) using CodeState("", code) forMax timeout - } - case wrong => { - stay using CodeState("", code) - } - } - } - case Event("hello", _) => stay replying "world" - case Event("bye", _) => stop(Shutdown) - } - - when(Open) { - case Event(StateTimeout, _) => { - doLock - goto(Locked) - } - } - - whenUnhandled { - case Event(_, stateData) => { - unhandledLatch.open - stay 
- } - } - - onTransition(transitionHandler) - - def transitionHandler(from: LockState, to: LockState) = { - if (from == Locked && to == Open) transitionLatch.open - } - - onTermination { - case StopEvent(Shutdown, Locked, _) => - // stop is called from lockstate with shutdown as reason... - terminatedLatch.open - } - - // initialize the lock - initialize - - private def doLock() { - lockedLatch.open - } - - private def doUnlock = { - unlockedLatch.open - } - } - - case class CodeState(soFar: String, code: String) -} - -class FSMActorSpec extends JUnitSuite { - import FSMActorSpec._ - - - @Test - def unlockTheLock = { - - // lock that locked after being open for 1 sec - val lock = Actor.actorOf(new Lock("33221", (1, TimeUnit.SECONDS))).start - - val transitionTester = Actor.actorOf(new Actor { def receive = { - case Transition(_, _, _) => transitionCallBackLatch.open - case CurrentState(_, Locked) => initialStateLatch.open - }}).start - - lock ! SubscribeTransitionCallBack(transitionTester) - assert(initialStateLatch.tryAwait(1, TimeUnit.SECONDS)) - - lock ! '3' - lock ! '3' - lock ! '2' - lock ! '2' - lock ! '1' - - assert(unlockedLatch.tryAwait(1, TimeUnit.SECONDS)) - assert(transitionLatch.tryAwait(1, TimeUnit.SECONDS)) - assert(transitionCallBackLatch.tryAwait(1, TimeUnit.SECONDS)) - assert(lockedLatch.tryAwait(2, TimeUnit.SECONDS)) - - - lock ! "not_handled" - assert(unhandledLatch.tryAwait(2, TimeUnit.SECONDS)) - - val answerLatch = new StandardLatch - object Hello - object Bye - val tester = Actor.actorOf(new Actor { - protected def receive = { - case Hello => lock ! "hello" - case "world" => answerLatch.open - case Bye => lock ! "bye" - } - }).start - tester ! Hello - assert(answerLatch.tryAwait(2, TimeUnit.SECONDS)) - - tester ! 
Bye - assert(terminatedLatch.tryAwait(2, TimeUnit.SECONDS)) - } -} diff --git a/akka-actor/src/test/scala/akka/actor/actor/ReceiveTimeoutSpec.scala b/akka-actor/src/test/scala/akka/actor/actor/ReceiveTimeoutSpec.scala deleted file mode 100644 index 9e5fba863e..0000000000 --- a/akka-actor/src/test/scala/akka/actor/actor/ReceiveTimeoutSpec.scala +++ /dev/null @@ -1,108 +0,0 @@ -package akka.actor - -import org.scalatest.junit.JUnitSuite -import org.junit.Test - -import java.util.concurrent.TimeUnit -import org.multiverse.api.latches.StandardLatch -import Actor._ -import java.util.concurrent.atomic.AtomicInteger - -class ReceiveTimeoutSpec extends JUnitSuite { - - @Test def receiveShouldGetTimeout= { - - val timeoutLatch = new StandardLatch - - val timeoutActor = actorOf(new Actor { - self.receiveTimeout = Some(500L) - - protected def receive = { - case ReceiveTimeout => timeoutLatch.open - } - }).start - - assert(timeoutLatch.tryAwait(3, TimeUnit.SECONDS)) - timeoutActor.stop - } - - @Test def swappedReceiveShouldAlsoGetTimout = { - val timeoutLatch = new StandardLatch - - val timeoutActor = actorOf(new Actor { - self.receiveTimeout = Some(500L) - - protected def receive = { - case ReceiveTimeout => timeoutLatch.open - } - }).start - - // after max 1 second the timeout should already been sent - assert(timeoutLatch.tryAwait(3, TimeUnit.SECONDS)) - - val swappedLatch = new StandardLatch - timeoutActor ! HotSwap(self => { - case ReceiveTimeout => swappedLatch.open - }) - - assert(swappedLatch.tryAwait(3, TimeUnit.SECONDS)) - timeoutActor.stop - } - - @Test def timeoutShouldBeRescheduledAfterRegularReceive = { - - val timeoutLatch = new StandardLatch - case object Tick - val timeoutActor = actorOf(new Actor { - self.receiveTimeout = Some(500L) - - protected def receive = { - case Tick => () - case ReceiveTimeout => timeoutLatch.open - } - }).start - timeoutActor ! 
Tick - - assert(timeoutLatch.tryAwait(2, TimeUnit.SECONDS) == true) - timeoutActor.stop - } - - @Test def timeoutShouldBeTurnedOffIfDesired = { - val count = new AtomicInteger(0) - val timeoutLatch = new StandardLatch - case object Tick - val timeoutActor = actorOf(new Actor { - self.receiveTimeout = Some(500L) - - protected def receive = { - case Tick => () - case ReceiveTimeout => - count.incrementAndGet - timeoutLatch.open - self.receiveTimeout = None - } - }).start - timeoutActor ! Tick - - assert(timeoutLatch.tryAwait(2, TimeUnit.SECONDS) == true) - assert(count.get === 1) - timeoutActor.stop - } - - @Test def timeoutShouldNotBeSentWhenNotSpecified = { - val timeoutLatch = new StandardLatch - val timeoutActor = actorOf(new Actor { - - protected def receive = { - case ReceiveTimeout => timeoutLatch.open - } - }).start - - assert(timeoutLatch.tryAwait(1, TimeUnit.SECONDS) == false) - timeoutActor.stop - } - - @Test def ActorsReceiveTimeoutShouldBeReceiveTimeout { - assert(akka.actor.Actors.receiveTimeout() eq ReceiveTimeout) - } -} diff --git a/akka-actor/src/test/scala/akka/actor/supervisor/SupervisorSpec.scala b/akka-actor/src/test/scala/akka/actor/supervisor/SupervisorSpec.scala deleted file mode 100644 index ecc6dbfb4b..0000000000 --- a/akka-actor/src/test/scala/akka/actor/supervisor/SupervisorSpec.scala +++ /dev/null @@ -1,616 +0,0 @@ -/** - * Copyright (C) 2009-2011 Scalable Solutions AB - */ - -package akka.actor - -import akka.config.Supervision._ -import akka.{OneWay, Die, Ping} -import Actor._ - -import org.scalatest.junit.JUnitSuite -import org.junit.Test -import java.util.concurrent.atomic.AtomicInteger -import java.util.concurrent. 
{CountDownLatch, TimeUnit, LinkedBlockingQueue} - -object SupervisorSpec { - var messageLog = new LinkedBlockingQueue[String] - var oneWayLog = new LinkedBlockingQueue[String] - - def clearMessageLogs { - messageLog.clear - oneWayLog.clear - } - - class PingPong1Actor extends Actor { - import self._ - def receive = { - case Ping => - messageLog.put("ping") - reply("pong") - - case OneWay => - oneWayLog.put("oneway") - - case Die => - throw new RuntimeException("Expected exception; to test fault-tolerance") - } - override def postRestart(reason: Throwable) { - messageLog.put(reason.getMessage) - } - } - - class PingPong2Actor extends Actor { - import self._ - def receive = { - case Ping => - messageLog.put("ping") - reply("pong") - case Die => - throw new RuntimeException("Expected exception; to test fault-tolerance") - } - override def postRestart(reason: Throwable) { - messageLog.put(reason.getMessage) - } - } - - class PingPong3Actor extends Actor { - import self._ - def receive = { - case Ping => - messageLog.put("ping") - reply("pong") - case Die => - throw new RuntimeException("Expected exception; to test fault-tolerance") - } - - override def postRestart(reason: Throwable) { - messageLog.put(reason.getMessage) - } - } - - class TemporaryActor extends Actor { - import self._ - lifeCycle = Temporary - def receive = { - case Ping => - messageLog.put("ping") - reply("pong") - case Die => - throw new RuntimeException("Expected exception; to test fault-tolerance") - } - - override def postRestart(reason: Throwable) { - messageLog.put(reason.getMessage) - } - } - - class Master extends Actor { - self.faultHandler = OneForOneStrategy(List(classOf[Exception]), 5, 1000) - val temp = self.spawnLink[TemporaryActor] - override def receive = { - case Die => temp !! 
(Die, 5000) - } - } -} - -/** - * @author Jonas Bonér - */ -class SupervisorSpec extends JUnitSuite { - import SupervisorSpec._ - - var pingpong1: ActorRef = _ - var pingpong2: ActorRef = _ - var pingpong3: ActorRef = _ - var temporaryActor: ActorRef = _ - - @Test def shoulNotRestartProgrammaticallyLinkedTemporaryActor = { - clearMessageLogs - val master = actorOf[Master].start - - intercept[RuntimeException] { - master !! (Die, 5000) - } - - Thread.sleep(1000) - assert(messageLog.size === 0) - } - - @Test def shoulNotRestartTemporaryActor = { - clearMessageLogs - val sup = getTemporaryActorAllForOneSupervisor - - intercept[RuntimeException] { - temporaryActor !! (Die, 5000) - } - - Thread.sleep(1000) - assert(messageLog.size === 0) - } - - @Test def shouldStartServerForNestedSupervisorHierarchy = { - clearMessageLogs - val sup = getNestedSupervisorsAllForOneConf - sup.start - - expect("pong") { - (pingpong1 !! (Ping, 5000)).getOrElse("nil") - } - } - - @Test def shouldKillSingleActorOneForOne = { - clearMessageLogs - val sup = getSingleActorOneForOneSupervisor - - intercept[RuntimeException] { - pingpong1 !! (Die, 5000) - } - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldCallKillCallSingleActorOneForOne = { - clearMessageLogs - val sup = getSingleActorOneForOneSupervisor - - expect("pong") { - (pingpong1 !! (Ping, 5000)).getOrElse("nil") - } - - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - intercept[RuntimeException] { - pingpong1 !! (Die, 5000) - } - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("pong") { - (pingpong1 !! (Ping, 5000)).getOrElse("nil") - } - - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldKillSingleActorAllForOne = { - clearMessageLogs - val sup = getSingleActorAllForOneSupervisor - - intercept[RuntimeException] { - pingpong1 !! 
(Die, 5000) - } - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldCallKillCallSingleActorAllForOne = { - clearMessageLogs - val sup = getSingleActorAllForOneSupervisor - - expect("pong") { - (pingpong1 !! (Ping, 5000)).getOrElse("nil") - } - - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - intercept[RuntimeException] { - pingpong1 !! (Die, 5000) - } - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("pong") { - (pingpong1 !! (Ping, 5000)).getOrElse("nil") - } - - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldKillMultipleActorsOneForOne1 = { - clearMessageLogs - val sup = getMultipleActorsOneForOneConf - - intercept[RuntimeException] { - pingpong1 !! (Die, 5000) - } - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldKillMultipleActorsOneForOne2 = { - clearMessageLogs - val sup = getMultipleActorsOneForOneConf - - intercept[RuntimeException] { - pingpong3 !! (Die, 5000) - } - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldKillCallMultipleActorsOneForOne = { - clearMessageLogs - val sup = getMultipleActorsOneForOneConf - - expect("pong") { - (pingpong1 !! (Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong2 !! (Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong3 !! (Ping, 5000)).getOrElse("nil") - } - - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - intercept[RuntimeException] { - pingpong2 !! (Die, 5000) - } - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("pong") { - (pingpong1 !! 
(Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong2 !! (Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong3 !! (Ping, 5000)).getOrElse("nil") - } - - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldKillMultipleActorsAllForOne = { - clearMessageLogs - val sup = getMultipleActorsAllForOneConf - - intercept[RuntimeException] { - pingpong2 !! (Die, 5000) - } - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldCallKillCallMultipleActorsAllForOne = { - clearMessageLogs - val sup = getMultipleActorsAllForOneConf - - expect("pong") { - (pingpong1 !! (Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong2 !! (Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong3 !! (Ping, 5000)).getOrElse("nil") - } - - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - intercept[RuntimeException] { - pingpong2 !! (Die, 5000) - } - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("pong") { - (pingpong1 !! (Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong2 !! (Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong3 !! 
(Ping, 5000)).getOrElse("nil") - } - - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldOneWayKillSingleActorOneForOne = { - clearMessageLogs - val sup = getSingleActorOneForOneSupervisor - - pingpong1 ! Die - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldOneWayCallKillCallSingleActorOneForOne = { - clearMessageLogs - val sup = getSingleActorOneForOneSupervisor - - pingpong1 ! OneWay - - expect("oneway") { - oneWayLog.poll(5, TimeUnit.SECONDS) - } - pingpong1 ! Die - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - pingpong1 ! OneWay - - expect("oneway") { - oneWayLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldRestartKilledActorsForNestedSupervisorHierarchy = { - clearMessageLogs - val sup = getNestedSupervisorsAllForOneConf - - - expect("pong") { - (pingpong1 !! (Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong2 !! (Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong3 !! (Ping, 5000)).getOrElse("nil") - } - - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - intercept[RuntimeException] { - pingpong2 !! (Die, 5000) - } - - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5 , TimeUnit.SECONDS) - } - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("Expected exception; to test fault-tolerance") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("pong") { - (pingpong1 !! (Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong2 !! (Ping, 5000)).getOrElse("nil") - } - - expect("pong") { - (pingpong3 !! 
(Ping, 5000)).getOrElse("nil") - } - - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - expect("ping") { - messageLog.poll(5, TimeUnit.SECONDS) - } - } - - @Test def shouldAttemptRestartWhenExceptionDuringRestart { - val inits = new AtomicInteger(0) - val dyingActor = actorOf(new Actor { - self.lifeCycle = Permanent - inits.incrementAndGet - - if (!(inits.get % 2 != 0)) - throw new IllegalStateException("Don't wanna!") - - def receive = { - case Ping => self.reply_?("pong") - case Die => throw new Exception("expected") - } - }) - val supervisor = - Supervisor( - SupervisorConfig( - OneForOneStrategy(classOf[Exception] :: Nil,3,10000), - Supervise(dyingActor,Permanent) :: Nil)) - - intercept[Exception] { - dyingActor !! (Die, 5000) - } - - expect("pong") { - (dyingActor !! (Ping, 5000)).getOrElse("nil") - } - - expect(3) { inits.get } - supervisor.shutdown - } - - // ============================================= - // Create some supervisors with different configurations - - def getTemporaryActorAllForOneSupervisor: Supervisor = { - temporaryActor = actorOf[TemporaryActor].start - - Supervisor( - SupervisorConfig( - AllForOneStrategy(List(classOf[Exception]), 3, 5000), - Supervise( - temporaryActor, - Temporary) - :: Nil)) - } - - def getSingleActorAllForOneSupervisor: Supervisor = { - pingpong1 = actorOf[PingPong1Actor].start - - Supervisor( - SupervisorConfig( - AllForOneStrategy(List(classOf[Exception]), 3, 5000), - Supervise( - pingpong1, - Permanent) - :: Nil)) - } - - def getSingleActorOneForOneSupervisor: Supervisor = { - pingpong1 = actorOf[PingPong1Actor].start - - Supervisor( - SupervisorConfig( - OneForOneStrategy(List(classOf[Exception]), 3, 5000), - Supervise( - pingpong1, - Permanent) - :: Nil)) - } - - def getMultipleActorsAllForOneConf: Supervisor = { - pingpong1 = actorOf[PingPong1Actor].start - pingpong2 = actorOf[PingPong2Actor].start - pingpong3 = 
actorOf[PingPong3Actor].start - - Supervisor( - SupervisorConfig( - AllForOneStrategy(List(classOf[Exception]), 3, 5000), - Supervise( - pingpong1, - Permanent) - :: - Supervise( - pingpong2, - Permanent) - :: - Supervise( - pingpong3, - Permanent) - :: Nil)) - } - - def getMultipleActorsOneForOneConf: Supervisor = { - pingpong1 = actorOf[PingPong1Actor].start - pingpong2 = actorOf[PingPong2Actor].start - pingpong3 = actorOf[PingPong3Actor].start - - Supervisor( - SupervisorConfig( - OneForOneStrategy(List(classOf[Exception]), 3, 5000), - Supervise( - pingpong1, - Permanent) - :: - Supervise( - pingpong2, - Permanent) - :: - Supervise( - pingpong3, - Permanent) - :: Nil)) - } - - def getNestedSupervisorsAllForOneConf: Supervisor = { - pingpong1 = actorOf[PingPong1Actor].start - pingpong2 = actorOf[PingPong2Actor].start - pingpong3 = actorOf[PingPong3Actor].start - - Supervisor( - SupervisorConfig( - AllForOneStrategy(List(classOf[Exception]), 3, 5000), - Supervise( - pingpong1, - Permanent) - :: - SupervisorConfig( - AllForOneStrategy(Nil, 3, 5000), - Supervise( - pingpong2, - Permanent) - :: - Supervise( - pingpong3, - Permanent) - :: Nil) - :: Nil)) - } -} diff --git a/akka-actor/src/test/scala/akka/dispatch/ThreadBasedDispatcherSpec.scala b/akka-actor/src/test/scala/akka/dispatch/ThreadBasedDispatcherSpec.scala deleted file mode 100644 index 603b17e336..0000000000 --- a/akka-actor/src/test/scala/akka/dispatch/ThreadBasedDispatcherSpec.scala +++ /dev/null @@ -1,91 +0,0 @@ -package akka.dispatch - -import java.util.concurrent.CountDownLatch -import java.util.concurrent.TimeUnit -import java.util.concurrent.atomic.AtomicBoolean -import java.util.concurrent.locks.Lock -import java.util.concurrent.locks.ReentrantLock - -import org.scalatest.junit.JUnitSuite -import org.junit.{Test, Before} - -import akka.actor.Actor -import Actor._ - -// FIXME use this test when we have removed the MessageInvoker classes -/* -class ThreadBasedDispatcherSpec extends JUnitSuite { - 
private var threadingIssueDetected: AtomicBoolean = null - val key1 = actorOf(new Actor { def receive = { case _ => {}} }) - val key2 = actorOf(new Actor { def receive = { case _ => {}} }) - val key3 = actorOf(new Actor { def receive = { case _ => {}} }) - - class TestMessageHandle(handleLatch: CountDownLatch) extends MessageInvoker { - val guardLock: Lock = new ReentrantLock - - def invoke(message: MessageInvocation) { - try { - if (threadingIssueDetected.get) return - if (guardLock.tryLock) { - handleLatch.countDown - } else { - threadingIssueDetected.set(true) - } - } catch { - case e: Exception => threadingIssueDetected.set(true) - } finally { - guardLock.unlock - } - } - } - - @Before - def setUp = { - threadingIssueDetected = new AtomicBoolean(false) - } - - @Test - def shouldMessagesDispatchedToTheSameHandlerAreExecutedSequentially = { - internalTestMessagesDispatchedToTheSameHandlerAreExecutedSequentially - } - - @Test - def shouldMessagesDispatchedToHandlersAreExecutedInFIFOOrder = { - internalTestMessagesDispatchedToHandlersAreExecutedInFIFOOrder - } - - private def internalTestMessagesDispatchedToTheSameHandlerAreExecutedSequentially(): Unit = { - val guardLock = new ReentrantLock - val handleLatch = new CountDownLatch(100) - val dispatcher = new ThreadBasedDispatcher("name", new TestMessageHandle(handleLatch)) - dispatcher.start - for (i <- 0 until 100) { - dispatcher.dispatch(new MessageInvocation(key1, new Object, None, None)) - } - assert(handleLatch.await(5, TimeUnit.SECONDS)) - assert(!threadingIssueDetected.get) - } - - private def internalTestMessagesDispatchedToHandlersAreExecutedInFIFOOrder(): Unit = { - val handleLatch = new CountDownLatch(100) - val dispatcher = new ThreadBasedDispatcher("name", new MessageInvoker { - var currentValue = -1; - def invoke(message: MessageInvocation) { - if (threadingIssueDetected.get) return - val messageValue = message.message.asInstanceOf[Int] - if (messageValue.intValue == currentValue + 1) { - currentValue 
= messageValue.intValue - handleLatch.countDown - } else threadingIssueDetected.set(true) - } - }) - dispatcher.start - for (i <- 0 until 100) { - dispatcher.dispatch(new MessageInvocation(key1, i, None, None)) - } - assert(handleLatch.await(5, TimeUnit.SECONDS)) - assert(!threadingIssueDetected.get) - dispatcher.postStop - } -} -*/ diff --git a/akka-docs/Makefile b/akka-docs/Makefile new file mode 100644 index 0000000000..fedddbee17 --- /dev/null +++ b/akka-docs/Makefile @@ -0,0 +1,49 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html singlehtml latex pdf + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " singlehtml to make a single large HTML file" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " pdf to make LaTeX files and run them through pdflatex" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +pdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." 
+ make -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + diff --git a/akka-docs/_static/akka.png b/akka-docs/_static/akka.png new file mode 100644 index 0000000000..d79821a047 Binary files /dev/null and b/akka-docs/_static/akka.png differ diff --git a/akka-docs/_static/logo.png b/akka-docs/_static/logo.png new file mode 100644 index 0000000000..2c36c66a36 Binary files /dev/null and b/akka-docs/_static/logo.png differ diff --git a/akka-docs/conf.py b/akka-docs/conf.py new file mode 100644 index 0000000000..4ff27f40bb --- /dev/null +++ b/akka-docs/conf.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# +# Akka documentation build configuration file. +# + +import sys, os + +# -- General configuration ----------------------------------------------------- + +extensions = ['sphinx.ext.todo'] + +templates_path = ['_templates'] +source_suffix = '.rst' +master_doc = 'index' +exclude_patterns = ['_build', 'pending'] + +project = u'Akka' +copyright = u'2009-2011, Scalable Solutions AB' +version = '1.1' +release = '1.1' + +pygments_style = 'akka' +highlight_language = 'scala' + +# -- Options for HTML output --------------------------------------------------- + +html_theme = 'akka' +html_theme_options = { + 'full_logo': 'true' + } +html_theme_path = ['themes'] + +html_title = 'Akka Documentation' +html_logo = '_static/logo.png' +#html_favicon = None + +html_static_path = ['_static'] + +html_last_updated_fmt = '%b %d, %Y' +#html_sidebars = {} +#html_additional_pages = {} +html_domain_indices = False +html_use_index = False +html_show_sourcelink = False +html_show_sphinx = False +html_show_copyright = True +htmlhelp_basename = 'Akkadoc' + +# -- Options for LaTeX output -------------------------------------------------- + +latex_paper_size = 'a4' +latex_font_size = '10pt' + +latex_documents = [ + ('index', 'Akka.tex', u' Akka Documentation', + u'Scalable Solutions AB', 'manual'), +] + +latex_elements = { + 'classoptions': 
',oneside,openany', + 'babel': '\\usepackage[english]{babel}', + 'preamble': '\\definecolor{VerbatimColor}{rgb}{0.935,0.935,0.935}' + } + +# latex_logo = '_static/akka.png' diff --git a/akka-docs/index.rst b/akka-docs/index.rst new file mode 100644 index 0000000000..c7b2486170 --- /dev/null +++ b/akka-docs/index.rst @@ -0,0 +1,81 @@ +Contents +======== + +.. toctree:: + :maxdepth: 2 + + manual/getting-started-first + pending/actor-registry-java + pending/actor-registry-scala + pending/actors-scala + pending/agents-scala + pending/articles + pending/benchmarks + pending/building-akka + pending/buildr + pending/cluster-membership + pending/companies-using-akka + pending/configuration + pending/dataflow-java + pending/dataflow-scala + pending/deployment-scenarios + pending/developer-guidelines + pending/dispatchers-java + pending/dispatchers-scala + pending/event-handler + pending/external-sample-projects + pending/fault-tolerance-java + pending/fault-tolerance-scala + pending/Feature Stability Matrix + pending/fsm-scala + pending/futures-scala + pending/getting-started + pending/guice-integration + pending/Home + pending/http + pending/issue-tracking + pending/language-bindings + pending/licenses + pending/logging + pending/Migration-1.0-1.1 + pending/migration-guide-0.10.x-1.0.x + pending/migration-guide-0.7.x-0.8.x + pending/migration-guide-0.8.x-0.9.x + pending/migration-guide-0.9.x-0.10.x + pending/migration-guides + pending/Recipes + pending/release-notes + pending/remote-actors-java + pending/remote-actors-scala + pending/routing-java + pending/routing-scala + pending/scheduler + pending/security + pending/serialization-java + pending/serialization-scala + pending/servlet + pending/slf4j + pending/sponsors + pending/stm + pending/stm-java + pending/stm-scala + pending/team + pending/test + pending/testkit + pending/testkit-example + pending/third-party-integrations + pending/transactors-java + pending/transactors-scala + pending/tutorial-chat-server-java + 
pending/tutorial-chat-server-scala + pending/typed-actors-java + pending/typed-actors-scala + pending/untyped-actors-java + pending/use-cases + pending/web + +Links +===== + +* `Akka Documentation `_ +* `Support `_ diff --git a/akka-docs/manual/getting-started-first.rst b/akka-docs/manual/getting-started-first.rst new file mode 100644 index 0000000000..e0f6be1954 --- /dev/null +++ b/akka-docs/manual/getting-started-first.rst @@ -0,0 +1,503 @@ +Getting Started Tutorial: First Chapter +======================================= + +Introduction +------------ + +Welcome to the first tutorial on how to get started with Akka and Scala. We assume that you already know what Akka and Scala is and will now focus on the steps necessary to start your first project. + +There are two variations of this first tutorial: + +- creating a standalone project and run it from the command line +- creating a SBT (Simple Build Tool) project and running it from within SBT + +Since they are so similar we will present them both in this tutorial. + +The sample application that we will create is using actors to calculate the value of Pi. Calculating Pi is a CPU intensive operation and we will utilize Akka Actors to write a concurrent solution that scales out to multi-core processors. This sample will be extended in future tutorials to use Akka Remote Actors to scale out on multiple machines in a cluster. + +We will be using an algorithm that is called "embarrassingly parallel" which just means that each job is completely isolated and not coupled with any other job. Since this algorithm is so parallelizable it suits the actor model very well. + +Here is the formula for the algorithm we will use: + +.. image:: pi-formula.png + +In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed, when each worker has processed its chunk it sends a result back to the master which aggregates to total result. 
+ +Tutorial source code +-------------------- + +If you don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here `_, with the actual source code `here `_. + +Prerequisites +------------- + +This tutorial assumes that you have Java 1.6 or later installed on your machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and a decent text editor or IDE to type in the Scala code. + +Downloading and installing Akka +------------------------------- + +If you want to be able to build and run the tutorial sample from the command line then you have to download Akka. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. + +Let's get the ``akka-1.1`` distribution of Akka core (not Akka Modules) from `http://akka.io/downloads `_. Once you have downloaded the distribution unzip it in the folder you would like to have Akka installed in, in my case I choose to install it in ``/Users/jboner/tools/``, simply by unzipping it to this directory. + +You need to do one more thing in order to install Akka properly and that is to set the ``AKKA_HOME`` environment variable to the root of the distribution. In my case I'm opening up a shell and navigating down to the distribution and setting the ``AKKA_HOME`` variable:: + + $ cd /Users/jboner/tools/akka-1.1 + $ export AKKA_HOME=`pwd` + $ echo $AKKA_HOME + /Users/jboner/tools/akka-1.1 + +If we now take a look at what we have in this distribution, it looks like this:: + + $ ls -l + total 16944 + drwxr-xr-x 7 jboner staff 238 Apr 6 11:15 . + drwxr-xr-x 28 jboner staff 952 Apr 6 11:16 ..
+ drwxr-xr-x 17 jboner staff 578 Apr 6 11:16 deploy + drwxr-xr-x 26 jboner staff 884 Apr 6 11:16 dist + drwxr-xr-x 3 jboner staff 102 Apr 6 11:15 lib_managed + -rwxr-xr-x 1 jboner staff 8674105 Apr 6 11:15 scala-library.jar + drwxr-xr-x 4 jboner staff 136 Apr 6 11:16 scripts + +- In the ``dist`` directory we have all the Akka JARs, including sources and docs. +- In the ``lib_managed/compile`` directory we have all of Akka's dependency JARs. +- In the ``deploy`` directory we have all the sample JARs. +- In the ``scripts`` directory we have scripts for running Akka. +- Finally the ``scala-library.jar`` is the JAR for the latest Scala distribution that Akka depends on. + +The only JAR we will need for this tutorial (apart from the ``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``dist`` directory. This is a self-contained JAR with zero dependencies and contains everything we need to write a system using Actors. + +Akka is very modular and has many JARs containing different features. The core distribution has seven modules: + +- ``akka-actor-1.1.jar`` -- Standard Actors +- ``akka-typed-actor-1.1.jar`` -- Typed Actors +- ``akka-remote-1.1.jar`` -- Remote Actors +- ``akka-stm-1.1.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures +- ``akka-http-1.1.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration +- ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener +- ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors + +We also have Akka Modules containing add-on modules for the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. We will not be needing any modules there today but for your information the module JARs are these: + +- ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.)
+- ``akka-amqp-1.1.jar`` -- AMQP integration +- ``akka-camel-1.1.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world) +- ``akka-camel-typed-1.1.jar`` -- Apache Camel Typed Actors integration +- ``akka-scalaz-1.1.jar`` -- Support for the Scalaz library +- ``akka-spring-1.1.jar`` -- Spring framework integration +- ``akka-osgi-dependencies-bundle-1.1.jar`` -- OSGi support + +Downloading and installing Scala +-------------------------------- + +If you want to be able to build and run the tutorial sample from the command line then you have to install the Scala distribution. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. + +Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0 final release. If you pick the ``tgz`` or ``zip`` distributions then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. + +You also need to make sure that the ``scala-2.9.0-final/bin`` (if that is the directory where you installed Scala) is on your ``PATH``:: + + $ export PATH=$PATH:scala-2.9.0-final/bin + +Now you can test you installation by invoking and see the printout:: + + $ scala -version + Scala code runner version 2.9.0.final -- Copyright 2002-2011, LAMP/EPFL + +Looks like we are all good. Finally let's create a source file ``Pi.scala`` for the tutorial and put it in the root of the Akka distribution in the ``tutorial`` directory (you have to create it first). + +Some tools requires you to set the ``SCALA_HOME`` environment variable to the root of the Scala distribution, however Akka does not require that. + +Downloading and installing SBT +------------------------------ + +SBT, short for 'Simple Build Tool' is an excellent build system written in Scala. 
You are using Scala to write the build scripts which gives you a lot of power. It has a plugin architecture with many plugins available, something that we will take advantage of soon. SBT is the preferred way of building software in Scala. If you want to use SBT for this tutorial then follow these instructions, if not you can skip this section and the next. + +To install SBT and create a project for this tutorial it is easiest to follow the instructions on `this page `_. The preferred SBT version to install is ``0.7.6``. + +If you have created an SBT project then step into the newly created SBT project, create a source file ``Pi.scala`` for the tutorial sample and put it in the ``src/main/scala`` directory. + +So far we only have a standard Scala project but now we need to make our project an Akka project. You could add the dependencies manually to the build script, but the easiest way is to use Akka's SBT Plugin, covered in the next section. + +Creating an Akka SBT project +---------------------------- + +If you have not already done so, now is the time to create an SBT project for our tutorial. You do that by stepping into the directory you want to create your project in and invoking the ``sbt`` command answering the questions for setting up your project (just pressing ENTER will choose the default in square brackets):: + + $ sbt + Project does not exist, create new project? (y/N/s) y + Name: Tutorial 1 + Organization: Hakkers Inc + Version [1.0]: + Scala version [2.9.0]: + sbt version [0.7.6]: + +Now we have the basis for an SBT project. Akka has an SBT Plugin that makes it very easy to use Akka in an SBT-based project so let's use that.
+ +To use the plugin, first add a plugin definition to your SBT project by creating a ``Plugins.scala`` file in the ``project/plugins`` directory containing:: + + import sbt._ + + class Plugins(info: ProjectInfo) extends PluginDefinition(info) { + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.1" + } + +Now we need to create a project definition using our Akka SBT plugin. We do that by creating a ``Project.scala`` file in the ``build`` directory containing:: + + import sbt._ + + class TutorialOneProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject { + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + } + +The magic is in mixing in the ``AkkaProject`` trait. + +Not needed in this tutorial, but if you would like to use additional Akka modules than ``akka-actor`` then you can add these as "module configurations" in the project file. Here is an example adding ``akka-remote`` and ``akka-stm``:: + + class AkkaSampleProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject { + val akkaSTM = akkaModule("stm") + val akkaRemote = akkaModule("remote") + } + +So, now we are all set. Just one final thing to do; make SBT download all dependencies it needs. That is done by invoking:: + + > update + +SBT itself needs a whole bunch of dependencies but our project will only need one; ``akka-actor-1.1.jar``. SBT downloads that as well. + +Start writing the code +---------------------- + +Now it's about time that we start hacking. 
+ +We start by creating a ``Pi.scala`` file and add these import statements at the top of the file:: + + package akka.tutorial.scala.first + + import akka.actor.{Actor, ActorRef, PoisonPill} + import Actor._ + import akka.routing.{Routing, CyclicIterator} + import Routing._ + import akka.dispatch.Dispatchers + + import java.util.concurrent.CountDownLatch + +If you are using SBT in this tutorial then create the file in the ``src/main/scala`` directory. + +If you are using the command line tools then just create the file wherever you want. I will create it in a directory called ``tutorial`` at the root of the Akka distribution, e.g. in ``$AKKA_HOME/tutorial/Pi.scala``. + +Creating the messages +--------------------- + +The design we are aiming for is to have one ``Master`` actor initiating the computation, creating a set of ``Worker`` actors. Then it splits up the work into discrete chunks, sends out these work chunks to the different workers in a round-robin fashion. The master then waits until all the workers have completed all the work and sent back the result for aggregation. When computation is completed the master prints out the result, shuts down all workers an then himself. + +With this in mind, let's now create the messages that we want to have flowing in the system. We need three different messages: + +- ``Calculate`` -- sent to the ``Master`` actor to start the calculation +- ``Work`` -- sent from the ``Master`` actor to the ``Worker`` actors containing the work assignment +- ``Result`` -- sent from the ``Worker`` actors to the ``Master`` actor containing the result from the worker's calculation + +Messages sent to actors should always be immutable to avoid sharing mutable state. In scala we have 'case classes' which make excellent messages. So let's start by creating three messages as case classes. 
We also create a common base trait for our messages (that we define as being ``sealed`` in order to prevent creating messages outside our control):: + + sealed trait PiMessage + + case object Calculate extends PiMessage + + case class Work(start: Int, nrOfElements: Int) extends PiMessage + + case class Result(value: Double) extends PiMessage + +Creating the worker +------------------- + +Now we can create the worker actor. This is done by mixing in the ``Actor`` trait and defining the ``receive`` method. The ``receive`` method defines our message handler. We expect it to be able to handle the ``Work`` message so we need to add a handler for this message:: + + class Worker extends Actor { + def receive = { + case Work(start, nrOfElements) => + self reply Result(calculatePiFor(start, nrOfElements)) // perform the work + } + } + +As you can see we have now created an ``Actor`` with a ``receive`` method as a handler for the ``Work`` message. In this handler we invoke the ``calculatePiFor(..)`` method, wrap the result in a ``Result`` message and send it back to the original sender using ``self.reply``. In Akka the sender reference is implicitly passed along with the message so that the receiver can always reply or store away the sender reference use. + +The only thing missing in our ``Worker`` actor is the implementation on the ``calculatePiFor(..)`` method. There are many ways we can implement this algorithm in Scala, in this introductory tutorial we have chosen an imperative style using a for comprehension and an accumulator:: + + def calculatePiFor(start: Int, nrOfElements: Int): Double = { + var acc = 0.0 + for (i <- start until (start + nrOfElements)) + acc += 4 * math.pow(-1, i) / (2 * i + 1) + acc + } + +Creating the master +------------------- + +The master actor is a little bit more involved. In its constructor we need to create the workers (the ``Worker`` actors) and start them. 
We will also wrap them in a load-balancing router to make it easier to spread out the work evenly between the workers. Let's do that first:: + + // create the workers + val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start()) + + // wrap them with a load-balancing router + val router = Routing.loadBalancerActor(CyclicIterator(workers)).start() + +As you can see we are using the ``actorOf`` factory method to create actors, this method returns as an ``ActorRef`` which is a reference to our newly created actor. This method is available in the ``Actor`` object but is usually imported:: + + import akka.actor.Actor._ + +Now we have a router that is representing all our workers in a single abstraction. If you paid attention to the code above to see that we were using the ``nrOfWorkers`` variable. This variable and others we have to pass to the ``Master`` actor in its constructor. So now let's create the master actor. We had to pass in three integer variables needed: + +- ``nrOfWorkers`` -- defining how many workers we should start up +- ``nrOfMessages`` -- defining how many number chunks should send out to the workers +- ``nrOfElements`` -- defining how big the number chunks sent to each worker should be + +Let's now write the master actor:: + + class Master(nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch) + extends Actor { + + var pi: Double = _ + var nrOfResults: Int = _ + var start: Long = _ + + // create the workers + val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start()) + + // wrap them with a load-balancing router + val router = Routing.loadBalancerActor(CyclicIterator(workers)).start() + + def receive = { ... } + + override def preStart { + start = now + } + + override def postStop { + // tell the world that the calculation is complete + println("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis".format(pi, (now - start))) + latch.countDown() + } + } + +Couple of things are worth explaining further. 
+ +First, we are passing in a ``java.util.concurrent.CountDownLatch`` to the ``Master`` actor. This latch is only used for doing plumbing (in this specific tutorial), to have a simple way of letting the outside world know when the master can deliver the result and shut down. In more idiomatic Akka code, as we will see in part two of this tutorial series, we would not use a latch but other abstractions and functions like ``Channel``, ``Future`` and ``!!!`` to achieve the same thing in a non-blocking way. But for simplicity let's stick to a ``CountDownLatch`` for now. + +Second, we are adding a couple of life-cycle callback methods; ``preStart`` and ``postStop``. In the ``preStart`` callback we are recording the time when the actor is started and in the ``postStop`` callback we are printing out the result (the approximation of Pi) and the time it took to calculate it. In this call we also invoke ``latch.countDown`` to tell the outside world that we are done. + +But we are not done yet. We are missing the message handler for the ``Master`` actor. This message handler needs to be able to react to two different messages: + +- ``Calculate`` -- which should start the calculation +- ``Result`` -- which should aggregate the different results + +The ``Calculate`` handler is sending out work to all the ``Worker`` actors and after doing that it also sends a ``Broadcast(PoisonPill)`` message to the router, which will send out the ``PoisonPill`` message to all the actors it is representing (in our case all the ``Worker`` actors). The ``PoisonPill`` is a special kind of message that tells the receiver to shut himself down using the normal shutdown; ``self.stop()``. Then we also send a ``PoisonPill`` to the router itself (since it's also an actor that we want to shut down). + +The ``Result`` handler is simpler, here we just get the value from the ``Result`` message and aggregate it to our ``pi`` member variable.
We also keep track of how many results we have received back and if it matches the number of tasks sent out the ``Master`` actor considers itself done and shuts himself down. + +Now, let's capture this in code:: + + // message handler + def receive = { + case Calculate => + // schedule work + for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements) + + // send a PoisonPill to all workers telling them to shut down themselves + router ! Broadcast(PoisonPill) + + // send a PoisonPill to the router, telling him to shut himself down + router ! PoisonPill + + case Result(value) => + // handle result from the worker + pi += value + nrOfResults += 1 + if (nrOfResults == nrOfMessages) self.stop() + } + +Bootstrap the calculation +------------------------- + +Now the only thing that is left to implement is the runner that should bootstrap and run his calculation for us. We do that by creating an object that we call ``Pi``, here we can extend the ``App`` trait in Scala which means that we will be able to run this as an application directly from the command line. The ``Pi`` object is a perfect container module for our actors and messages, so let's put them all there. We also create a method ``calculate`` in which we start up the ``Master`` actor and waits for it to finish:: + + object Pi extends App { + + calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000) + + ... // actors and messages + + def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) { + + // this latch is only plumbing to know when the calculation is completed + val latch = new CountDownLatch(1) + + // create the master + val master = actorOf(new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start() + + // start the calculation + master ! Calculate + + // wait for master to shut down + latch.await() + } + } + +That's it. Now we are done. 
+ +But before we package it up and run it, let's take a look at the full code now, with package declaration, imports and all:: + + package akka.tutorial.scala.first + + import akka.actor.{Actor, PoisonPill} + import Actor._ + import akka.routing.{Routing, CyclicIterator} + import Routing._ + + import System.{currentTimeMillis => now} + import java.util.concurrent.CountDownLatch + + object Pi extends App { + + calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000) + + // ==================== + // ===== Messages ===== + // ==================== + sealed trait PiMessage + case object Calculate extends PiMessage + case class Work(start: Int, nrOfElements: Int) extends PiMessage + case class Result(value: Double) extends PiMessage + + // ================== + // ===== Worker ===== + // ================== + class Worker extends Actor { + + // define the work + def calculatePiFor(start: Int, nrOfElements: Int): Double = { + var acc = 0.0 + for (i <- start until (start + nrOfElements)) + acc += 4 * math.pow(-1, i) / (2 * i + 1) + acc + } + + def receive = { + case Work(start, nrOfElements) => + self reply Result(calculatePiFor(start, nrOfElements)) // perform the work + } + } + + // ================== + // ===== Master ===== + // ================== + class Master(nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch) + extends Actor { + + var pi: Double = _ + var nrOfResults: Int = _ + var start: Long = _ + + // create the workers + val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start()) + + // wrap them with a load-balancing router + val router = Routing.loadBalancerActor(CyclicIterator(workers)).start() + + // message handler + def receive = { + case Calculate => + // schedule work + //for (arg <- 0 until nrOfMessages) router ! Work(arg, nrOfElements) + for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements) + + // send a PoisonPill to all workers telling them to shut down themselves + router ! 
Broadcast(PoisonPill) + + // send a PoisonPill to the router, telling him to shut himself down + router ! PoisonPill + + case Result(value) => + // handle result from the worker + pi += value + nrOfResults += 1 + if (nrOfResults == nrOfMessages) self.stop() + } + + override def preStart { + start = now + } + + override def postStop { + // tell the world that the calculation is complete + println("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis".format(pi, (now - start))) + latch.countDown() + } + } + + // ================== + // ===== Run it ===== + // ================== + def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) { + + // this latch is only plumbing to know when the calculation is completed + val latch = new CountDownLatch(1) + + // create the master + val master = actorOf(new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start() + + // start the calculation + master ! Calculate + + // wait for master to shut down + latch.await() + } + } + +Run it as a command line application +------------------------------------ + +If you have not typed (or copied) in the code for the tutorial in the ``$AKKA_HOME/tutorial/Pi.scala`` then now is the time. When that is done open up a shell and step in to the Akka distribution (``cd $AKKA_HOME``). + +First we need to compile the source file. That is done with Scala's compiler ``scalac``. Our application depends on the ``akka-actor-1.1.jar`` JAR file, so let's add that to the compiler classpath when we compile the source:: + + $ scalac -cp dist/akka-actor-1.1.jar tutorial/Pi.scala + +When we have compiled the source file we are ready to run the application. 
This is done with ``java`` but yet again we need to add the ``akka-actor-1.1.jar`` JAR file to the classpath, this time we also need to add the Scala runtime library ``scala-library.jar`` and the classes we compiled ourselves to the classpath:: + + $ java -cp dist/akka-actor-1.1.jar:scala-library.jar:tutorial akka.tutorial.scala.first.Pi + AKKA_HOME is defined as [/Users/jboner/src/akka-stuff/akka-core], loading config from \ + [/Users/jboner/src/akka-stuff/akka-core/config/akka.conf]. + + Pi estimate: 3.1435501812459323 + Calculation time: 858 millis + +Yippee! It is working. + +Run it inside SBT +----------------- + +If you have based the tutorial on SBT then you can run the application directly inside SBT. First you need to compile the project:: + + $ sbt + > update + ... + > compile + ... + +When this in done we can run our application directly inside SBT:: + + > run + ... + Pi estimate: 3.1435501812459323 + Calculation time: 942 millis + +Yippee! It is working. + +Conclusion +---------- + +Now we have learned how to create our first Akka project utilizing Akka's actors to speed up a computation intensive problem by scaling out on multi-core processors (also known as scaling up). We have also learned how to compile and run an Akka project utilizing either the tools on the command line or the SBT build system. + +Now we are ready to take on more advanced problems. In the next tutorial we will build upon this one, refactor it into more idiomatic Akka and Scala code and introduce a few new concepts and abstractions. Whenever you feel ready, join me in the `Getting Started Tutorial: Second Chapter `_. + +Happy hakking. 
diff --git a/akka-docs/manual/more.png b/akka-docs/manual/more.png new file mode 100644 index 0000000000..3eb7b05c84 Binary files /dev/null and b/akka-docs/manual/more.png differ diff --git a/akka-docs/manual/pi-formula.png b/akka-docs/manual/pi-formula.png new file mode 100644 index 0000000000..a813844901 Binary files /dev/null and b/akka-docs/manual/pi-formula.png differ diff --git a/akka-docs/pending/Feature Stability Matrix.rst b/akka-docs/pending/Feature Stability Matrix.rst new file mode 100644 index 0000000000..cdbd6b3ad9 --- /dev/null +++ b/akka-docs/pending/Feature Stability Matrix.rst @@ -0,0 +1,31 @@ +Feature Stability Matrix +======================== + +Akka is comprised of a number of modules, with different levels of maturity and in different parts of their lifecycle; the matrix below gives you the current stability level of the modules. + +Explanation of the different levels of stability +------------------------------------------------ + +* **Solid** - Proven solid in heavy production usage +* **Stable** - Ready for use in production environment +* **In progress** - Not enough feedback/use to claim it's ready for production use + +||~ Feature ||~ Solid ||~ Stable ||~ In progress || +||= ====`Actors (Scala) `_ ==== ||= Solid ||= ||= || +||= ====`Actors (Java) `_ ==== ||= Solid ||= ||= || +||= ====` Typed Actors (Scala) `_ ==== ||= Solid ||= ||= || +||= ====` Typed Actors (Java) `_ ==== ||= Solid ||= ||= || +||= ====`STM (Scala) `_ ==== ||= Solid ||= ||= || +||= ====`STM (Java) `_ ==== ||= Solid ||= ||= || +||= ====`Transactors (Scala) `_ ==== ||= Solid ||= ||= || +||= ====`Transactors (Java) `_ ==== ||= Solid ||= ||= || +||= ====`Remote Actors (Scala) `_ ==== ||= Solid ||= ||= || +||= ====`Remote Actors (Java) `_ ==== ||= Solid ||= ||= || +||= ====`Camel `_ ==== ||= Solid ||= ||= || +||= ====`AMQP `_ ==== ||= Solid ||= ||= || +||= ====`HTTP `_ ==== ||= Solid ||= ||= || +||= ====`Integration Guice `_ ==== ||= ||= Stable ||= || +||= ====`Integration 
Spring `_ ==== ||= ||= Stable ||= || +||= ====`JTA `_ ==== ||= ||= Stable ||= || +||= ====`Scheduler `_ ==== ||= Solid ||= ||= || +||= ====`Redis Pub Sub `_ ==== ||= ||= ||= In progress || diff --git a/akka-docs/pending/Home.rst b/akka-docs/pending/Home.rst new file mode 100644 index 0000000000..73c9f31172 --- /dev/null +++ b/akka-docs/pending/Home.rst @@ -0,0 +1,60 @@ +Akka +==== + +**Simpler Scalability, Fault-Tolerance, Concurrency & Remoting through Actors** + +We believe that writing correct concurrent, fault-tolerant and scalable applications is too hard. Most of the time it's because we are using the wrong tools and the wrong level of abstraction. Akka is here to change that. Using the Actor Model together with Software Transactional Memory we raise the abstraction level and provide a better platform to build correct concurrent and scalable applications. For fault-tolerance we adopt the "Let it crash" / "Embrace failure" model which have been used with great success in the telecom industry to build applications that self-heals, systems that never stop. Actors also provides the abstraction for transparent distribution and the basis for truly scalable and fault-tolerant applications. Akka is Open Source and available under the Apache 2 License. + +Akka is split up into two different parts: +* Akka - Reflects all the sections under 'Scala API' and 'Java API' in the navigation bar. +* Akka Modules - Reflects all the sections under 'Add-on modules' in the navigation bar. + +Download from ``_ + +News: Akka 1.0 final is released +-------------------------------- + +1.0 documentation +----------------- + +This documentation covers the latest release ready code in 'master' branch in the repository. +If you want the documentation for the 1.0 release you can find it `here `_. + +You can watch the recording of the `Akka talk at JFokus in Feb 2011 `_. 
+ +``_ + +**Akka implements a unique hybrid of:** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* `Actors `_, which gives you: + * Simple and high-level abstractions for concurrency and parallelism. + * Asynchronous, non-blocking and highly performant event-driven programming model. + * Very lightweight event-driven processes (create ~6.5 million actors on 4 G RAM). +* `Failure management `_ through supervisor hierarchies with `let-it-crash `_ semantics. Excellent for writing highly fault-tolerant systems that never stop, systems that self-heal. +* `Software Transactional Memory `_ (STM). (Distributed transactions coming soon). +* `Transactors `_: combine actors and STM into transactional actors. Allows you to compose atomic message flows with automatic retry and rollback. +* `Remote actors `_: highly performant distributed actors with remote supervision and error management. +* Java and Scala API. + +**Akka also has a set of add-on modules:** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* `Camel `_: Expose actors as Apache Camel endpoints. +* `Spring `_: Wire up typed actors in the Spring config using Akka's namespace. +* `REST `_ (JAX-RS): Expose actors as REST services. +* `OSGi `_: Akka and all its dependency is OSGi enabled. +* `Mist `_: Expose actors as asynchronous HTTP services. +* `Security `_: Basic, Digest and Kerberos based security. +* `Microkernel `_: Run Akka as a stand-alone self-hosted kernel. +* `FSM `_: Finite State Machine support. +* `JTA `_: Let the STM interoperate with other transactional resources. +* `Pub/Sub `_: Publish-Subscribe across remote nodes. + +**Akka can be used in two different ways:** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* As a library: used by a web app, to be put into ‘WEB-INF/lib’ or as a regular JAR on your classpath. +* As a microkernel: stand-alone kernel, embedding a servlet container and all the other modules. + +See the `Use-case and Deployment Scenarios `_ for details. 
diff --git a/akka-docs/pending/Migration-1.0-1.1.rst b/akka-docs/pending/Migration-1.0-1.1.rst new file mode 100644 index 0000000000..b9f88bf4fc --- /dev/null +++ b/akka-docs/pending/Migration-1.0-1.1.rst @@ -0,0 +1,32 @@ +Moved to Scala 2.9.x +^^^^^^^^^^^^^^^^^^^^ + +Akka HTTP +========= + +# akka.servlet.Initializer has been moved to akka-kernel to be able to have akka-http not depend on akka-remote, if you don't want to use the class for kernel, just create your own version of akka.servlet.Initializer, it's just a couple of lines of code and there are instructions here: `Akka Http Docs `_ +# akka.http.ListWriter has been removed in full, if you use it and want to keep using it, here's the code: `ListWriter `_ +# Jersey-server is now a "provided" dependency for Akka-http, so you'll need to add the dependency to your project, it's built against Jersey 1.3 + +Akka Actor +========== + +# is now dependency free, with the exception of the dependency on the scala-library.jar +# does not bundle any logging anymore, but you can subscribe to events within Akka by registering an event handler on akka.event.EventHandler or by specifying the FQN of an Actor in the akka.conf under akka.event-handlers; there is an akka-slf4j module which still provides the Logging trait and a default SLF4J logger adapter. +# If you used HawtDispatcher and want to continue using it, you need to include akka-dispatcher-extras.jar from Akka Modules, in your akka.conf you need to specify: "akka.dispatch.HawtDispatcherConfigurator" instead of "HawtDispatcher" +# FSM: the onTransition method changed from Function1 to PartialFunction; there is an implicit conversion for the precise types in place, but it may be necessary to add an underscore if you are passing an eta-expansion (using a method as function value). + +Akka Typed Actor +================ + +All methods starting with 'get*' are deprecated and will be removed in post 1.1 release. 
+ +Akka Remote +=========== + +# UnparsebleException => CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(exception, classname, message) + +Akka Testkit +============ + +The TestKit moved into the akka-testkit subproject and correspondingly into the akka.testkit package. diff --git a/akka-docs/pending/Recipes.rst b/akka-docs/pending/Recipes.rst new file mode 100644 index 0000000000..55bc4085a1 --- /dev/null +++ b/akka-docs/pending/Recipes.rst @@ -0,0 +1,6 @@ +Here is a list of recipes for all things Akka +============================================== + +* PostStart => `Link to Klangism `_ +* `Consumer actors best practices `_ +* `Producer actors best practices `_ diff --git a/akka-docs/pending/actor-registry-java.rst b/akka-docs/pending/actor-registry-java.rst new file mode 100644 index 0000000000..67be08b2a8 --- /dev/null +++ b/akka-docs/pending/actor-registry-java.rst @@ -0,0 +1,81 @@ +ActorRegistry (Java) +==================== + +Module stability: **SOLID** + +ActorRegistry: Finding Actors +----------------------------- + +Actors can be looked up using the 'akka.actor.Actors.registry()' object. Through this registry you can look up actors by: + +* uuid com.eaio.uuid.UUID – this uses the ‘uuid’ field in the Actor class, returns the actor reference for the actor with specified uuid, if one exists, otherwise None +* id string – this uses the ‘id’ field in the Actor class, which can be set by the user (default is the class name), returns all actor references to actors with specified id +* parameterized type - returns a 'ActorRef[]' with all actors that are a subtype of this specific type +* specific actor class - returns a 'ActorRef[]' with all actors of this exact class + +Actors are automatically registered in the ActorRegistry when they are started and removed when they are stopped. But you can explicitly register and unregister ActorRef's if you need to using the 'register' and 'unregister' methods. 
+ +Here is a summary of the API for finding actors: + +.. code-block:: java + + import static akka.actor.Actors.*; + Option actor = registry().actorFor(uuid); + ActorRef[] actors = registry().actors(); + ActorRef[] otherActors = registry().actorsFor(id); + ActorRef[] moreActors = registry().actorsFor(clazz); + +You can shut down all Actors in the system by invoking: + +.. code-block:: java + + registry().shutdownAll(); + +If you want to know when a new Actor is added to or removed from the registry, you can use the subscription API. You can register an Actor that should be notified when an event happens in the ActorRegistry: + +.. code-block:: java + + void addListener(ActorRef listener); + void removeListener(ActorRef listener); + +The messages sent to this Actor are: + +.. code-block:: java + + public class ActorRegistered { + ActorRef actor(); + } + public class ActorUnregistered { + ActorRef actor(); + } + +So your listener Actor needs to be able to handle these two messages. Example: + +.. code-block:: java + + import akka.actor.ActorRegistered; + import akka.actor.ActorUnregistered; + import akka.actor.UntypedActor; + import akka.event.EventHandler; + + public class RegistryListener extends UntypedActor { + public void onReceive(Object message) throws Exception { + if (message instanceof ActorRegistered) { + ActorRegistered event = (ActorRegistered) message; + EventHandler.info(this, String.format("Actor registered: %s - %s", + event.actor().actorClassName(), event.actor().getUuid())); + } else if (message instanceof ActorUnregistered) { + // ... + } + } + } + +The above actor can be added as listener of registry events: + +.. 
code-block:: java + + import static akka.actor.Actors.*; + + ActorRef listener = actorOf(RegistryListener.class).start(); + registry().addListener(listener); diff --git a/akka-docs/pending/actor-registry-scala.rst b/akka-docs/pending/actor-registry-scala.rst new file mode 100644 index 0000000000..5f57434501 --- /dev/null +++ b/akka-docs/pending/actor-registry-scala.rst @@ -0,0 +1,107 @@ +ActorRegistry (Scala) +===================== + +Module stability: **SOLID** + +ActorRegistry: Finding Actors +----------------------------- + +Actors can be looked up by using the **akka.actor.Actor.registry: akka.actor.ActorRegistry**. Lookups for actors through this registry can be done by: + +* uuid akka.actor.Uuid – this uses the ‘**uuid**’ field in the Actor class, returns the actor reference for the actor with specified uuid, if one exists, otherwise None +* id string – this uses the ‘**id**’ field in the Actor class, which can be set by the user (default is the class name), returns all actor references to actors with specified id +* specific actor class - returns an '**Array[Actor]**' with all actors of this exact class +* parameterized type - returns an '**Array[Actor]**' with all actors that are a subtype of this specific type + +Actors are automatically registered in the ActorRegistry when they are started and removed when they are stopped. You can explicitly register and unregister ActorRef's by using the '**register**' and '**unregister**' methods. The ActorRegistry contains many convenience methods for looking up typed actors. + +Here is a summary of the API for finding actors: + +.. 
code-block:: scala + + def actors: Array[ActorRef] + def actorFor(uuid: akka.actor.Uuid): Option[ActorRef] + def actorsFor(id : String): Array[ActorRef] + def actorsFor[T <: Actor](implicit manifest: Manifest[T]): Array[ActorRef] + def actorsFor[T <: Actor](clazz: Class[T]): Array[ActorRef] + + // finding typed actors + def typedActors: Array[AnyRef] + def typedActorFor(uuid: akka.actor.Uuid): Option[AnyRef] + def typedActorsFor(id: String): Array[AnyRef] + def typedActorsFor[T <: AnyRef](implicit manifest: Manifest[T]): Array[AnyRef] + def typedActorsFor[T <: AnyRef](clazz: Class[T]): Array[AnyRef] + +Examples of how to use them: + +.. code-block:: scala + + val actor = Actor.registry.actorFor(uuid) + val pojo = Actor.registry.typedActorFor(uuid) + +.. code-block:: scala + + val actors = Actor.registry.actorsFor(classOf[...]) + val pojos = Actor.registry.typedActorsFor(classOf[...]) + +.. code-block:: scala + + val actors = Actor.registry.actorsFor(id) + val pojos = Actor.registry.typedActorsFor(id) + +.. code-block:: scala + + val actors = Actor.registry.actorsFor[MyActorType] + val pojos = Actor.registry.typedActorsFor[MyTypedActorImpl] + +The ActorRegistry also has a 'shutdownAll' and 'foreach' methods: + +.. code-block:: scala + + def foreach(f: (ActorRef) => Unit) + def foreachTypedActor(f: (AnyRef) => Unit) + def shutdownAll() + +If you need to know when a new Actor is added or removed from the registry, you can use the subscription API. You can register an Actor that should be notified when an event happens in the ActorRegistry: + +.. code-block:: scala + + def addListener(listener: ActorRef) + def removeListener(listener: ActorRef) + +The messages sent to this Actor are: + +.. code-block:: scala + + case class ActorRegistered(actor: ActorRef) + case class ActorUnregistered(actor: ActorRef) + +So your listener Actor needs to be able to handle these two messages. Example: + +.. 
code-block:: scala + + import akka.actor.Actor + import akka.actor.ActorRegistered; + import akka.actor.ActorUnregistered; + import akka.actor.UntypedActor; + import akka.event.EventHandler; + + class RegistryListener extends Actor { + def receive = { + case event: ActorRegistered => + EventHandler.info(this, "Actor registered: %s - %s".format( + event.actor.actorClassName, event.actor.uuid)) + case event: ActorUnregistered => + // ... + } + } + +The above actor can be added as listener of registry events: + +.. code-block:: scala + + import akka.actor._ + import akka.actor.Actor._ + + val listener = actorOf[RegistryListener].start() + registry.addListener(listener) diff --git a/akka-docs/pending/actors-scala.rst b/akka-docs/pending/actors-scala.rst new file mode 100644 index 0000000000..fc456ba71e --- /dev/null +++ b/akka-docs/pending/actors-scala.rst @@ -0,0 +1,573 @@ +Actors (Scala) +============== + +Module stability: **SOLID** + +The `Actor Model `_ provides a higher level of abstraction for writing concurrent and distributed systems. It alleviates the developer from having to deal with explicit locking and thread management, making it easier to write correct concurrent and parallel systems. Actors were defined in the 1973 paper by Carl Hewitt but have been popularized by the Erlang language, and used for example at Ericsson with great success to build highly concurrent and reliable telecom systems. + +The API of Akka’s Actors is similar to Scala Actors which has borrowed some of its syntax from Erlang. + +The Akka 0.9 release introduced a new concept; ActorRef, which requires some refactoring. If you are new to Akka just read along, but if you have used Akka 0.6.x, 0.7.x and 0.8.x then you might be helped by the :doc:`0.8.x => 0.9.x migration guide ` + +Creating Actors +--------------- + +Actors can be created either by: + +* Extending the Actor class and implementing the receive method. +* Create an anonymous actor using one of the actor methods. 
+ +Defining an Actor class +^^^^^^^^^^^^^^^^^^^^^^^ + +Actor classes are implemented by extending the Actor class and implementing the ``receive`` method. The ``receive`` method should define a series of case statements (which has the type ``PartialFunction[Any, Unit]``) that defines which messages your Actor can handle, using standard Scala pattern matching, along with the implementation of how the messages should be processed. + +Here is an example: + +.. code-block:: scala + + class MyActor extends Actor { + def receive = { + case "test" => EventHandler.info(this, "received test") + case _ => EventHandler.info(this, "received unknown message") + } + } + +Please note that the Akka Actor ``receive`` message loop is exhaustive, which is different compared to Erlang and Scala Actors. This means that you need to provide a pattern match for all messages that it can accept and if you want to be able to handle unknown messages then you need to have a default case as in the example above. + +Creating Actors +^^^^^^^^^^^^^^^ + +.. code-block:: scala + + val myActor = Actor.actorOf[MyActor] + myActor.start() + +Normally you would want to import the ``actorOf`` method like this: + +.. code-block:: scala + + import akka.actor.Actor._ + + val myActor = actorOf[MyActor] + +To avoid prefixing it with ``Actor`` every time you use it. + +You can also start it in the same statement: + +.. code-block:: scala + + val myActor = actorOf[MyActor].start() + +The call to ``actorOf`` returns an instance of ``ActorRef``. This is a handle to the ``Actor`` instance which you can use to interact with the ``Actor``. The ``ActorRef`` is immutable and has a one to one relationship with the Actor it represents. The ``ActorRef`` is also serializable and network-aware. This means that you can serialize it, send it over the wire and use it on a remote host and it will still be representing the same Actor on the original node, across the network. 
+ +Creating Actors with non-default constructor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If your Actor has a constructor that takes parameters then you can't create it using ``actorOf[TYPE]``. Instead you can use a variant of ``actorOf`` that takes a call-by-name block in which you can create the Actor in any way you like. + +Here is an example: + +.. code-block:: scala + + val a = actorOf(new MyActor(..)).start() // allows passing in arguments into the MyActor constructor + +Running a block of code asynchronously +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Here we create a light-weight actor-based thread, that can be used to spawn off a task. Code blocks spawned up like this are always implicitly started, shut down and made eligible for garbage collection. The actor that is created "under the hood" is not reachable from the outside and there is no way of sending messages to it. It being an actor is only an implementation detail. It will only run the block in an event-based thread and exit once the block has run to completion. + +.. code-block:: scala + + spawn { + ... // do stuff + } + +Identifying Actors +------------------ + +Each Actor has two fields: + +* ``self.uuid`` +* ``self.id`` + +The difference is that the ``uuid`` is generated by the runtime, guaranteed to be unique and can't be modified. While the ``id`` is modifiable by the user, and defaults to the Actor class name. You can retrieve Actors by both UUID and ID using the ``ActorRegistry``, see the section further down for details. + +Messages and immutability +------------------------- + +**IMPORTANT**: Messages can be any kind of object but have to be immutable. Scala can’t enforce immutability (yet) so this has to be by convention. Primitives like String, Int, Boolean are always immutable. Apart from these the recommended approach is to use Scala case classes which are immutable (if you don’t explicitly expose the state) and works great with pattern matching at the receiver side. 
+ +Here is an example: + +.. code-block:: scala + + // define the case class + case class Register(user: User) + + // create a new case class message + val message = Register(user) + +Other good messages types are ``scala.Tuple2``, ``scala.List``, ``scala.Map`` which are all immutable and great for pattern matching. + +Send messages +------------- + +Messages are sent to an Actor through one of the “bang” methods. + +* ! means “fire-and-forget”, e.g. send a message asynchronously and return immediately. +* !! means “send-and-reply-eventually”, e.g. send a message asynchronously and wait for a reply through aFuture. Here you can specify a timeout. Using timeouts is very important. If no timeout is specified then the actor’s default timeout (set by the this.timeout variable in the actor) is used. This method returns an ``Option[Any]`` which will be either ``Some(result)`` if returning successfully or None if the call timed out. +* !!! sends a message asynchronously and returns a ``Future``. + +You can check if an Actor can handle a specific message by invoking the ``isDefinedAt`` method: + +.. code-block:: scala + + if (actor.isDefinedAt(message)) actor ! message + else ... + +Fire-forget +^^^^^^^^^^^ + +This is the preferred way of sending messages. No blocking waiting for a message. This gives the best concurrency and scalability characteristics. + +.. code-block:: scala + + actor ! "Hello" + +If invoked from within an Actor, then the sending actor reference will be implicitly passed along with the message and available to the receiving Actor in its ``sender: Option[AnyRef]`` member field. He can use this to reply to the original sender or use the ``reply(message: Any)`` method. + +If invoked from an instance that is **not** an Actor there will be no implicit sender passed along the message and you will get an IllegalStateException if you call ``self.reply(..)``. 
+ +Send-And-Receive-Eventually +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Using ``!!`` will send a message to the receiving Actor asynchronously but it will wait for a reply on a ``Future``, blocking the sender Actor until either: + +* A reply is received, or +* The Future times out + +You can pass an explicit time-out to the ``!!`` method and if none is specified then the default time-out defined in the sender Actor will be used. + +The ``!!`` method returns an ``Option[Any]`` which will be either ``Some(result)`` if returning successfully, or ``None`` if the call timed out. +Here are some examples: + +.. code-block:: scala + + val resultOption = actor !! ("Hello", 1000) + if (resultOption.isDefined) ... // handle reply + else ... // handle timeout + + val result: Option[String] = actor !! "Hello" + resultOption match { + case Some(reply) => ... // handle reply + case None => ... // handle timeout + } + + val result = (actor !! "Hello").getOrElse(throw new RuntimeException("TIMEOUT")) + + (actor !! "Hello").foreach(result => ...) // handle result + +Send-And-Receive-Future +^^^^^^^^^^^^^^^^^^^^^^^ + +Using ``!!!`` will send a message to the receiving Actor asynchronously and will return a 'Future': + +.. code-block:: scala + + val future = actor !!! "Hello" + +See `Futures `_ for more information. + +Forward message +^^^^^^^^^^^^^^^ + +You can forward a message from one actor to another. This means that the original sender address/reference is maintained even though the message is going through a 'mediator'. This can be useful when writing actors that work as routers, load-balancers, replicators etc. + +.. code-block:: scala + + actor.forward(message) + +Receive messages +---------------- + +An Actor has to implement the ``receive`` method to receive messages: + +.. 
code-block:: scala + + protected def receive: PartialFunction[Any, Unit] + +Note: Akka has an alias to the ``PartialFunction[Any, Unit]`` type called ``Receive`` (``akka.actor.Actor.Receive``), so you can use this type instead for clarity. But most often you don't need to spell it out. + +This method should return a ``PartialFunction``, e.g. a ‘match/case’ clause in which the message can be matched against the different case clauses using Scala pattern matching. Here is an example: + +.. code-block:: scala + + class MyActor extends Actor { + def receive = { + case "Hello" => + log.info("Received 'Hello'") + + case _ => + throw new RuntimeException("unknown message") + } + } + +Actor internal API +------------------ + +The Actor trait contains almost no member fields or methods to invoke, you just use the Actor trait to implement the: + +#. ``receive`` message handler +#. life-cycle callbacks: + + #. preStart + #. postStop + #. preRestart + #. postRestart + +The ``Actor`` trait has one single member field (apart from the ``log`` field from the mixed in ``Logging`` trait): + +.. code-block:: scala + + val self: ActorRef + +This ``self`` field holds a reference to its ``ActorRef`` and it is this reference you want to access the Actor's API. Here, for example, you find methods to reply to messages, send yourself messages, define timeouts, fault tolerance etc., start and stop etc. + +However, for convenience you can import these functions and fields like below, which will allow you do drop the ``self`` prefix: + +.. code-block:: scala + + class MyActor extends Actor { + import self._ + id = ... + dispatcher = ... + start + ... + } + +But in this documentation we will always prefix the calls with ``self`` for clarity. + +Let's start by looking how we can reply to messages in a convenient way using this ``ActorRef`` API. + +Reply to messages +----------------- + +Reply using the reply and reply\_? 
methods +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to send a message back to the original sender of the message you just received then you can use the ``reply(..)`` method. + +.. code-block:: scala + + case request => + val result = process(request) + self.reply(result) + +In this case the ``result`` will be send back to the Actor that sent the ``request``. + +The ``reply`` method throws an ``IllegalStateException`` if unable to determine what to reply to, e.g. the sender is not an actor. You can also use the more forgiving ``reply_?`` method which returns ``true`` if reply was sent, and ``false`` if unable to determine what to reply to. + +.. code-block:: scala + + case request => + val result = process(request) + if (self.reply_?(result)) ...// success + else ... // handle failure + +Reply using the sender reference +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If the sender is an Actor then its reference will be implicitly passed along together with the message and will end up in the ``sender: Option[ActorRef]`` member field in the ``ActorRef``. This means that you can use this field to send a message back to the sender. + +.. code-block:: scala + + // receiver code + case request => + val result = process(request) + self.sender.get ! result + +It's important to know that ``sender.get`` will throw an exception if the ``sender`` is not defined, e.g. the ``Option`` is ``None``. You can check if it is defined by invoking the ``sender.isDefined`` method, but a more elegant solution is to use ``foreach`` which will only be executed if the sender is defined in the ``sender`` member ``Option`` field. If it is not, then the operation in the ``foreach`` method is ignored. + +.. code-block:: scala + + // receiver code + case request => + val result = process(request) + self.sender.foreach(_ ! result) + +The same pattern holds for using the ``senderFuture`` in the section below. 
+ +Reply using the sender future +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If a message was sent with the ``!!`` or ``!!!`` methods, which both implements request-reply semantics using Future's, then you either have the option of replying using the ``reply`` method as above. This method will then resolve the Future. But you can also get a reference to the Future directly and resolve it yourself or if you would like to store it away to resolve it later, or pass it on to some other Actor to resolve it. + +The reference to the Future resides in the ``senderFuture: Option[CompletableFuture[_]]`` member field in the ``ActorRef`` class. + +Here is an example of how it can be used: + +.. code-block:: scala + + case request => + try { + val result = process(request) + self.senderFuture.foreach(_.completeWithResult(result)) + } catch { + case e => + senderFuture.foreach(_.completeWithException(this, e)) + } + +Reply using the channel +^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to have a handle to an object to whom you can reply to the message, you can use the ``Channel`` abstraction. +Simply call ``self.channel`` and then you can forward that to others, store it away or otherwise until you want to reply, which you do by ``Channel ! response``: + +.. code-block:: scala + + case request => + val result = process(request) + self.channel ! result + +.. code-block:: scala + + case request => + friend forward self.channel + +Summary of reply semantics and options +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* ``self.reply(...)`` can be used to reply to an ``Actor`` or a ``Future``. +* ``self.sender`` is a reference to the ``Actor`` you can reply to, if it exists +* ``self.senderFuture`` is a reference to the ``Future`` you can reply to, if it exists +* ``self.channel`` is a reference providing an abstraction to either ``self.sender`` or ``self.senderFuture`` if one is set, providing a single reference to store and reply to (the reference equivalent to the ``reply(...)`` method). 
+* ``self.sender`` and ``self.senderFuture`` will never be set at the same time, as there can only be one reference to accept a reply. + +Initial receive timeout +----------------------- + +A timeout mechanism can be used to receive a message when no initial message is received within a certain time. To receive this timeout you have to set the ``receiveTimeout`` property and declare a case handling the ``ReceiveTimeout`` object. + +.. code-block:: scala + + self.receiveTimeout = Some(30000L) // 30 seconds + + def receive = { + case "Hello" => + log.info("Received 'Hello'") + case ReceiveTimeout => + throw new RuntimeException("received timeout") + } + +This mechanism also works for hotswapped receive functions. Every time a ``HotSwap`` is sent, the receive timeout is reset and rescheduled. + +Starting actors +--------------- + +Actors are started by invoking the ``start`` method. + +.. code-block:: scala + + val actor = actorOf[MyActor] + actor.start() + +You can create and start the ``Actor`` in a one-liner like this: + +.. code-block:: scala + + val actor = actorOf[MyActor].start() + +When you start the ``Actor`` then it will automatically call the ``def preStart`` callback method on the ``Actor`` trait. This is an excellent place to add initialization code for the actor. + +.. code-block:: scala + + override def preStart = { + ... // initialization code + } + +Stopping actors +--------------- + +Actors are stopped by invoking the ``stop`` method. + +.. code-block:: scala + + actor.stop() + +When stop is called then a call to the ``def postStop`` callback method will take place. The ``Actor`` can use this callback to implement shutdown behavior. + +.. code-block:: scala + + override def postStop = { + ... // clean up resources + } + +You can shut down all Actors in the system by invoking: + +.. 
code-block:: scala + + Actor.registry.shutdownAll() + + +PoisonPill +---------- + +You can also send an actor the ``akka.actor.PoisonPill`` message, which will stop the actor when the message is processed. + +If the sender is a ``Future`` (e.g. the message is sent with ``!!`` or ``!!!``), the ``Future`` will be completed with an ``akka.actor.ActorKilledException("PoisonPill")``. + +HotSwap +------- + +Upgrade +^^^^^^^ + +Akka supports hotswapping the Actor’s message loop (i.e. its implementation) at runtime. There are two ways you can do that: + +* Send a ``HotSwap`` message to the Actor. +* Invoke the ``become`` method from within the Actor. + +Both of these take an ``ActorRef => PartialFunction[Any, Unit]`` that implements the new message handler. The hotswapped code is kept in a Stack which can be pushed and popped. + +To hotswap the Actor body using the ``HotSwap`` message: + +.. code-block:: scala + + actor ! HotSwap( self => { + case message => self.reply("hotswapped body") + }) + +Using the ``HotSwap`` message for hotswapping has its limitations. You cannot replace it with any code that uses the Actor's ``self`` reference. If you need to do that then the ``become`` method is better. + +To hotswap the Actor using ``become``: + +.. code-block:: scala + + def angry: Receive = { + case "foo" => self reply "I am already angry!!!" + case "bar" => become(happy) + } + + def happy: Receive = { + case "bar" => self reply "I am already happy :-)" + case "foo" => become(angry) + } + + def receive = { + case "foo" => become(angry) + case "bar" => become(happy) + } + +The ``become`` method is useful for many different things, but a particularly nice example of it is the example where it is used to implement a Finite State Machine (FSM): `Dining Hakkers `_ + +Here is another little cute example of ``become`` and ``unbecome`` in action: + +.. 
code-block:: scala + + case object Swap + class Swapper extends Actor { + def receive = { + case Swap => + println("Hi") + become { + case Swap => + println("Ho") + unbecome() // resets the latest 'become' (just for fun) + } + } + } + + val swap = actorOf[Swapper].start() + + swap ! Swap // prints Hi + swap ! Swap // prints Ho + swap ! Swap // prints Hi + swap ! Swap // prints Ho + swap ! Swap // prints Hi + swap ! Swap // prints Ho + +Encoding Scala Actors nested receives without accidentally leaking memory: `UnnestedReceive `_ +------------------------------------------------------------------------------------------------------------------------------ + +Downgrade +^^^^^^^^^ + +Since the hotswapped code is pushed to a Stack you can downgrade the code as well. There are two ways you can do that: + +* Send the Actor a ``RevertHotSwap`` message +* Invoke the ``unbecome`` method from within the Actor. + +Both of these will pop the Stack and replace the Actor's implementation with the ``PartialFunction[Any, Unit]`` that is at the top of the Stack. + +Revert the Actor body using the ``RevertHotSwap`` message: + +.. code-block:: scala + + actor ! RevertHotSwap + +Revert the Actor body using the ``unbecome`` method: + +.. code-block:: scala + + def receive: Receive = { + case "revert" => unbecome() + } + +Killing an Actor +---------------- + +You can kill an actor by sending a ``Kill`` message. This will restart the actor through regular supervisor semantics. + +Use it like this: + +.. code-block:: scala + + // kill the actor called 'victim' + victim ! Kill + +Actor life-cycle +---------------- + +The actor has a well-defined non-circular life-cycle. 
+ +:: + + NEW (newly created actor) - can't receive messages (yet) + => STARTED (when 'start' is invoked) - can receive messages + => SHUT DOWN (when 'exit' or 'stop' is invoked) - can't do anything + +Extending Actors using PartialFunction chaining +----------------------------------------------- + +A bit advanced but very useful way of defining a base message handler and then extend that, either through inheritance or delegation, is to use ``PartialFunction.orElse`` chaining. + +In generic base Actor: + +.. code-block:: scala + + import akka.actor.Actor.Receive + + abstract class GenericActor extends Actor { + // to be defined in subclassing actor + def specificMessageHandler: Receive + + // generic message handler + def genericMessageHandler: Receive = { + case event => printf("generic: %s\n", event) + } + + def receive = specificMessageHandler orElse genericMessageHandler + } + +In subclassing Actor: + +.. code-block:: scala + + class SpecificActor extends GenericActor { + def specificMessageHandler = { + case event: MyMsg => printf("specific: %s\n", event.subject) + } + } + + case class MyMsg(subject: String) diff --git a/akka-docs/pending/agents-scala.rst b/akka-docs/pending/agents-scala.rst new file mode 100644 index 0000000000..9adb9e9f81 --- /dev/null +++ b/akka-docs/pending/agents-scala.rst @@ -0,0 +1,121 @@ +Agents (Scala) +============== + +Module stability: **SOLID** + +Agents in Akka were inspired by `agents in Clojure `_. + +Agents provide asynchronous change of individual locations. Agents are bound to a single storage location for their lifetime, and only allow mutation of that location (to a new state) to occur as a result of an action. Update actions are functions that are asynchronously applied to the Agent's state and whose return value becomes the Agent's new state. The state of an Agent should be immutable. 
+ +While updates to Agents are asynchronous, the state of an Agent is always immediately available for reading by any thread (using ``get`` or ``apply``) without any messages. + +Agents are reactive. The update actions of all Agents get interleaved amongst threads in a thread pool. At any point in time, at most one ``send`` action for each Agent is being executed. Actions dispatched to an agent from another thread will occur in the order they were sent, potentially interleaved with actions dispatched to the same agent from other sources. + +If an Agent is used within an enclosing transaction, then it will participate in that transaction. Agents are integrated with the STM - any dispatches made in a transaction are held until that transaction commits, and are discarded if it is retried or aborted. + +Creating and stopping Agents +---------------------------- + +Agents are created by invoking ``Agent(value)`` passing in the Agent's initial value. + +.. code-block:: scala + + val agent = Agent(5) + +An Agent will be running until you invoke ``close`` on it. Then it will be eligible for garbage collection (unless you hold on to it in some way). + +.. code-block:: scala + + agent.close + +Updating Agents +--------------- + +You update an Agent by sending a function that transforms the current value or by sending just a new value. The Agent will apply the new value or function atomically and asynchronously. The update is done in a fire-forget manner and you are only guaranteed that it will be applied. There is no guarantee of when the update will be applied but dispatches to an Agent from a single thread will occur in order. You apply a value or a function by invoking the ``send`` function. + +.. code-block:: scala + + // send a value + agent send 7 + + // send a function + agent send (_ + 1) + agent send (_ * 2) + +You can also dispatch a function to update the internal state but on its own thread. 
This does not use the reactive thread pool and can be used for long-running or blocking operations. You do this with the ``sendOff`` method. Dispatches using either ``sendOff`` or ``send`` will still be executed in order. + +.. code-block:: scala + + // sendOff a function + agent sendOff (longRunningOrBlockingFunction) + +Reading an Agent's value +------------------------ + +Agents can be dereferenced, e.g. you can get an Agent's value, by invoking the Agent with parenthesis like this: + +.. code-block:: scala + + val result = agent() + +Or by using the get method. + +.. code-block:: scala + + val result = agent.get + +Reading an Agent's current value does not involve any message passing and happens immediately. So while updates to an Agent are asynchronous, reading the state of an Agent is synchronous. + +Awaiting an Agent's value +------------------------- + +It is also possible to read the value after all currently queued ``send``\s have completed. You can do this with ``await``: + +.. code-block:: scala + + val result = agent.await + +You can also get a ``Future`` to this value, that will be completed after the currently queued updates have completed: + +.. code-block:: scala + + val future = agent.future + // ... + val result = future.await.result.get + +Transactional Agents +-------------------- + +If an Agent is used within an enclosing transaction, then it will participate in that transaction. If you send to an Agent within a transaction then the dispatch to the Agent will be held until that transaction commits, and discarded if the transaction is aborted. + +Monadic usage +------------- + +Agents are also monadic, allowing you to compose operations using for-comprehensions. In a monadic usage, new Agents are created leaving the original Agents untouched. So the old values (Agents) are still available as-is. They are so-called 'persistent'. + +Example of a monadic usage: + +.. 
code-block:: scala + + val agent1 = Agent(3) + val agent2 = Agent(5) + + // uses foreach + for (value <- agent1) { + result = value + 1 + } + + // uses map + val agent3 = + for (value <- agent1) yield value + 1 + + // uses flatMap + val agent4 = for { + value1 <- agent1 + value2 <- agent2 + } yield value1 + value2 + + agent1.close + agent2.close + agent3.close + agent4.close diff --git a/akka-docs/pending/articles.rst b/akka-docs/pending/articles.rst new file mode 100644 index 0000000000..91138b404c --- /dev/null +++ b/akka-docs/pending/articles.rst @@ -0,0 +1,125 @@ +Articles & Presentations +======================== + +Videos +------ + +`Functional Programming eXchange - March 2011 `_ + +`NE Scala - Feb 2011 `_ + +`JFokus - Feb 2011 `_. + +`London Scala User Group - Oct 2010 `_ + +`Akka LinkedIn Tech Talk - Sept 2010 `_ + +`Akka talk at Scala Days - March 2010 `_ + +Articles +-------- + +`Remote Actor Class Loading with Akka `_ + +`Akka Producer Actors: New Features and Best Practices `_ + +`Akka Consumer Actors: New Features and Best Practices `_ + +`Compute Grid with Cloudy Akka `_ + +`Clustered Actors with Cloudy Akka `_ + +`Unit testing Akka Actors with the TestKit `_ + +`Starting with Akka 1.0 `_ + +`Akka Does Async `_ + +`CQRS with Akka actors and functional domain models `_ + +`High Level Concurrency with JRuby and Akka Actors `_ + +`Container-managed actor dispatchers `_ + +`Even simpler scalability with Akka through RegistryActor `_ + +`FSM in Akka (in Vietnamese) `_ + +`Repeater and Idempotent Receiver implementation in Akka `_ + +`EDA Akka as EventBus `_ + +`Upgrading examples to Akka master (0.10) and Scala 2.8.0 Final `_ + +`Testing Akka Remote Actor using Serializable.Protobuf `_ + +`Flexible load balancing with Akka in Scala `_ + +`Eventually everything, and actors `_ + +`Join messages with Akka `_ + +`Starting with Akka part 2, Intellij IDEA, Test Driven Development `_ + +`Starting with Akka and Scala `_ + +`PubSub using Redis and Akka Actors `_ + 
+`Akka's grown-up hump `_ + +`Akka features for application integration `_ + +`Load Balancing Actors with Work Stealing Techniques `_ + +`Domain Services and Bounded Context using Akka - Part 2 `_ + +`Thinking Asynchronous - Domain Modeling using Akka Transactors - Part 1 `_ + +`Introducing Akka – Simpler Scalability, Fault-Tolerance, Concurrency & Remoting through Actors `_ + +`Using Cassandra with Scala and Akka `_ + +`No Comet, Hacking with WebSocket and Akka `_ + +`MongoDB for Akka Persistence `_ + +`Pluggable Persistent Transactors with Akka `_ + +`Enterprise scala actors: introducing the Akka framework `_ + +Books +----- + +`Akka and Camel `_ (appendix E of `Camel in Action `_) +`Ett första steg i Scala `_ (Kapitel "Aktörer och Akka") (en. "A first step in Scala", chapter "Actors and Akka", book in Swedish) + +Presentations +------------- + +`Slides from Akka talk at Scala Days 2010, good short intro to Akka `_ + +`Akka: Simpler Scalability, Fault-Tolerance, Concurrency & Remoting through Actors `_ + +``_ + +``_ + +Podcasts +-------- + +`Episode 16 – Scala and Akka an Interview with Jonas Boner `_ + +`Jonas Boner on the Akka framework, Scala, and highly scalable applications `_ + +Interviews +---------- + +`JetBrains/DZone interview: Talking about Akka, Scala and life with Jonas Bonér `_ + +`Artima interview of Jonas on Akka 1.0 `_ + +`InfoQ interview of Jonas on Akka 1.0 `_ + +`InfoQ interview of Jonas on Akka 0.7 `_ + +``_ diff --git a/akka-docs/pending/benchmarks.rst b/akka-docs/pending/benchmarks.rst new file mode 100644 index 0000000000..6352040d32 --- /dev/null +++ b/akka-docs/pending/benchmarks.rst @@ -0,0 +1,31 @@ +Benchmarks +========== + +Scalability, Throughput and Latency benchmark +--------------------------------------------- + +``_ + +Simple Trading system. 
+* `Here is the result with some graphs `_ +* `Here is the article `_ +* `Here is the code `_ + +Compares: +* Synchronous Scala solution +* Scala library Actors +** Fire-forget +** Request-reply +* Akka +** Request-reply +** Fire-forget with default dispatcher +** Fire-forget with Hawt dispatcher + +Performance benchmark +--------------------- + +Benchmarking Akka against: +* Scala Library Actors +* Raw Java concurrency +* Jetlang (Java actors lib) +``_ diff --git a/akka-docs/pending/building-akka.rst b/akka-docs/pending/building-akka.rst new file mode 100644 index 0000000000..31af34c687 --- /dev/null +++ b/akka-docs/pending/building-akka.rst @@ -0,0 +1,319 @@ +Building Akka +============= + +This page describes how to build and run Akka from the latest source code. + +Get the source code +------------------- + +Akka uses `Git `_ and is hosted at `Github `_. + +You first need Git installed on your machine. You can then clone the source repositories: +* Akka repository from ``_ +* Akka Modules repository from ``_ + +For example: + +:: + + git clone git://github.com/jboner/akka.git + git clone git://github.com/jboner/akka-modules.git + +If you have already cloned the repositories previously then you can update the code with ``git pull``: + +:: + + git pull origin master + +SBT - Simple Build Tool +----------------------- + +Akka is using the excellent `SBT `_ build system. So the first thing you have to do is to download and install SBT. You can read more about how to do that `here `_ . + +The SBT commands that you'll need to build Akka are all included below. If you want to find out more about SBT and using it for your own projects do read the `SBT documentation `_. + +The Akka SBT build file is ``project/build/AkkaProject.scala`` with some properties defined in ``project/build.properties``. 
+ +---- + +Building Akka +------------- + +First make sure that you are in the akka code directory: + +:: + + cd akka + +Fetching dependencies +^^^^^^^^^^^^^^^^^^^^^ + +SBT does not fetch dependencies automatically. You need to manually do this with the ``update`` command: + +:: + + sbt update + +Once finished, all the dependencies for Akka will be in the ``lib_managed`` directory under each module: akka-actor, akka-stm, and so on. + +*Note: you only need to run ``update`` the first time you are building the code, or when the dependencies have changed.* + +Building +^^^^^^^^ + +To compile all the Akka core modules use the ``compile`` command: + +:: + + sbt compile + +You can run all tests with the ``test`` command: + +:: + + sbt test + +If compiling and testing are successful then you have everything working for the latest Akka development version. + +Publish to local Ivy repository +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to deploy the artifacts to your local Ivy repository (for example, to use from an SBT project) use the ``publish-local`` command: + +:: + + sbt publish-local + +Publish to local Maven repository +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to deploy the artifacts to your local Maven repository use: + +:: + + sbt publish-local publish + +SBT interactive mode +^^^^^^^^^^^^^^^^^^^^ + +Note that in the examples above we are calling ``sbt compile`` and ``sbt test`` and so on. SBT also has an interactive mode. If you just run ``sbt`` you enter the interactive SBT prompt and can enter the commands directly. This saves starting up a new JVM instance for each command and can be much faster and more convenient. + +For example, building Akka as above is more commonly done like this: + +:: + + % sbt + [info] Building project akka 1.1-SNAPSHOT against Scala 2.8.1 + [info] using AkkaParentProject with sbt 0.7.5.RC0 and Scala 2.7.7 + > update + [info] + [info] == akka-actor / update == + ... + [success] Successful. + [info] + [info] Total time ... 
+ > compile + ... + > test + ... + +SBT batch mode +^^^^^^^^^^^^^^ + +It's also possible to combine commands in a single call. For example, updating, testing, and publishing Akka to the local Ivy repository can be done with: + +:: + + sbt update test publish-local + +---- + +Building Akka Modules +--------------------- + +To build Akka Modules first build and publish Akka to your local Ivy repository as described above. Or using: + +:: + + cd akka + sbt update publish-local + +Then you can build Akka Modules using the same steps as building Akka. First update to get all dependencies (including the Akka core modules), then compile, test, or publish-local as needed. For example: + +:: + + cd akka-modules + sbt update publish-local + +Microkernel distribution +^^^^^^^^^^^^^^^^^^^^^^^^ + +To build the Akka Modules microkernel (the same as the Akka Modules distribution download) use the ``dist`` command: + +:: + + sbt dist + +The distribution zip can be found in the dist directory and is called ``akka-modules-{version}.zip``. + +To run the microkernel, unzip the zip file, change into the unzipped directory, set the ``AKKA_HOME`` environment variable, and run the main jar file. For example: + +:: + + unzip dist/akka-modules-1.1-SNAPSHOT.zip + cd akka-modules-1.1-SNAPSHOT + export AKKA_HOME=`pwd` + java -jar akka-modules-1.1-SNAPSHOT.jar + +The microkernel will boot up and install the sample applications that reside in the distribution's ``deploy`` directory. You can deploy your own applications into the ``deploy`` directory as well. + +---- + +Scripts +------- + +Linux/Unix init script +^^^^^^^^^^^^^^^^^^^^^^ + +Here is a Linux/Unix init script that can be very useful: + +``_ + +Copy and modify as needed. + +Simple startup shell script +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This little script might help a bit. Just make sure you have the Akka distribution in the '$AKKA_HOME/dist' directory and then invoke this script to start up the kernel. 
The distribution is created in the './dist' dir for you if you invoke 'sbt dist'. + +``_ + +Copy and modify as needed. + +---- + +Dependencies +------------ + +If you are managing dependencies by hand you can find out what all the compile dependencies are for each module by looking in the ``lib_managed/compile`` directories. For example, you can run this to create a listing of dependencies (providing you have the source code and have run ``sbt update``): + +:: + + cd akka + ls -1 */lib_managed/compile + +Here are the dependencies used by the Akka core modules. + +akka-actor +^^^^^^^^^^ + +* No dependencies + +akka-stm +^^^^^^^^ + +* Depends on akka-actor +* multiverse-alpha-0.6.2.jar + +akka-typed-actor +^^^^^^^^^^^^^^^^ + +* Depends on akka-stm +* aopalliance-1.0.jar +* aspectwerkz-2.2.3.jar +* guice-all-2.0.jar + +akka-remote +^^^^^^^^^^^ + +* Depends on akka-typed-actor +* commons-codec-1.4.jar +* commons-io-2.0.1.jar +* dispatch-json_2.8.1-0.7.8.jar +* guice-all-2.0.jar +* h2-lzf-1.0.jar +* jackson-core-asl-1.7.1.jar +* jackson-mapper-asl-1.7.1.jar +* junit-4.8.1.jar +* netty-3.2.3.Final.jar +* objenesis-1.2.jar +* protobuf-java-2.3.0.jar +* sjson_2.8.1-0.9.1.jar + +akka-http +^^^^^^^^^ + +* Depends on akka-remote +* jsr250-api-1.0.jar +* jsr311-api-1.1.jar + +---- +Here are the dependencies used by the Akka modules. 
+ +akka-amqp +^^^^^^^^^ + +* Depends on akka-remote +* commons-cli-1.1.jar +* amqp-client-1.8.1.jar + +akka-camel +^^^^^^^^^^ + +* Depends on akka-actor +* camel-core-2.5.0.jar +* commons-logging-api-1.1.jar +* commons-management-1.0.jar + +akka-camel-typed +^^^^^^^^^^^^^^^^ + +* Depends on akka-typed-actor +* camel-core-2.5.0.jar +* commons-logging-api-1.1.jar +* commons-management-1.0.jar + +akka-spring +^^^^^^^^^^^ + +* Depends on akka-camel +* akka-camel-typed +* commons-logging-1.1.1.jar +* spring-aop-3.0.4.RELEASE.jar +* spring-asm-3.0.4.RELEASE.jar +* spring-beans-3.0.4.RELEASE.jar +* spring-context-3.0.4.RELEASE.jar +* spring-core-3.0.4.RELEASE.jar +* spring-expression-3.0.4.RELEASE.jar + +akka-scalaz +^^^^^^^^^^^ + +* Depends on akka-actor +* hawtdispatch-1.1.jar +* hawtdispatch-scala-1.1.jar +* scalaz-core_2.8.1-6.0-SNAPSHOT.jar + +akka-kernel +^^^^^^^^^^^ + +* Depends on akka-http, akka-amqp, and akka-spring +* activation-1.1.jar +* asm-3.1.jar +* jaxb-api-2.1.jar +* jaxb-impl-2.1.12.jar +* jersey-core-1.3.jar +* jersey-json-1.3.jar +* jersey-scala-1.3.jar +* jersey-server-1.3.jar +* jettison-1.1.jar +* jetty-continuation-7.1.6.v20100715.jar +* jetty-http-7.1.6.v20100715.jar +* jetty-io-7.1.6.v20100715.jar +* jetty-security-7.1.6.v20100715.jar +* jetty-server-7.1.6.v20100715.jar +* jetty-servlet-7.1.6.v20100715.jar +* jetty-util-7.1.6.v20100715.jar +* jetty-xml-7.1.6.v20100715.jar +* servlet-api-2.5.jar +* stax-api-1.0.1.jar diff --git a/akka-docs/pending/buildr.rst b/akka-docs/pending/buildr.rst new file mode 100644 index 0000000000..a684463270 --- /dev/null +++ b/akka-docs/pending/buildr.rst @@ -0,0 +1,55 @@ +Using Akka in a Buildr project +============================== + +This is an example on how to use Akka in a project based on Buildr + +.. 
code-block:: ruby + + require 'buildr/scala' + + VERSION_NUMBER = "0.6" + GROUP = "se.scalablesolutions.akka" + + repositories.remote << "http://www.ibiblio.org/maven2/" + repositories.remote << "http://www.lag.net/repo" + repositories.remote << "http://multiverse.googlecode.com/svn/maven-repository/releases" + + AKKA = group('akka-core', 'akka-comet', 'akka-util','akka-kernel', 'akka-rest', 'akka-util-java', + 'akka-security','akka-persistence-common', 'akka-persistence-redis', + 'akka-amqp', + :under=> 'se.scalablesolutions.akka', + :version => '0.6') + ASPECTJ = "org.codehaus.aspectwerkz:aspectwerkz-nodeps-jdk5:jar:2.1" + SBINARY = "sbinary:sbinary:jar:0.3" + COMMONS_IO = "commons-io:commons-io:jar:1.4" + CONFIGGY = "net.lag:configgy:jar:1.4.7" + JACKSON = group('jackson-core-asl', 'jackson-mapper-asl', + :under=> 'org.codehaus.jackson', + :version => '1.2.1') + MULTIVERSE = "org.multiverse:multiverse-alpha:jar:jar-with-dependencies:0.3" + NETTY = "org.jboss.netty:netty:jar:3.2.0.ALPHA2" + PROTOBUF = "com.google.protobuf:protobuf-java:jar:2.2.0" + REDIS = "com.redis:redisclient:jar:1.0.1" + SJSON = "sjson.json:sjson:jar:0.3" + + Project.local_task "run" + + desc "Akka Chat Sample Module" + define "akka-sample-chat" do + project.version = VERSION_NUMBER + project.group = GROUP + + compile.with AKKA, CONFIGGY + + p artifact(MULTIVERSE).to_s + + package(:jar) + + task "run" do + Java.java "scala.tools.nsc.MainGenericRunner", + :classpath => [ compile.dependencies, compile.target, + ASPECTJ, COMMONS_IO, JACKSON, NETTY, MULTIVERSE, PROTOBUF, REDIS, + SBINARY, SJSON], + :java_args => ["-server"] + end + end diff --git a/akka-docs/pending/cluster-membership.rst b/akka-docs/pending/cluster-membership.rst new file mode 100644 index 0000000000..6aa70e8bce --- /dev/null +++ b/akka-docs/pending/cluster-membership.rst @@ -0,0 +1,89 @@ +Cluster Membership (Scala) +========================== + +Module stability: **IN PROGRESS** + +Akka supports a Cluster Membership through a 
`JGroups `_ based implementation. JGroups is a `P2P `_ clustering API. + +Configuration +------------- + +The cluster is configured in 'akka.conf' by adding the Fully Qualified Name (FQN) of the actor class and serializer: + +.. code-block:: ruby + + remote { + cluster { + service = on + name = "default" # The name of the cluster + serializer = "akka.serialization.Serializer$Java" # FQN of the serializer class + } + } + +How to join the cluster +----------------------- + +The node joins the cluster when the 'RemoteNode' and/or 'RemoteServer' servers are started. + +Cluster API +----------- + +Interaction with the cluster is done through the 'akka.remote.Cluster' object. + +To send a message to all actors of a specific type on other nodes in the cluster use the 'relayMessage' function: + +.. code-block:: scala + + def relayMessage(to: Class[_ <: Actor], msg: AnyRef): Unit + +Here is an example: + +.. code-block:: scala + + Cluster.relayMessage(classOf[ATypeOfActor], message) + +Traversing the remote nodes in the cluster to spawn remote actors: + +Cluster.foreach: + +.. code-block:: scala + + def foreach(f : (RemoteAddress) => Unit) : Unit + +Here's an example: + +.. code-block:: scala + + for(endpoint <- Cluster) spawnRemote[KungFuActor](endpoint.hostname,endpoint.port) + +and: + +.. code-block:: scala + + Cluster.foreach( endpoint => spawnRemote[KungFuActor](endpoint.hostname,endpoint.port) ) + +Cluster.lookup: + +.. code-block:: scala + + def lookup[T](handleRemoteAddress : PartialFunction[RemoteAddress, T]) : Option[T] + +Here is an example: + +.. code-block:: scala + + val myRemoteActor: Option[SomeActorType] = Cluster.lookup({ + case RemoteAddress(hostname, port) => spawnRemote[SomeActorType](hostname, port) + }) + + myRemoteActor.foreach(remoteActor => ...) + +Here is another example: + +.. 
code-block:: scala + Cluster.lookup({ + case remoteAddress @ RemoteAddress(_,_) => remoteAddress + }) match { + case Some(remoteAddress) => spawnAllRemoteActors(remoteAddress) + case None => handleNoRemoteNodeFound + } diff --git a/akka-docs/pending/companies-using-akka.rst b/akka-docs/pending/companies-using-akka.rst new file mode 100644 index 0000000000..3832adab1b --- /dev/null +++ b/akka-docs/pending/companies-using-akka.rst @@ -0,0 +1,170 @@ +Companies and Open Source projects using Akka +============================================= + +Production Users +**************** + +These are some of the production Akka users that are able to talk about their use publicly. + +CSC +--- + +CSC is a global provider of information technology services. The Traffic Management business unit in the Netherlands is a systems integrator for the implementation of Traffic Information and Traffic Enforcement Systems, such as section control, weigh in motion, travel time and traffic jam detection and national data warehouse for traffic information. CSC Traffic Management is using Akka for their latest Traffic Information and Traffic Enforcement Systems. + +``_ + +*"Akka has been in use for almost a year now (since 0.7) and has been used successfully for two projects so far. Akka has enabled us to deliver very flexible, scalable and high performing systems with as little friction as possible. The Actor model has simplified a lot of concerns in the type of systems that we build and is now part of our reference architecture. With Akka we deliver systems that meet the most strict performance requirements of our clients in a near-realtime environment. We have found the Akka framework and it's support team invaluable."* + +Thatcham Motor Insurance Repair Research Centre +----------------------------------------------- + +Thatcham is a EuroNCAP member. 
They research efficient, safe, cost effective repair of vehicles, and work with manufacturers to influence the design of new vehicles Thatcham are using Akka as the implementation for their distributed modules. All Scala based research software now talks to an Akka based publishing platform. Using Akka enables Thatcham to 'free their domain', and ensures that the platform is cloud enabled and scalable, and that the team is confident that they are flexible. Akka has been in use, tested under load at Thatcham for almost a year, with no problems migrating up through the different versions. An old website currently under redesign on a new Scala powered platform: `www.thatcham.org `_ + +*“We have been in production with Akka for over 18 months with zero downtime. The core is rock solid, never a problem, performance is great, integration capabilities are diverse and ever growing, and the toolkit is just a pleasure to work with. Combine that with the excellent response you get from the devs and users on this list and you have a winner. Absolutely no regrets on our part for choosing to work with Akka.”* + +*"Scala and Akka are now enabling improvements in the standard of vehicle damage assessment, and in the safety of vehicle repair across the UK, with Europe, USA, Asia and Australasia to follow. Thatcham (Motor Insurance Repair Research Centre) are delivering crash specific information with linked detailed repair information for over 7000 methods.* + +*For Thatcham, the technologies enable scalability and elegance when dealing with complicated design constraints. Because of the complexity of interlinked methods, caching is virtually impossible in most cases, so in steps the 'actors' paradigm. 
Where previously something like JMS would have provided a stable but heavyweight, rigid solution, Thatcham are now more flexible, and can expand into the cloud in a far simpler, more rewarding way.* + +*Thatcham's customers, body shop repairers and insurers receive up to date repair information in the form of crash repair documents of the quality necessary to ensure that every vehicle is repaired back to the original safety standard. In a market as important as this, availability is key, as is performance. Scala and Akka have delivered consistently so far.* + +*While recently introduced, growing numbers of UK repairers are receiving up to date repair information from this service, with the rest to follow shortly. Plans are already in motion to build new clusters to roll the service out across Europe, USA, Asia and Australasia.* + +*The sheer opportunities opened up to teams by Scala and Akka, in terms of integration, concise expression of intent and scalability are of huge benefit."* + +SVT (Swedish Television) +------------------------ + +``_ + +*“I’m currently working in a project at the Swedish Television where we’re developing a subtitling system with collaboration capabilities similar to Google Wave. It’s a mission critical system and the design and server implementation is all based on Akka and actors etc. We’ve been running in production for about 6 months and have been upgrading Akka whenever a new release comes out. We’ve never had a single bug due to Akka, and it’s been a pure pleasure to work with. I would choose Akka any day of the week!* + +*Our system is highly asynchronous so the actor style of doing things is a perfect fit. I don’t know about how you feel about concurrency in a big system, but rolling your own abstractions is not a very easy thing to do. When using Akka you can almost forget about all that. Synchronizing between threads, locking and protecting access to state etc. 
Akka is not just about actors, but that’s one of the most pleasurable things to work with. It’s easy to add new ones and it’s easy to design with actors. You can fire up work actors tied to a specific dispatcher etc. I could make the list of benefits much longer, but I’m at work right now. I suggest you try it out and see how it fits your requirements.* + +*We saw a perfect business reason for using Akka. It lets you concentrate on the business logic instead of the low level things. It’s easy to teach others and the business intent is clear just by reading the code. We didn’t choose Akka just for fun. It’s a business critical application that’s used in broadcasting. Even live broadcasting. We wouldn’t have been where we are today in such a short time without using Akka. We’re two developers that have done great things in such a short amount of time and part of this is due to Akka. As I said, it lets us focus on the business logic instead of low level things such as concurrency, locking, performance etc."* + +Tapad +----- + +``_ + +*"Tapad is building a real-time ad exchange platform for advertising on mobile and connected devices. Real-time ad exchanges allows for advertisers (among other things) to target audiences instead of buying fixed set of ad slots that will be displayed “randomly” to users. To developers without experience in the ad space, this might seem boring, but real-time ad exchanges present some really interesting technical challenges.* + +*Take for instance the process backing a page view with ads served by a real-time ad exchange auction (somewhat simplified):* + +1. *A user opens a site (or app) which has ads in it.* +2. *As the page / app loads, the ad serving components fires off a request to the ad exchange (this might just be due to an image tag on the page).* +3. 
*The ad exchange enriches the request with any information about the current user (tracking cookies are often employed for this) and display context information (“news article about parenting”, “blog about food” etc).* +4. *The ad exchange forwards the enriched request to all bidders registered with the ad exchange.* +5. *The bidders consider the provided user information and responds with what price they are willing to pay for this particular ad slot.* +6. *The ad exchange picks the highest bidder and ensures that the winning bidder’s ad is shown to the user.* + +*Any latency in this process directly influences user experience latency, so this has to happen really fast. All-in-all, the total time should not exceed about 100ms and most ad exchanges allow bidders to spend about 60ms (including network time) to return their bids. That leaves the ad exchange with less than 40ms to facilitate the auction. At Tapad, this happens billions of times per month / tens of thousands of times per second.* + +*Tapad is building bidders which will participate in auctions facilitated by other ad exchanges, but we’re also building our own. We are using Akka in several ways in several parts of the system. Here are some examples:* + +*Plain old parallelization* +*During an auction in the real-time exchange, it’s obvious that all bidders must receive the bid requests in parallel. An auctioneer actor sends the bid requests to bidder actors which in turn handles throttling and eventually IO. We use futures in these requests and the auctioneer discards any responses which arrive too late.* + +*Inside our bidders, we also rely heavily on parallel execution. In order to determine how much to pay for an ad slot, several data stores are queried for information pertinent to the current user. In a “traditional” system, we’d be doing this sequentially, but again, due to the extreme latency constraints, we’re doing this concurrently. 
Again, this is done with futures and data that is not available in time, get cut from the decision making (and logged :)).* + +*Maintaining state under concurrent load* +*This is probably the de facto standard use case for the actors model. Bidders internal to our system are actors backed by an advertiser campaign. A campaign includes, among other things, budget and “pacing” information. The budget determines how much money to spend for the duration of the campaign, whereas pacing information might set constraints on how quickly or slowly the money should be spent. Ad traffic changes from day to day and from hour to hour and our spending algorithms considers past performance in order to spend the right amount of money at the right time. Needless to say, these algorithms use a lot of state and this state is in constant flux. A bidder with a high budget may see tens of thousands of bid requests per second. Luckily, due to round-robin load-balancing and the predictability of randomness under heavy traffic, the bidder actors do not share state across cluster nodes, they just share their instance count so they know which fraction of the campaign budget to try to spend.* + +*Pacing is also done for external bidders. Each 3rd party bidder end-point has an actor coordinating requests and measuring latency and throughput. The actor never blocks itself, but when an incoming bid request is received, it considers the current performance of the 3rd party system and decides whether to pass on the request and respond negatively immediately, or forward the request to the 3rd party request executor component (which handles the IO).* + +*Batch processing* +*We store a lot of data about every single ad request we serve and this is stored in a key-value data store. Due to the performance characteristics of the data store, it is not feasible to store every single data point one at a time - it must be batched up and performed in parallel. 
We don’t need a durable messaging system for this (losing a couple of hundred data points is no biggie). All our data logging happens asynchronously and we have a basic load-balanced actors which batches incoming messages and writes on regular intervals (using Scheduler) or whenever the specified batch size has been reached.* + +*Analytics* +*Needless to say, it’s not feasible / useful to store our traffic information in a relational database. A lot of analytics and data analysis is done “offline” with map / reduce on top the data store, but this doesn’t work well for real-time analytics which our customers love. We therefore have metrics actors that receives campaign bidding and click / impression information in real-time, aggregates this information over configurable periods of time and flushes it to the database used for customer dashboards for “semi-real-time” display. Five minute history is considered real-time in this business, but in theory, we could have queried the actors directly for really real-time data. :)* + +*Our Akka journey started as a prototyping project, but Akka has now become a crucial part of our system. All of the above mentioned components, except the 3rd party bidder integration, have been running in production for a couple of weeks (on Akka 1.0RC3) and we have not seen any issues at all so far."* + +Flowdock +-------- + +Flowdock delivers Google Wave for the corporate world. + +*"Flowdock makes working together a breeze. 
Organize the flow of information, task things over and work together towards common goals seamlessly on the web - in real time."* + +``_ + +Travel Budget +------------- + +``_ + +Says.US +------- + +*"says.us is a gathering place for people to connect in real time - whether an informal meeting of people who love Scala or a chance for people anywhere to speak out about the latest headlines."* + +``_ + +LShift +------ + +* *"Diffa is an open source data analysis tool that automatically establishes data differences between two or more real-time systems.* +* Diffa will help you compare local or distributed systems for data consistency, without having to stop them running or implement manual cross-system comparisons. The interface provides you with simple visual summary of any consistency breaks and tools to investigate the issues.* +* Diffa is the ideal tool to use to investigate where or when inconsistencies are occurring, or simply to provide confidence that your systems are running in perfect sync. It can be used operationally as an early warning system, in deployment for release verification, or in development with other enterprise diagnosis tools to help troubleshoot faults."* + +``_ + +Twimpact +-------- + +*"Real-time twitter trends and user impact"* + +``_ + +Rocket Pack Platform +-------------------- + +*"Rocket Pack Platform is the only fully integrated solution for plugin-free browser game development."* + +``_ + +Open Source Projects using Akka +******************************* + +Redis client +------------ + +*A Redis client written in Scala, using Akka actors, HawtDispatch and non-blocking IO. 
Supports Redis 2.0+* + +``_ + +Narrator +-------- + +*"Narrator is a library which can be used to create story driven clustered load-testing packages through a very readable and understandable api."* + +``_ + +Kandash +------- + +*"Kandash is a lightweight kanban web-based board and set of analytics tools."* + +``_ +``_ + +Wicket Cassandra Datastore +-------------------------- + +This project provides an org.apache.wicket.pageStore.IDataStore implementation that writes pages to an Apache Cassandra cluster using Akka. + +``_ + +Spray +----- + +*"spray is a lightweight Scala framework for building RESTful web services on top of Akka actors and Akka Mist. It sports the following main features:* + +* *Completely asynchronous, non-blocking, actor-based request processing for efficiently handling very high numbers of concurrent connections* +* *Powerful, flexible and extensible internal Scala DSL for declaratively defining your web service behavior* +* *Immutable model of the HTTP protocol, decoupled from the underlying servlet container* +* *Full testability of your REST services, without the need to fire up containers or actors"* + +``_ diff --git a/akka-docs/pending/configuration.rst b/akka-docs/pending/configuration.rst new file mode 100644 index 0000000000..19d4a1a566 --- /dev/null +++ b/akka-docs/pending/configuration.rst @@ -0,0 +1,180 @@ +Configuration +============= + +Specifying the configuration file +--------------------------------- + +If you don't specify a configuration file then Akka is using default values. If you want to override these then you should edit the 'akka.conf' file in the 'AKKA_HOME/config' directory. This config inherits from the 'akka-reference.conf' file that you see below, use your 'akka.conf' to override any property in the reference config. + +The config can be specified in a variety of ways: + +* Define the '-Dakka.config=...' system property option. +* Put the 'akka.conf' file on the classpath. 
+* Define 'AKKA_HOME' environment variable pointing to the root of the Akka distribution, in which the config is taken from the 'AKKA_HOME/config' directory, you can also point to the AKKA_HOME by specifying the '-Dakka.home=...' system property option. + +Defining the configuration file +------------------------------- + +``_ +#################### +# Akka Config File # +#################### + +# This file has all the default settings, so all these could be removed with no visible effect. +# Modify as needed. + +akka { + version = "1.1-SNAPSHOT" # Akka version, checked against the runtime version of Akka. + + enabled-modules = [] # Comma separated list of the enabled modules. Options: ["remote", "camel", "http"] + + time-unit = "seconds" # Time unit for all timeout properties throughout the config + + event-handlers = ["akka.event.EventHandler$DefaultListener"] # event handlers to register at boot time (EventHandler$DefaultListener logs to STDOUT) + event-handler-level = "DEBUG" # Options: ERROR, WARNING, INFO, DEBUG + + # These boot classes are loaded (and created) automatically when the Akka Microkernel boots up + # Can be used to bootstrap your application(s) + # Should be the FQN (Fully Qualified Name) of the boot class which needs to have a default constructor + # boot = ["sample.camel.Boot", + # "sample.rest.java.Boot", + # "sample.rest.scala.Boot", + # "sample.security.Boot"] + boot = [] + + actor { + timeout = 5 # Default timeout for Future based invocations + # - Actor: !! && !!! 
+ # - UntypedActor: sendRequestReply && sendRequestReplyFuture + # - TypedActor: methods with non-void return type + serialize-messages = off # Does a deep clone of (non-primitive) messages to ensure immutability + throughput = 5 # Default throughput for all ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness + throughput-deadline-time = -1 # Default throughput deadline for all ExecutorBasedEventDrivenDispatcher, set to 0 or negative for no deadline + dispatcher-shutdown-timeout = 1 # Using the akka.time-unit, how long dispatchers by default will wait for new actors until they shut down + + default-dispatcher { + type = "GlobalExecutorBasedEventDriven" # Must be one of the following, all "Global*" are non-configurable + # - ExecutorBasedEventDriven + # - ExecutorBasedEventDrivenWorkStealing + # - GlobalExecutorBasedEventDriven + keep-alive-time = 60 # Keep alive time for threads + core-pool-size-factor = 1.0 # No of core threads ... ceil(available processors * factor) + max-pool-size-factor = 4.0 # Max no of threads ... 
ceil(available processors * factor) + executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded + allow-core-timeout = on # Allow core threads to time out + rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard + throughput = 5 # Throughput for ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness + throughput-deadline-time = -1 # Throughput deadline for ExecutorBasedEventDrivenDispatcher, set to 0 or negative for no deadline + mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) + # If positive then a bounded mailbox is used and the capacity is set using the property + # NOTE: setting a mailbox to 'blocking' can be a bit dangerous, + # could lead to deadlock, use with care + # + # The following are only used for ExecutorBasedEventDriven + # and only if mailbox-capacity > 0 + mailbox-push-timeout-time = 10 # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout + # (in unit defined by the time-unit property) + } + } + + stm { + fair = on # Should global transactions be fair or non-fair (non fair yield better performance) + max-retries = 1000 + timeout = 5 # Default timeout for blocking transactions and transaction set (in unit defined by + # the time-unit property) + write-skew = true + blocking-allowed = false + interruptible = false + speculative = true + quick-release = true + propagation = "requires" + trace-level = "none" + } + + jta { + provider = "from-jndi" # Options: - "from-jndi" (means that Akka will try to detect a TransactionManager in the JNDI) + # - "atomikos" (means that Akka will use the Atomikos based JTA impl in 'akka-jta', + # e.g. you need the akka-jta JARs on classpath). 
+ timeout = 60 + } + + http { + hostname = "localhost" + port = 9998 + + #If you are using akka.http.AkkaRestServlet + filters = ["se.scalablesolutions.akka.security.AkkaSecurityFilterFactory"] # List with all jersey filters to use + # resource-packages = ["sample.rest.scala", + # "sample.rest.java", + # "sample.security"] # List with all resource packages for your Jersey services + resource-packages = [] + + # The authentication service to use. Need to be overridden (sample now) + # authenticator = "sample.security.BasicAuthenticationService" + authenticator = "N/A" + + # Uncomment if you are using the KerberosAuthenticationActor + # kerberos { + # servicePrincipal = "HTTP/localhost@EXAMPLE.COM" + # keyTabLocation = "URL to keytab" + # kerberosDebug = "true" + # realm = "EXAMPLE.COM" + # } + kerberos { + servicePrincipal = "N/A" + keyTabLocation = "N/A" + kerberosDebug = "N/A" + realm = "" + } + + #If you are using akka.http.AkkaMistServlet + mist-dispatcher { + #type = "GlobalExecutorBasedEventDriven" # Uncomment if you want to use a different dispatcher than the default one for Comet + } + connection-close = true # toggles the addition of the "Connection" response header with a "close" value + root-actor-id = "_httproot" # the id of the actor to use as the root endpoint + root-actor-builtin = true # toggles the use of the built-in root endpoint base class + timeout = 1000 # the default timeout for all async requests (in ms) + expired-header-name = "Async-Timeout" # the name of the response header to use when an async request expires + expired-header-value = "expired" # the value of the response header to use when an async request expires + } + + remote { + + # secure-cookie = "050E0A0D0D06010A00000900040D060F0C09060B" # generate your own with '$AKKA_HOME/scripts/generate_secure_cookie.sh' or using 'Crypt.generateSecureCookie' + secure-cookie = "" + + compression-scheme = "zlib" # Options: "zlib" (lzf to come), leave out for no compression + 
zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6 + + layer = "akka.remote.netty.NettyRemoteSupport" + + server { + hostname = "localhost" # The hostname or IP that clients should connect to + port = 2552 # The port clients should connect to. Default is 2552 (AKKA) + message-frame-size = 1048576 # Increase this if you want to be able to send messages with large payloads + connection-timeout = 1 + require-cookie = off # Should the remote server require that it peers share the same secure-cookie (defined in the 'remote' section)? + untrusted-mode = off # Enable untrusted mode for full security of server managed actors, allows untrusted clients to connect. + backlog = 4096 # Sets the size of the connection backlog + execution-pool-keepalive = 60# Length in akka.time-unit how long core threads will be kept alive if idling + execution-pool-size = 16# Size of the core pool of the remote execution unit + max-channel-memory-size = 0 # Maximum channel size, 0 for off + max-total-memory-size = 0 # Maximum total size of all channels, 0 for off + } + + client { + buffering { + retry-message-send-on-failure = on + capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) + # If positive then a bounded mailbox is used and the capacity is set using the property + } + reconnect-delay = 5 + read-timeout = 10 + message-frame-size = 1048576 + reap-futures-delay = 5 + reconnection-time-window = 600 # Maximum time window that a client should try to reconnect for + } + } +} +``_ diff --git a/akka-docs/pending/dataflow-java.rst b/akka-docs/pending/dataflow-java.rst new file mode 100644 index 0000000000..a5f1929431 --- /dev/null +++ b/akka-docs/pending/dataflow-java.rst @@ -0,0 +1,189 @@ +Dataflow Concurrency (Java) +=========================== + +Introduction +------------ + +**IMPORTANT: As of Akka 1.1, Akka Future, CompletableFuture and DefaultCompletableFuture have all the functionality of 
DataFlowVariables, they also support non-blocking composition and advanced features like fold and reduce, Akka DataFlowVariable is therefor deprecated and will probably resurface in the following release as a DSL on top of Futures.** + +Akka implements `Oz-style dataflow concurrency `_ through dataflow (single assignment) variables and lightweight (event-based) processes/threads. + +Dataflow concurrency is deterministic. This means that it will always behave the same. If you run it once and it yields output 5 then it will do that **every time**, run it 10 million times, same result. If it on the other hand deadlocks the first time you run it, then it will deadlock **every single time** you run it. Also, there is **no difference** between sequential code and concurrent code. These properties makes it very easy to reason about concurrency. The limitation is that the code needs to be side-effect free, e.g. deterministic. You can't use exceptions, time, random etc., but need to treat the part of your program that uses dataflow concurrency as a pure function with input and output. + +The best way to learn how to program with dataflow variables is to read the fantastic book `Concepts, Techniques, and Models of Computer Programming `_. By Peter Van Roy and Seif Haridi. + +The documentation is not as complete as it should be, something we will improve shortly. For now, besides above listed resources on dataflow concurrency, I recommend you to read the documentation for the GPars implementation, which is heavily influenced by the Akka implementation: +* ``_ +* ``_ + +Dataflow Variables +------------------ + +Dataflow Variable defines three different operations: + +1. Define a Dataflow Variable + +.. code-block:: java + + import static akka.dataflow.DataFlow.*; + + DataFlowVariable x = new DataFlowVariable(); + +2. Wait for Dataflow Variable to be bound + +.. code-block:: java + + x.get(); + +3. Bind Dataflow Variable + +.. 
code-block:: java + + x.set(3); + +A Dataflow Variable can only be bound once. Subsequent attempts to bind the variable will throw an exception. + +You can also shutdown a dataflow variable like this: + +.. code-block:: java + + x.shutdown(); + +Threads +------- + +You can easily create millions lightweight (event-driven) threads on a regular workstation. + +.. code-block:: java + + import static akka.dataflow.DataFlow.*; + import akka.japi.Effect; + + thread(new Effect() { + public void apply() { ... } + }); + +You can also set the thread to a reference to be able to control its life-cycle: + +.. code-block:: java + + import static akka.dataflow.DataFlow.*; + import akka.japi.Effect; + + ActorRef t = thread(new Effect() { + public void apply() { ... } + }); + + ... // time passes + + t.sendOneWay(new Exit()); // shut down the thread + +Examples +-------- + +Most of these examples are taken from the `Oz wikipedia page `_ + +Simple DataFlowVariable example +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This example is from Oz wikipedia page: http://en.wikipedia.org/wiki/Oz_(programming_language). +Sort of the "Hello World" of dataflow concurrency. + +Example in Oz: + +.. code-block:: ruby + + thread + Z = X+Y % will wait until both X and Y are bound to a value. + {Browse Z} % shows the value of Z. + end + thread X = 40 end + thread Y = 2 end + +Example in Akka: + +.. 
code-block:: java + + import static akka.dataflow.DataFlow.*; + import akka.japi.Effect; + + DataFlowVariable x = new DataFlowVariable(); + DataFlowVariable y = new DataFlowVariable(); + DataFlowVariable z = new DataFlowVariable(); + + thread(new Effect() { + public void apply() { + z.set(x.get() + y.get()); + System.out.println("z = " + z.get()); + } + }); + + thread(new Effect() { + public void apply() { + x.set(40); + } + }); + + thread(new Effect() { + public void apply() { + y.set(40); + } + }); + +Example on life-cycle management of DataFlowVariables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Shows how to shutdown dataflow variables and bind threads to values to be able to interact with them (exit etc.). + +Example in Akka: + +.. code-block:: java + import static akka.dataflow.DataFlow.*; + import akka.japi.Effect; + + // create four 'int' data flow variables + DataFlowVariable x = new DataFlowVariable(); + DataFlowVariable y = new DataFlowVariable(); + DataFlowVariable z = new DataFlowVariable(); + DataFlowVariable v = new DataFlowVariable(); + + ActorRef main = thread(new Effect() { + public void apply() { + System.out.println("Thread 'main'") + if (x.get() > y.get()) { + z.set(x); + System.out.println("'z' set to 'x': " + z.get()); + } else { + z.set(y); + System.out.println("'z' set to 'y': " + z.get()); + } + + // main completed, shut down the data flow variables + x.shutdown(); + y.shutdown(); + z.shutdown(); + v.shutdown(); + } + }); + + ActorRef setY = thread(new Effect() { + public void apply() { + System.out.println("Thread 'setY', sleeping..."); + Thread.sleep(5000); + y.set(2); + System.out.println("'y' set to: " + y.get()); + } + }); + + ActorRef setV = thread(new Effect() { + public void apply() { + System.out.println("Thread 'setV'"); + y.set(2); + System.out.println("'v' set to y: " + v.get()); + } + }); + + // shut down the threads + main.sendOneWay(new Exit()); + setY.sendOneWay(new Exit()); + setV.sendOneWay(new Exit()); diff 
--git a/akka-docs/pending/dataflow-scala.rst b/akka-docs/pending/dataflow-scala.rst new file mode 100644 index 0000000000..c935537cae --- /dev/null +++ b/akka-docs/pending/dataflow-scala.rst @@ -0,0 +1,232 @@ +Dataflow Concurrency (Scala) +============================ + +Description +----------- + +**IMPORTANT: As of Akka 1.1, Akka Future, CompletableFuture and DefaultCompletableFuture have all the functionality of DataFlowVariables, they also support non-blocking composition and advanced features like fold and reduce, Akka DataFlowVariable is therefor deprecated and will probably resurface in the following release as a DSL on top of Futures.** + +Akka implements `Oz-style dataflow concurrency `_ through dataflow (single assignment) variables and lightweight (event-based) processes/threads. + +Dataflow concurrency is deterministic. This means that it will always behave the same. If you run it once and it yields output 5 then it will do that **every time**, run it 10 million times, same result. If it on the other hand deadlocks the first time you run it, then it will deadlock **every single time** you run it. Also, there is **no difference** between sequential code and concurrent code. These properties makes it very easy to reason about concurrency. The limitation is that the code needs to be side-effect free, e.g. deterministic. You can't use exceptions, time, random etc., but need to treat the part of your program that uses dataflow concurrency as a pure function with input and output. + +The best way to learn how to program with dataflow variables is to read the fantastic book `Concepts, Techniques, and Models of Computer Programming `_. By Peter Van Roy and Seif Haridi. + +The documentation is not as complete as it should be, something we will improve shortly. 
For now, besides above listed resources on dataflow concurrency, I recommend you to read the documentation for the GPars implementation, which is heavily influenced by the Akka implementation: + +* ``_ +* ``_ + +Dataflow Variables +------------------ + +Dataflow Variable defines three different operations: + +1. Define a Dataflow Variable + +.. code-block:: scala + + val x = new DataFlowVariable[Int] + +2. Wait for Dataflow Variable to be bound + +.. code-block:: scala + + x() + +3. Bind Dataflow Variable + +.. code-block:: scala + + x << 3 + +A Dataflow Variable can only be bound once. Subsequent attempts to bind the variable will throw an exception. + +You can also shutdown a dataflow variable like this: + +.. code-block:: scala + + x.shutdown + +Threads +------- + +You can easily create millions lightweight (event-driven) threads on a regular workstation. + +.. code-block:: scala + + thread { ... } + +You can also set the thread to a reference to be able to control its life-cycle: + +.. code-block:: scala + + val t = thread { ... } + + ... // time passes + + t ! 'exit // shut down the thread + +Examples +-------- + +Most of these examples are taken from the `Oz wikipedia page `_ + +To run these examples: + +1. Start REPL + +:: + + $ sbt + > project akka-actor + > console + +:: + + Welcome to Scala version 2.8.0.final (Java HotSpot(TM) 64-Bit Server VM, Java 1.6.0_22). + Type in expressions to have them evaluated. + Type :help for more information. + + scala> + +2. Paste the examples (below) into the Scala REPL. +Note: Do not try to run the Oz version, it is only there for reference. + +3. Have fun. + +Simple DataFlowVariable example +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This example is from Oz wikipedia page: http://en.wikipedia.org/wiki/Oz_(programming_language). +Sort of the "Hello World" of dataflow concurrency. + +Example in Oz: + +.. code-block:: ruby + + thread + Z = X+Y % will wait until both X and Y are bound to a value. 
+ {Browse Z} % shows the value of Z. + end + thread X = 40 end + thread Y = 2 end + +Example in Akka: + +.. code-block:: scala + + import akka.dataflow.DataFlow._ + + val x, y, z = new DataFlowVariable[Int] + + thread { + z << x() + y() + println("z = " + z()) + } + thread { x << 40 } + thread { y << 2 } + +Example of using DataFlowVariable with recursion +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Using DataFlowVariable and recursion to calculate sum. + +Example in Oz: + +.. code-block:: ruby + + fun {Ints N Max} + if N == Max then nil + else + {Delay 1000} + N|{Ints N+1 Max} + end + end + + fun {Sum S Stream} + case Stream of nil then S + [] H|T then S|{Sum H+S T} end + end + + local X Y in + thread X = {Ints 0 1000} end + thread Y = {Sum 0 X} end + {Browse Y} + end + +Example in Akka: + +.. code-block:: scala + + import akka.dataflow.DataFlow._ + + def ints(n: Int, max: Int): List[Int] = + if (n == max) Nil + else n :: ints(n + 1, max) + + def sum(s: Int, stream: List[Int]): List[Int] = stream match { + case Nil => s :: Nil + case h :: t => s :: sum(h + s, t) + } + + val x = new DataFlowVariable[List[Int]] + val y = new DataFlowVariable[List[Int]] + + thread { x << ints(0, 1000) } + thread { y << sum(0, x()) } + thread { println("List of sums: " + y()) } + +Example on life-cycle management of DataFlowVariables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Shows how to shutdown dataflow variables and bind threads to values to be able to interact with them (exit etc.). + +Example in Akka: + +.. 
code-block:: scala + import akka.dataflow.DataFlow._ + + // create four 'Int' data flow variables + val x, y, z, v = new DataFlowVariable[Int] + + val main = thread { + println("Thread 'main'") + + x << 1 + println("'x' set to: " + x()) + + println("Waiting for 'y' to be set...") + + if (x() > y()) { + z << x + println("'z' set to 'x': " + z()) + } else { + z << y + println("'z' set to 'y': " + z()) + } + + // main completed, shut down the data flow variables + x.shutdown + y.shutdown + z.shutdown + v.shutdown + } + + val setY = thread { + println("Thread 'setY', sleeping...") + Thread.sleep(5000) + y << 2 + println("'y' set to: " + y()) + } + + val setV = thread { + println("Thread 'setV'") + v << y + println("'v' set to 'y': " + v()) + } + + // shut down the threads + main ! 'exit + setY ! 'exit + setV ! 'exit diff --git a/akka-docs/pending/deployment-scenarios.rst b/akka-docs/pending/deployment-scenarios.rst new file mode 100644 index 0000000000..9c67cda10d --- /dev/null +++ b/akka-docs/pending/deployment-scenarios.rst @@ -0,0 +1,100 @@ + + +Use-case and Deployment Scenarios +================================= + += + +How and in which use-case and deployment scenarios can I use Akka? +================================================================== + +Akka can be used in two different ways: +* As a library: used as a regular JAR on the classpath and/or in a web app, to be put into ‘WEB-INF/lib’ +* As a microkernel: stand-alone microkernel, embedding a servlet container along with many other services. + +Using Akka as library +--------------------- + +This is most likely what you want if you are building Web applications. +There are several ways you can use Akka in Library mode by adding more and more modules to the stack. + +Actors as services +^^^^^^^^^^^^^^^^^^ + +The simplest way you can use Akka is to use the actors as services in your Web application. All that’s needed to do that is to put the Akka charts as well as its dependency jars into ‘WEB-INF/lib’. 
You also need to put the ‘akka.conf’ config file in the ‘$AKKA_HOME/config’ directory. +Now you can create your Actors as regular services referenced from your Web application. You should also be able to use the Remoting service, e.g. be able to make certain Actors remote on other hosts. Please note that the remoting service does not speak HTTP over port 80, but a custom protocol over the port specified in ‘akka.conf’. +``_ + +^ + +Actors as services with Software Transactional Memory (STM) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As in the above, but with the addition of using the STM module to allow transactional memory across many Actors (no persistence, just in-memory). +``_ + +^ + +Actors as services with Persistence module as cache +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As in the above, but with the addition of using the Persistence module to allow transactional persistent cache. In this use case scenario you would still use a regular relational database (RDBMS) but use Akka’s transactional persistent storage as a performant scalable cache alongside the RDBMS. +``_ + +^ + +Actors as services with Persistence module as primary storage/Service of Record (SoR) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As in the above, but with the addition of using the Persistence module as the primary storage/SoR. In this use case you wouldn’t use a RDBMS at all but rely on one of the Akka backends (Cassandra, Terracotta, Redis, MongoDB etc.) as transactional persistent storage. This is great if you have either high performance, scalability or high-availability requirements where a RDBMS would be either single point of failure or single point of bottleneck or just be too slow. +If the storage API (Maps, Vectors or Refs) is too constrained for some use cases we can bypass it and use the storage directly. However, please note that then we will lose the transactional semantics. 
+``_ + +^ + +Actors as REST/Comet (push) services +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can also expose your library Actors directly as REST (`JAX `_`-RS `_) or Comet (`Atmosphere `_) services by deploying the ‘AkkaServlet’ in your servlet container. In order for this to work you need to define a so-called “boot” class which bootstraps the Actor configuration, wiring and startup. This is done in the ‘akka.conf’ file. +``_ + +- + +Using Akka as a stand-alone microkernel +--------------------------------------- + +Akka can also be run as a stand-alone microkernel. It implements a full enterprise stack: + +^ + +Web/REST/Comet layer +^^^^^^^^^^^^^^^^^^^^ + +Akka currently embeds the `Grizzly/GlassFish `_ servlet container (but will soon be pluggable with Jetty as well) which allows you to build REST-based services using `JAX `_`-RS `_ and Comet-based services using `Atmosphere `_ as well as regular Web applications using JAX-RS’s `implicit views `_ (see also `James Strachan’s article `_). + +^ + +Service layer +^^^^^^^^^^^^^ + +The service layer is implemented using fault-tolerant, asynchronous, throttled message passing; like `SEDA-in-a-box `_ using Actors. + +Persistence layer +^^^^^^^^^^^^^^^^^ + + Implemented using pluggable storage engines for both partitioned distributed massively scalable storage (like Cassandra) as well as single node storage (like MongoDB). Different storage engines provide different consistency/availability trade-offs, implementing either Eventual Consistency (BASE) or Atomicity (ACID). + +Monitoring and Management layer +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Providing both JMX management and monitoring as well as w3c logging. + ``_ + +Use BivySack for packaging your application +------------------------------------------- + +"BivySack" For Akka - SBT plugin which creates a full akka microkernel deployment for your project. + +Quick and dirty SBT Plugin for creating Akka Microkernel deployments of your SBT Project. 
This creates a proper "akka deploy" setup with all of your dependencies and configuration files loaded, with a bootable version of your project that you can run cleanly. + +Read more about it here ``_. diff --git a/akka-docs/pending/developer-guidelines.rst b/akka-docs/pending/developer-guidelines.rst new file mode 100644 index 0000000000..bf2e9dad26 --- /dev/null +++ b/akka-docs/pending/developer-guidelines.rst @@ -0,0 +1,44 @@ +Developer Guidelines +==================== + +Code Style +---------- + +The Akka code style follows `this document `_ . + +Here is a code style settings file for IntelliJ IDEA. +``_ + +Please follow the code style. Look at the code around you and mimic. + +Testing +------- + +All code that is checked in should have tests. All testing is done with ScalaTest and ScalaCheck. + +* Name tests as *Test.scala if they do not depend on any external stuff. That keeps surefire happy. +* Name tests as *Spec.scala if they have external dependencies. + +There is a testing standard that should be followed: `Ticket001Spec <@https://github.com/jboner/akka/blob/master/akka-actor/src/test/scala/akka/ticket/Ticket001Spec.scala>`_ + +Actor TestKit +^^^^^^^^^^^^^ + +There is a useful test kit for testing actors: `akka.util.TestKit <@https://github.com/jboner/akka/tree/master/akka-actor/src/main/scala/akka/util/TestKit.scala>`_. It enables assertions concerning replies received and their timing, there is more documentation in the ``_ module. + +NetworkFailureTest +^^^^^^^^^^^^^^^^^^ + +You can use the 'NetworkFailureTest' trait to test network failure. See the 'RemoteErrorHandlingNetworkTest' test. Your tests needs to end with 'NetworkTest'. They are disabled by default. To run them you need to enable a flag. + +Example: + +:: + + project akka-remote + set akka.test.network true + test-only akka.actor.remote.RemoteErrorHandlingNetworkTest + +It uses 'ipfw' for network management. 
Mac OSX comes with it installed but if you are on another platform you might need to install it yourself. Here is a port: + +``_ diff --git a/akka-docs/pending/dispatchers-java.rst b/akka-docs/pending/dispatchers-java.rst new file mode 100644 index 0000000000..3aa1a34f13 --- /dev/null +++ b/akka-docs/pending/dispatchers-java.rst @@ -0,0 +1,328 @@ +Dispatchers (Java) +================== + +Module stability: **SOLID** + +The Dispatcher is an important piece that allows you to configure the right semantics and parameters for optimal performance, throughput and scalability. Different Actors have different needs. + +Akka supports dispatchers for both event-driven lightweight threads, allowing creation of millions threads on a single workstation, and thread-based Actors, where each dispatcher is bound to a dedicated OS thread. + +The event-based Actors currently consume ~600 bytes per Actor which means that you can create more than 6.5 million Actors on 4 G RAM. + +Default dispatcher +------------------ + +For most scenarios the default settings are the best. Here we have one single event-based dispatcher for all Actors created. The dispatcher used is this one: + +.. code-block:: java + + Dispatchers.globalExecutorBasedEventDrivenDispatcher(); + +But if you feel that you are starting to contend on the single dispatcher (the 'Executor' and its queue) or want to group a specific set of Actors for a dedicated dispatcher for better flexibility and configurability then you can override the defaults and define your own dispatcher. See below for details on which ones are available and how they can be configured. + +Setting the dispatcher +---------------------- + +Normally you set the dispatcher from within the Actor itself. The dispatcher is defined by the 'dispatcher: MessageDispatcher' member field in 'ActorRef'. + +.. code-block:: java + + class MyActor extends UntypedActor { + public MyActor() { + getContext().setDispatcher(..); // set the dispatcher + } + ... 
+ } + +You can also set the dispatcher for an Actor **before** it has been started: + +.. code-block:: java + + actorRef.setDispatcher(dispatcher); + +Types of dispatchers +-------------------- + +There are five different types of message dispatchers: + +* Thread-based +* Event-based +* Priority event-based +* Work-stealing event-based +* HawtDispatch-based event-driven + +Factory methods for all of these, including global versions of some of them, are in the 'akka.dispatch.Dispatchers' object. + +Let's now walk through the different dispatchers in more detail. + +Thread-based +^^^^^^^^^^^^ + +The 'ThreadBasedDispatcher' binds a dedicated OS thread to each specific Actor. The messages are posted to a 'LinkedBlockingQueue' which feeds the messages to the dispatcher one by one. A 'ThreadBasedDispatcher' cannot be shared between actors. This dispatcher has worse performance and scalability than the event-based dispatcher but works great for creating "daemon" Actors that consume a low frequency of messages and are allowed to go off and do their own thing for a longer period of time. Another advantage with this dispatcher is that Actors do not block threads for each other. + +.. code-block:: java + + Dispatcher dispatcher = Dispatchers.newThreadBasedDispatcher(actorRef); + +It would normally be used from within the actor like this: + +.. code-block:: java + + class MyActor extends UntypedActor { + public MyActor() { + getContext().setDispatcher(Dispatchers.newThreadBasedDispatcher(getContext())); + } + ... + } + +Event-based +^^^^^^^^^^^ + +The 'ExecutorBasedEventDrivenDispatcher' binds a set of Actors to a thread pool backed up by a 'BlockingQueue'. This dispatcher is highly configurable and supports a fluent configuration API to configure the 'BlockingQueue' (type of queue, max items etc.) as well as the thread pool. + +The event-driven dispatchers **must be shared** between multiple Typed Actors and/or Actors. One best practice is to let each top-level Actor, e.g. 
the Actors you define in the declarative supervisor config, to get their own dispatcher but reuse the dispatcher for each new Actor that the top-level Actor creates. But you can also share a dispatcher between multiple top-level Actors. This is very use-case specific and needs to be tried out on a case by case basis. The important thing is that Akka tries to provide you with the freedom you need to design and implement your system in the most efficient way in regards to performance, throughput and latency. + +It comes with many different predefined BlockingQueue configurations: +* Bounded LinkedBlockingQueue +* Unbounded LinkedBlockingQueue +* Bounded ArrayBlockingQueue +* Unbounded ArrayBlockingQueue +* SynchronousQueue + +You can also set the rejection policy that should be used, e.g. what should be done if the dispatcher (e.g. the Actor) can't keep up and the mailbox is growing up to the limit defined. You can choose between four different rejection policies: + +* java.util.concurrent.ThreadPoolExecutor.CallerRuns - will run the message processing in the caller's thread as a way to slow it down and balance producer/consumer +* java.util.concurrent.ThreadPoolExecutor.AbortPolicy - rejects messages by throwing a 'RejectedExecutionException' +* java.util.concurrent.ThreadPoolExecutor.DiscardPolicy - discards the message (throws it away) +* java.util.concurrent.ThreadPoolExecutor.DiscardOldestPolicy - discards the oldest message in the mailbox (throws it away) + +You can read more about these policies `here `_. + +Here is an example: + +.. code-block:: java + + class MyActor extends UntypedActor { + public MyActor() { + getContext().setDispatcher(Dispatchers.newExecutorBasedEventDrivenDispatcher(name) + .withNewThreadPoolWithBoundedBlockingQueue(100) + .setCorePoolSize(16) + .setMaxPoolSize(128) + .setKeepAliveTimeInMillis(60000) + .setRejectionPolicy(new CallerRunsPolicy()) + .build()); + } + ... 
+ } + +This 'ExecutorBasedEventDrivenDispatcher' allows you to define the 'throughput' it should have. This defines the number of messages for a specific Actor the dispatcher should process in one single sweep. +Setting this to a higher number will increase throughput but lower fairness, and vice versa. If you don't specify it explicitly then it uses the default value defined in the 'akka.conf' configuration file: + +.. code-block:: xml + + actor { + throughput = 5 + } + +If you don't define the 'throughput' option in the configuration file then the default value of '5' will be used. + +Browse the `ScalaDoc `_ or look at the code for all the options available. + +Priority event-based +^^^^^^^^^^^^^^^^^^^^ + +Sometimes it's useful to be able to specify a priority order of messages; this is done by using PriorityExecutorBasedEventDrivenDispatcher and supplying +a java.util.Comparator[MessageInvocation] or using an akka.dispatch.PriorityGenerator (recommended): + +Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator in Java: + +.. 
code-block:: java + + package some.package; + + import akka.actor.*; + import akka.dispatch.*; + + public class Main { + // A simple Actor that just prints the messages it processes + public static class MyActor extends UntypedActor { + public void onReceive(Object message) throws Exception { + System.out.println(message); + } + } + + public static void main(String[] args) { + // Create a new PriorityGenerator, lower prio means more important + PriorityGenerator gen = new PriorityGenerator() { + public int gen(Object message) { + if (message == "highpriority") return 0; // "highpriority" messages should be treated first if possible + else if (message == "lowpriority") return 100; // "lowpriority" messages should be treated last if possible + else return 50; // We default to 50 + } + }; + // We create an instance of the actor that will print out the messages it processes + ActorRef ref = Actors.actorOf(MyActor.class); + // We create a new Priority dispatcher and seed it with the priority generator + ref.setDispatcher(new PriorityExecutorBasedEventDrivenDispatcher("foo", gen)); + + ref.start(); // Start the actor + ref.getDispatcher().suspend(ref); // Suspening the actor so it doesn't start to treat the messages before we have enqueued all of them :-) + ref.sendOneWay("lowpriority"); + ref.sendOneWay("lowpriority"); + ref.sendOneWay("highpriority"); + ref.sendOneWay("pigdog"); + ref.sendOneWay("pigdog2"); + ref.sendOneWay("pigdog3"); + ref.sendOneWay("highpriority"); + ref.getDispatcher().resume(ref); // Resuming the actor so it will start treating its messages + } + } + +Prints: + +highpriority +highpriority +pigdog +pigdog2 +pigdog3 +lowpriority +lowpriority + +Work-stealing event-based +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The 'ExecutorBasedEventDrivenWorkStealingDispatcher' is a variation of the 'ExecutorBasedEventDrivenDispatcher' in which Actors of the same type can be set up to share this dispatcher and during execution time the different actors will steal messages 
from other actors if they have fewer messages to process. This can be a great way to improve throughput at the cost of a little higher latency. + +Normally the way you use it is to create an Actor companion object to hold the dispatcher and then set it in the Actor explicitly. + +.. code-block:: java + + class MyActor extends UntypedActor { + public static Dispatcher dispatcher = Dispatchers.newExecutorEventBasedWorkStealingDispatcher(name); + + public MyActor() { + getContext().setDispatcher(dispatcher); + } + ... + } + +Here is an article with some more information: `Load Balancing Actors with Work Stealing Techniques `_ +Here is another article discussing this particular dispatcher: `Flexible load balancing with Akka in Scala `_ + +HawtDispatch-based event-driven +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The 'HawtDispatcher' uses the `HawtDispatch threading library `_ which is a Java clone of libdispatch. All actors with this type of dispatcher are executed on a single system wide fixed sized thread pool. The number of threads will match the number of cores available on your system. The dispatcher delivers messages to the actors in the order that they were produced at the sender. + +A 'HawtDispatcher' instance can be shared by many actors. Normally the way you use it is to create an Actor companion object to hold the dispatcher and then set it in the Actor explicitly. + +.. code-block:: java + + import akka.actor.dispatch.HawtDispatcher; + + class MyActor extends Actor { + public static Dispatcher dispatcher = new HawtDispatcher(); + + public MyActor() { + getContext().setDispatcher(dispatcher); + } + ... + } + +Since a fixed thread pool is being used, an actor using a 'HawtDispatcher' is restricted to executing non blocking operations. 
For example, the actor is NOT allowed to: +* synchronously call another actor +* call 3rd party libraries that can block +* use sockets that are in blocking mode + +HawtDispatch supports integrating non-blocking Socket IO events with your actors. Every thread in the HawtDispatch thread pool is parked in an IO event loop when it is not executing an actor. The IO events can be configured to be delivered to the actor in either the reactor or proactor style. For an example, see `HawtDispacherEchoServer.scala `_. + +A `HawtDispatcher` will aggregate cross actor messages by default. This means that if Actor *A* is executing and sends actor *B* 10 messages, those messages will not be delivered to actor *B* until *A*'s execution ends. HawtDispatch will aggregate the 10 messages into 1 single enqueue operation on to actor *B*'s inbox. This can significantly reduce mailbox contention when actors are very chatty. If you want to avoid this aggregation behavior, then create the `HawtDispatcher` like this: + +.. code-block:: java + + Dispatcher dispatcher = new HawtDispatcher(false); + +The `HawtDispatcher` provides a companion object that lets you use more advanced HawtDispatch features. For example to pin an actor so that it is always executed on the same thread in the thread pool you would: + +.. code-block:: java + + ActorRef a = ... + HawtDispatcher.pin(a); + +If you have an Actor *b* which will be sending many messages to an Actor *a*, then you may want to consider setting *b*'s dispatch target to be *a*'s dispatch queue. When this is the case, messages sent from *b* to *a* will avoid cross thread mailbox contention. A side-effect of this is that the *a* and *b* actors will execute as if they shared a single mailbox. + +.. code-block:: java + + ActorRef a = ... + ActorRef b = ... + HawtDispatcher.target(b, HawtDispatcher.queue(a)); + +**Java API** + +.. 
code-block:: java + + MessageDispatcher dispatcher = Dispatchers.newExecutorEventBasedThreadPoolDispatcher(name); + +The dispatcher for an Typed Actor can be defined in the declarative configuration: + +.. code-block:: java + + ... // part of configuration + new Component( + MyTypedActor.class, + MyTypedActorImpl.class, + new LifeCycle(new Permanent()), + dispatcher, // <<== set it here + 1000); + ... + +It can also be set when creating a new Typed Actor programmatically. + +.. code-block:: java + + MyPOJO pojo = (MyPOJO) TypedActor.newInstance(MyPOJO.class, MyPOJOImpl.class, 1000, dispatcher); + +Making the Actor mailbox bounded +-------------------------------- + +Global configuration +^^^^^^^^^^^^^^^^^^^^ + +You can make the Actor mailbox bounded by a capacity in two ways. Either you define it in the configuration file under 'default-dispatcher'. This will set it globally. + +.. code-block:: ruby + + actor { + default-dispatcher { + mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) + # If positive then a bounded mailbox is used and the capacity is set to the number specificed + } + } + +Per-instance based configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can also do it on a specific dispatcher instance. + +For the 'ExecutorBasedEventDrivenDispatcher' and the 'ExecutorBasedWorkStealingDispatcher' you can do it through their constructor + +.. code-block:: java + + class MyActor extends UntypedActor { + public MyActor() { + getContext().setDispatcher(Dispatchers.newExecutorBasedEventDrivenDispatcher(name, throughput, mailboxCapacity)); + } + ... + } + +For the 'ThreadBasedDispatcher', it is non-shareable between actors, and associates a dedicated Thread with the actor. +Making it bounded (by specifying a capacity) is optional, but if you do, you need to provide a pushTimeout (default is 10 seconds). 
When trying to send a message to the Actor it will throw a MessageQueueAppendFailedException("BlockingMessageTransferQueue transfer timed out") if the message cannot be added to the mailbox within the time specified by the pushTimeout. + +``_ +class MyActor extends UntypedActor { + public MyActor() { + getContext().setDispatcher(Dispatchers.newThreadBasedDispatcher(getContext(), mailboxCapacity, pushTimeout, pushTimeUnit)); + } + ... +} +``_ diff --git a/akka-docs/pending/dispatchers-scala.rst b/akka-docs/pending/dispatchers-scala.rst new file mode 100644 index 0000000000..9b67b7b58e --- /dev/null +++ b/akka-docs/pending/dispatchers-scala.rst @@ -0,0 +1,274 @@ +Dispatchers (Scala) +=================== + +Module stability: **SOLID** + +The Dispatcher is an important piece that allows you to configure the right semantics and parameters for optimal performance, throughput and scalability. Different Actors have different needs. + +Akka supports dispatchers for both event-driven lightweight threads, allowing creation of millions threads on a single workstation, and thread-based Actors, where each dispatcher is bound to a dedicated OS thread. + +The event-based Actors currently consume ~600 bytes per Actor which means that you can create more than 6.5 million Actors on 4 G RAM. + +Default dispatcher +------------------ + +For most scenarios the default settings are the best. Here we have one single event-based dispatcher for all Actors created. The dispatcher used is this one: + +.. code-block:: scala + + Dispatchers.globalExecutorBasedEventDrivenDispatcher + +But if you feel that you are starting to contend on the single dispatcher (the 'Executor' and its queue) or want to group a specific set of Actors for a dedicated dispatcher for better flexibility and configurability then you can override the defaults and define your own dispatcher. See below for details on which ones are available and how they can be configured. 
+ +Setting the dispatcher +---------------------- + +Normally you set the dispatcher from within the Actor itself. The dispatcher is defined by the 'dispatcher: MessageDispatcher' member field in 'ActorRef'. + +.. code-block:: scala + + class MyActor extends Actor { + self.dispatcher = ... // set the dispatcher + ... + } + +You can also set the dispatcher for an Actor **before** it has been started: + +.. code-block:: scala + + actorRef.dispatcher = dispatcher + +Types of dispatchers +-------------------- + +There are six different types of message dispatchers: + +* Thread-based +* Event-based +* Priority event-based +* Work-stealing +* HawtDispatch-based event-driven + +Factory methods for all of these, including global versions of some of them, are in the 'akka.dispatch.Dispatchers' object. + +Let's now walk through the different dispatchers in more detail. + +Event-based +^^^^^^^^^^^ + +The 'ExecutorBasedEventDrivenDispatcher' binds a set of Actors to a thread pool backed up by a 'BlockingQueue'. This dispatcher is highly configurable and supports a fluent configuration API to configure the 'BlockingQueue' (type of queue, max items etc.) as well as the thread pool. + +The event-driven dispatchers **must be shared** between multiple Actors. One best practice is to let each top-level Actor, e.g. the Actors you define in the declarative supervisor config, to get their own dispatcher but reuse the dispatcher for each new Actor that the top-level Actor creates. But you can also share dispatcher between multiple top-level Actors. This is very use-case specific and needs to be tried out on a case by case basis. The important thing is that Akka tries to provide you with the freedom you need to design and implement your system in the most efficient way in regards to performance, throughput and latency. 
+ +It comes with many different predefined BlockingQueue configurations: +* Bounded LinkedBlockingQueue +* Unbounded LinkedBlockingQueue +* Bounded ArrayBlockingQueue +* Unbounded ArrayBlockingQueue +* SynchronousQueue + +You can also set the rejection policy that should be used, e.g. what should be done if the dispatcher (e.g. the Actor) can't keep up and the mailbox is growing up to the limit defined. You can choose between four different rejection policies: + +* java.util.concurrent.ThreadPoolExecutor.CallerRuns - will run the message processing in the caller's thread as a way to slow it down and balance producer/consumer +* java.util.concurrent.ThreadPoolExecutor.AbortPolicy - rejects messages by throwing a 'RejectedExecutionException' +* java.util.concurrent.ThreadPoolExecutor.DiscardPolicy - discards the message (throws it away) +* java.util.concurrent.ThreadPoolExecutor.DiscardOldestPolicy - discards the oldest message in the mailbox (throws it away) + +You can read more about these policies `here `_. + +Here is an example: + +.. code-block:: scala + + class MyActor extends Actor { + self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(name) + .withNewThreadPoolWithBoundedBlockingQueue(100) + .setCorePoolSize(16) + .setMaxPoolSize(128) + .setKeepAliveTimeInMillis(60000) + .setRejectionPolicy(new CallerRunsPolicy) + .build + ... + } + +This 'ExecutorBasedEventDrivenDispatcher' allows you to define the 'throughput' it should have. This defines the number of messages for a specific Actor the dispatcher should process in one single sweep. +Setting this to a higher number will increase throughput but lower fairness, and vice versa. If you don't specify it explicitly then it uses the default value defined in the 'akka.conf' configuration file: + +.. code-block:: ruby + + actor { + throughput = 5 + } + +If you don't define the 'throughput' option in the configuration file then the default value of '5' will be used. 
+ +Browse the `ScalaDoc `_ or look at the code for all the options available. + +Priority event-based +^^^^^^^^^^^ + +Sometimes it's useful to be able to specify priority order of messages, that is done by using PriorityExecutorBasedEventDrivenDispatcher and supply +a java.util.Comparator[MessageInvocation] or use a akka.dispatch.PriorityGenerator (recommended): + +Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator in Java: + +.. code-block:: scala + + import akka.dispatch._ + + import akka.actor._ + + val gen = PriorityGenerator { // Create a new PriorityGenerator, lower prio means more important + case 'highpriority => 0 // 'highpriority messages should be treated first if possible + case 'lowpriority => 100 // 'lowpriority messages should be treated last if possible + case otherwise => 50 // We default to 50 + } + + val a = Actor.actorOf( // We create a new Actor that just prints out what it processes + new Actor { + def receive = { + case x => println(x) + } + }) + + // We create a new Priority dispatcher and seed it with the priority generator + a.dispatcher = new PriorityExecutorBasedEventDrivenDispatcher("foo", gen) + a.start // Start the Actor + + a.dispatcher.suspend(a) // Suspening the actor so it doesn't start to treat the messages before we have enqueued all of them :-) + + a ! 'lowpriority + + a ! 'lowpriority + + a ! 'highpriority + + a ! 'pigdog + + a ! 'pigdog2 + + a ! 'pigdog3 + + a ! 
'highpriority + + a.dispatcher.resume(a) // Resuming the actor so it will start treating its messages + +Prints: + +'highpriority +'highpriority +'pigdog +'pigdog2 +'pigdog3 +'lowpriority +'lowpriority + +Work-stealing event-based +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The 'ExecutorBasedEventDrivenWorkStealingDispatcher' is a variation of the 'ExecutorBasedEventDrivenDispatcher' in which Actors of the same type can be set up to share this dispatcher and during execution time the different actors will steal messages from other actors if they have less messages to process. This can be a great way to improve throughput at the cost of a little higher latency. + +Normally the way you use it is to create an Actor companion object to hold the dispatcher and then set in in the Actor explicitly. + +.. code-block:: scala + + object MyActor { + val dispatcher = Dispatchers.newExecutorEventBasedWorkStealingDispatcher(name) + } + + class MyActor extends Actor { + self.dispatcher = MyActor.dispatcher + ... + } + +Here is an article with some more information: `Load Balancing Actors with Work Stealing Techniques `_ +Here is another article discussing this particular dispatcher: `Flexible load balancing with Akka in Scala `_ + +HawtDispatch-based event-driven +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The 'HawtDispatcher' uses the `HawtDispatch threading library `_ which is a Java clone of libdispatch. All actors with this type of dispatcher are executed on a single system wide fixed sized thread pool. The number of of threads will match the number of cores available on your system. The dispatcher delivers messages to the actors in the order that they were producer at the sender. + +A 'HawtDispatcher' instance can be shared by many actors. Normally the way you use it is to create an Actor companion object to hold the dispatcher and then set in in the Actor explicitly. + +.. 
code-block:: scala + + import akka.dispatch.HawtDispatcher + + object MyActor { + val dispatcher = new HawtDispatcher + } + + class MyActor extends Actor { + self.dispatcher = MyActor.dispatcher + ... + } + +Since a fixed thread pool is being used, an actor using a 'HawtDispatcher' is restricted to executing non blocking operations. For example, the actor is NOT allowed to: +* synchronously call another actor +* call 3rd party libraries that can block +* use sockets that are in blocking mode + +HawtDispatch supports integrating non-blocking Socket IO events with your actors. Every thread in the HawtDispatch thread pool is parked in an IO event loop when it is not executing an actor. The IO events can be configured to be delivered to the actor in either the reactor or proactor style. For an example, see `HawtDispacherEchoServer.scala `_. + +A `HawtDispatcher` will aggregate cross actor messages by default. This means that if Actor *A* is executing and sends actor *B* 10 messages, those messages will not be delivered to actor *B* until *A*'s execution ends. HawtDispatch will aggregate the 10 messages into 1 single enqueue operation on to actor *B*'s inbox. This can significantly reduce mailbox contention when actors are very chatty. If you want to avoid this aggregation behavior, then create the `HawtDispatcher` like this: + +.. code-block:: scala + + val dispatcher = new HawtDispatcher(false) + +The `HawtDispatcher` provides a companion object that lets you use more advanced HawtDispatch features. For example to pin an actor so that it is always executed on the same thread in the thread pool you would: + +.. code-block:: scala + + val a: ActorRef = ... + HawtDispatcher.pin(a) + +If you have an Actor *b* which will be sending many messages to an Actor *a*, then you may want to consider setting *b*'s dispatch target to be *a*'s dispatch queue. When this is the case, messages sent from *b* to *a* will avoid cross thread mailbox contention. 
A side-effect of this is that the *a* and *b* actors will execute as if they shared a single mailbox. + +.. code-block:: scala + + val a: ActorRef = ... + val b: ActorRef = ... + HawtDispatcher.target(b, HawtDispatcher.queue(a)) + +Making the Actor mailbox bounded +-------------------------------- + +Global configuration +^^^^^^^^^^^^^^^^^^^^ + +You can make the Actor mailbox bounded by a capacity in two ways. Either you define it in the configuration file under 'default-dispatcher'. This will set it globally. + +.. code-block:: ruby + + actor { + default-dispatcher { + mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) + # If positive then a bounded mailbox is used and the capacity is set to the number specificed + } + } + +Per-instance based configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can also do it on a specific dispatcher instance. + +For the 'ExecutorBasedEventDrivenDispatcher' and the 'ExecutorBasedWorkStealingDispatcher' you can do it through their constructor + +.. code-block:: scala + + class MyActor extends Actor { + self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(name, throughput, mailboxCapacity) + ... + } + +For the 'ThreadBasedDispatcher', it is non-shareable between actors, and associates a dedicated Thread with the actor. +Making it bounded (by specifying a capacity) is optional, but if you do, you need to provide a pushTimeout (default is 10 seconds). When trying to send a message to the Actor it will throw a MessageQueueAppendFailedException("BlockingMessageTransferQueue transfer timed out") if the message cannot be added to the mailbox within the time specified by the pushTimeout. + +``_ +class MyActor extends Actor { + self.dispatcher = Dispatchers.newThreadBasedDispatcher(self, mailboxCapacity, pushTimeout, pushTimeoutUnit) + ... 
+} +``_ diff --git a/akka-docs/pending/event-handler.rst b/akka-docs/pending/event-handler.rst new file mode 100644 index 0000000000..18eefefb0a --- /dev/null +++ b/akka-docs/pending/event-handler.rst @@ -0,0 +1,96 @@ +Event Handler +============= + +There is an Event Handler which takes the place of a logging system in Akka: + +.. code-block:: scala + + akka.event.EventHandler + +You can configure which event handlers should be registered at boot time. That is done using the 'event-handlers' element in akka.conf. Here you can also define the log level. + +.. code-block:: ruby + + akka { + event-handlers = ["akka.event.EventHandler$DefaultListener"] # event handlers to register at boot time (EventHandler$DefaultListener logs to STDOUT) + event-handler-level = "DEBUG" # Options: ERROR, WARNING, INFO, DEBUG + } + +The default one logs to STDOUT and is registered by default. It is not intended to be used for production. There is also an SLF4J event handler available in the 'akka-slf4j.jar' module. Read more about it `here `_. + +Example of creating a listener from Scala (from Java you just have to create an 'UntypedActor' and create a handler for these messages): + +.. code-block:: scala + + val errorHandlerEventListener = Actor.actorOf(new Actor { + self.dispatcher = EventHandler.EventHandlerDispatcher + + def receive = { + case EventHandler.Error(cause, instance, message) => ... + case EventHandler.Warning(instance, message) => ... + case EventHandler.Info(instance, message) => ... + case EventHandler.Debug(instance, message) => ... + case genericEvent => ... + } + }) + +To add the listener: + +.. code-block:: scala + + EventHandler.addListener(errorHandlerEventListener) + +To remove the listener: + +.. code-block:: scala + + EventHandler.removeListener(errorHandlerEventListener) + +To log an event: + +.. 
code-block:: scala + + EventHandler.notify(EventHandler.Error(exception, this, message)) + + EventHandler.notify(EventHandler.Warning(this, message)) + + EventHandler.notify(EventHandler.Info(this, message)) + + EventHandler.notify(EventHandler.Debug(this, message)) + + EventHandler.notify(object) + +You can also use one of the direct methods (for a bit better performance): + +.. code-block:: scala + + EventHandler.error(exception, this, message) + + EventHandler.error(this, message) + + EventHandler.warning(this, message) + + EventHandler.info(this, message) + + EventHandler.debug(this, message) + +The event handler allows you to send an arbitrary object to the handler which you can handle in your event handler listener. The default listener prints it's toString String out to STDOUT. + +.. code-block:: scala + + EventHandler.notify(anyRef) + +The methods take a call-by-name parameter for the message to avoid object allocation and execution if level is disabled. The following formatting function will not be evaluated if level is INFO, WARNING, or ERROR. + +.. code-block:: scala + + EventHandler.debug(this, "Processing took %s ms".format(duration)) + +From Java you need to nest the call in an if statement to achieve the same thing. + +``_ +if (EventHandler.isDebugEnabled()) { + EventHandler.debug(this, String.format("Processing took %s ms", duration)); +} + +``_ diff --git a/akka-docs/pending/external-sample-projects.rst b/akka-docs/pending/external-sample-projects.rst new file mode 100644 index 0000000000..35a54c3c80 --- /dev/null +++ b/akka-docs/pending/external-sample-projects.rst @@ -0,0 +1,242 @@ +External Sample Projects +======================== + +Here are some external sample projects created by Akka's users. + +Camel in Action - Akka samples +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Akka samples for the upcoming Camel in Action book by Martin Krasser. 
+``_ + +CQRS impl using Scalaz and Akka +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +An implementation of CQRS using scalaz for functional domain models and Akka for event sourcing. +``_ + +Example of using Comet with Akka Mist +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Movie store +^^^^^^^^^^^ + +Code for a book on Scala/Akka. +Showcasing Remote Actors. +``_ + +Estimating Pi with Akka +^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Running Akka on Android +^^^^^^^^^^^^^^^^^^^^^^^ + +Sample showing Dining Philosophers running in UI on Android. +``_ +``_ + +Remote chat application using Java API +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Remote chat application using Java API +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A sample chat application using the Java API for Akka. +Port of the Scala API chat sample application in the Akka repository. +``_ + +Sample parallel computing with Akka and Scala API +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka, Facebook Graph API, WebGL sample +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Showcasing Akka Mist HTTP module +``_ + +Akka Mist Sample +^^^^^^^^^^^^^^^^ + +``_ + +Another Akka Mist Sample +^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Bank application +^^^^^^^^^^^^^^^^ + +Showcasing Transactors and STM. +``_ + +Ant simulation 1 +^^^^^^^^^^^^^^^^ + +Traveling salesman problem. Inspired by Clojure's Ant demo. Uses SPDE for GUI. Idiomatic Scala/Akka code. +Good example on how to use Actors and STM +``_ + +Ant simulation 2 +^^^^^^^^^^^^^^^^ + +Traveling salesman problem. Close to straight port by Clojure's Ant demo. Uses Swing for GUI. 
+Another nice example on how to use Actors and STM +``_ + +The santa clause STM example by SPJ using Akka +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka trading system +^^^^^^^^^^^^^^^^^^^ + +``_ + +Snowing version of Game of Life in Akka +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka Web (REST/Comet) template project +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A sbt-based, scala Akka project that sets up a web project with REST and comet support +``_ + +Various samples on how to use Akka +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +From the May Chciago-Area Scala Enthusiasts Meeting +``_ + +Absurd concept for a ticket sales & inventory system, using Akka framework +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka sports book sample: Java API +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Sample of using the Finite State Machine (FSM) DSL +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka REST, Jetty, SBT template project +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Great starting point for building an Akka application. 
+``_ + +Samples of various Akka features (in Scala) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ +Fork at ``_ + +A sample sbt setup for running the akka-sample-chat +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka Benchmark project +^^^^^^^^^^^^^^^^^^^^^^ + +Benches Akka against various other actors and concurrency tools +``_ + +Typed Actor (Java API) sample project +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka PI calculation sample project +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka Vaadin Ice sample +^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Port of Jersey (JAX-RS) samples to Akka +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka Expect Testing +^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka Java API playground +^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Family web page build with Scala, Lift, Akka, Redis, and Facebook Connect +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +An example of queued computation tasks using Akka +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +The samples for the New York Scala Enthusiasts Meetup discussing Akka +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ +``_ + +Container managed thread pools for Akka Dispatchers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +"Lock" Finite State Machine demo with Akka +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Template w/ Intellij stuff for random akka playing around (with Bivvy) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka chat using Akka Java API by Mario Fusco +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Projects using the removed Akka Persistence modules +=================================================== + +Akka Terrastore sample +^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Akka Persistence for Force.com +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ + +Template for Akka and Redis +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``_ diff --git 
a/akka-docs/pending/fault-tolerance-java.rst b/akka-docs/pending/fault-tolerance-java.rst new file mode 100644 index 0000000000..5c6510bcbd --- /dev/null +++ b/akka-docs/pending/fault-tolerance-java.rst @@ -0,0 +1,463 @@ +Fault Tolerance Through Supervisor Hierarchies (Java) +===================================================== + +Module stability: **SOLID** + +The "let it crash" approach to fault/error handling, implemented by linking actors, is very different to what Java and most non-concurrency oriented languages/frameworks have adopted. It’s a way of dealing with failure that is designed for concurrent and distributed systems. + +Concurrency +----------- + +Throwing an exception in concurrent code (let’s assume we are using non-linked actors), will just simply blow up the thread that currently executes the actor. + +# There is no way to find out that things went wrong (apart from inspecting the stack trace). +# There is nothing you can do about it. + +Here actors provide a clean way of getting notification of the error and do something about it. + +Linking actors also allow you to create sets of actors where you can be sure that either: +# All are dead +# None are dead + +This is very useful when you have thousands of concurrent actors. Some actors might have implicit dependencies and together implement a service, computation, user session etc. + +It encourages non-defensive programming. Don’t try to prevent things from go wrong, because they will, whether you want it or not. Instead; expect failure as a natural state in the life-cycle of your app, crash early and let someone else (that sees the whole picture), deal with it. + +Distributed actors +------------------ + +You can’t build a fault-tolerant system with just one single box - you need at least two. Also, you (usually) need to know if one box is down and/or the service you are talking to on the other box is down. 
Here actor supervision/linking is a critical tool for not only monitoring the health of remote services, but to actually manage the service, do something about the problem if the actor or node is down. Such as restarting actors on the same node or on another node. + +In short, it is a very different way of thinking, but a way that is very useful (if not critical) to building fault-tolerant highly concurrent and distributed applications, which is as valid if you are writing applications for the JVM or the Erlang VM (the origin of the idea of "let-it-crash" and actor supervision). + +Supervision +----------- + +Supervisor hierarchies originate from `Erlang’s OTP framework `_. + +A supervisor is responsible for starting, stopping and monitoring its child processes. The basic idea of a supervisor is that it should keep its child processes alive by restarting them when necessary. This makes for a completely different view on how to write fault-tolerant servers. Instead of trying all things possible to prevent an error from happening, this approach embraces failure. It shifts the view to look at errors as something natural and something that **will** happen, instead of trying to prevent it; embraces it. Just ‘Let It Crash™’, since the components will be reset to a stable state and restarted upon failure. + +Akka has two different restart strategies; All-For-One and One-For-One. Best explained using some pictures (referenced from `erlang.org `_ ): + +OneForOne +^^^^^^^^^ + +The OneForOne fault handler will restart only the component that has crashed. +``_ + +AllForOne +^^^^^^^^^ + +The AllForOne fault handler will restart all the components that the supervisor is managing, including the one that have crashed. This strategy should be used when you have a certain set of components that are coupled in some way that if one is crashing they all need to be reset to a stable state before continuing. 
+``_ + +Restart callbacks +^^^^^^^^^^^^^^^^^ + +There are two different callbacks that an UntypedActor or TypedActor can hook in to: + +* Pre restart +* Post restart + +These are called prior to and after the restart upon failure and can be used to clean up and reset/reinitialize state upon restart. This is important in order to reset the component failure and leave the component in a fresh and stable state before consuming further messages. + +Defining a supervisor's restart strategy +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Both the Typed Actor supervisor configuration and the Actor supervisor configuration take a ‘FaultHandlingStrategy’ instance which defines the fault management. The different strategies are: + +* AllForOne +* OneForOne + +These have the semantics outlined in the section above. + +Here is an example of how to define a restart strategy: + +.. code-block:: java + + new AllForOneStrategy( //Or OneForOneStrategy + new Class[]{ Exception.class }, //List of Exceptions/Throwables to handle + 3, // maximum number of restart retries + 5000 // within time in millis + ) + +Defining actor life-cycle +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The other common configuration element is the ‘LifeCycle’ which defines the life-cycle. The supervised actor can define one of two different life-cycle configurations: + +* Permanent: which means that the actor will always be restarted. +* Temporary: which means that the actor will **not** be restarted, but it will be shut down through the regular shutdown process so the 'postStop' callback function will called. + +Here is an example of how to define the life-cycle: + +.. 
code-block:: java + + import static akka.config.Supervision.*; + + getContext().setLifeCycle(permanent()); //permanent() means that the component will always be restarted + getContext().setLifeCycle(temporary()); //temporary() means that the component will not be restarted, but rather shut down normally + +Supervising Untyped Actor +------------------------- + +Declarative supervisor configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Actor’s supervision can be declaratively defined by creating a ‘Supervisor’ factory object. Here is an example: + +.. code-block:: java + + import static akka.config.Supervision.*; + import static akka.actor.Actors.*; + + Supervisor supervisor = new Supervisor( + new SupervisorConfig( + new AllForOneStrategy(new Class[]{Exception.class}, 3, 5000), + new Supervise[] { + new Supervise( + actorOf(MyActor1.class), + permanent()), + Supervise( + actorOf(MyActor2.class), + permanent()) + })); + +Supervisors created like this are implicitly instantiated and started. + +You can link and unlink actors from a declaratively defined supervisor using the 'link' and 'unlink' methods: + +.. code-block:: java + + Supervisor supervisor = new Supervisor(...); + supervisor.link(..); + supervisor.unlink(..); + +You can also create declarative supervisors through the 'SupervisorFactory' factory object. Use this factory instead of the 'Supervisor' factory object if you want to control instantiation and starting of the Supervisor, if not then it is easier and better to use the 'Supervisor' factory object. + +Example usage: + +.. 
code-block:: java + + import static akka.config.Supervision.*; + import static akka.actor.Actors.*; + + SupervisorFactory factory = new SupervisorFactory( + new SupervisorConfig( + new OneForOneStrategy(new Class[]{Exception.class}, 3, 5000), + new Supervise[] { + new Supervise( + actorOf(MyActor1.class), + permanent()), + Supervise( + actorOf(MyActor2.class), + temporary()) + })); + +Then create a new instance our Supervisor and start it up explicitly. + +.. code-block:: java + + SupervisorFactory supervisor = factory.newInstance(); + supervisor.start(); // start up all managed servers + +Declaratively define actors as remote services +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can expose your actors as remote services by specifying the registerAsRemote to **true** in Supervise. + +Here is an example: + +.. code-block:: java + + import static akka.config.Supervision.*; + import static akka.actor.Actors.*; + + Supervisor supervisor = new Supervisor( + new SupervisorConfig( + new AllForOneStrategy(new Class[]{Exception.class}, 3, 5000), + new Supervise[] { + new Supervise( + actorOf(MyActor1.class), + permanent(), + true) + })); + +Programmatical linking and supervision of Untyped Actors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Untyped Actors can at runtime create, spawn, link and supervise other actors. Linking and unlinking is done using one of the 'link' and 'unlink' methods available in the 'ActorRef' (therefore prefixed with getContext() in these examples). + +Here is the API and how to use it from within an 'Actor': + +.. 
code-block:: java + + // link and unlink actors + getContext().link(actorRef); + getContext().unlink(actorRef); + + // starts and links Actors atomically + getContext().startLink(actorRef); + getContext().startLinkRemote(actorRef); + + // spawns (creates and starts) actors + getContext().spawn(MyActor.class); + getContext().spawnRemote(MyActor.class); + + // spawns and links Actors atomically + getContext().spawnLink(MyActor.class); + getContext().spawnLinkRemote(MyActor.class); + +A child actor can tell the supervising actor to unlink him by sending him the 'Unlink(this)' message. When the supervisor receives the message he will unlink and shut down the child. The supervisor for an actor is available in the 'supervisor: Option[Actor]' method in the 'ActorRef' class. Here is how it can be used. + +.. code-block:: java + + ActorRef supervisor = getContext().getSupervisor(); + if (supervisor != null) supervisor.sendOneWay(new Unlink(getContext())) + +The supervising actor's side of things +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If a linked Actor is failing and throws an exception then an ‘new Exit(deadActor, cause)’ message will be sent to the supervisor (however you should never try to catch this message in your own message handler, it is managed by the runtime). + +The supervising Actor also needs to define a fault handler that defines the restart strategy the Actor should accommodate when it traps an ‘Exit’ message. This is done by setting the ‘setFaultHandler’ method. + +The different options are: + +* AllForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) + * trapExit is an Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle +* OneForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) + * trapExit is an Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle + +Here is an example: + +.. 
code-block:: java + + getContext().setFaultHandler(new AllForOneStrategy(new Class[]{MyException.class, IOException.class}, 3, 1000)); + +Putting all this together it can look something like this: + +.. code-block:: java + + class MySupervisor extends UntypedActor { + public MySupervisor() { + getContext().setFaultHandler(new AllForOneStrategy(new Class[]{MyException.class, IOException.class}, 3, 1000)); + } + + public void onReceive(Object message) throws Exception { + if (message instanceof Register) { + Register event = (Register)message; + UntypedActorRef actor = event.getActor(); + context.link(actor); + } else throw new IllegalArgumentException("Unknown message: " + message); + } + } + +You can also link an actor from outside the supervisor like this: + +.. code-block:: java + + UntypedActor supervisor = Actors.registry().actorsFor(MySupervisor.class])[0]; + supervisor.link(actorRef); + +The supervised actor's side of things +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The supervised actor needs to define a life-cycle. This is done by setting the lifeCycle field as follows: + +.. code-block:: java + + import static akka.config.Supervision.*; + + getContext().setLifeCycle(permanent()); // Permanent or Temporary + +Default is 'Permanent' so if you don't set the life-cycle then that is what you get. + +In the supervised Actor you can override the ‘preRestart’ and ‘postRestart’ callback methods to add hooks into the restart process. These methods take the reason for the failure, e.g. the exception that caused termination and restart of the actor as argument. It is in these methods that **you** have to add code to do cleanup before termination and initialization after restart. Here is an example: + +.. code-block:: java + + class FaultTolerantService extends UntypedActor { + + @Override + public void preRestart(Throwable reason) { + ... // clean up before restart + } + + @Override + public void postRestart(Throwable reason) { + ... 
// reinit stable state after restart + } + } + +Reply to initial senders +^^^^^^^^^^^^^^^^^^^^^^^^ + +Supervised actors have the option to reply to the initial sender within preRestart, postRestart and postStop. A reply within these methods is possible after receive has thrown an exception. When receive returns normally it is expected that any necessary reply has already been done within receive. Here's an example. + +.. code-block:: java + + public class FaultTolerantService extends UntypedActor { + public void onReceive(Object msg) { + // do something that may throw an exception + // ... + + getContext().replySafe("ok"); + } + + @Override + public void preRestart(Throwable reason) { + getContext().replySafe(reason.getMessage()); + } + + @Override + public void postStop() { + getContext().replySafe("stopped by supervisor"); + } + } + +* A reply within preRestart or postRestart must be a safe reply via getContext().replySafe() because a getContext().replyUnsafe() will throw an exception when the actor is restarted without having failed. This can be the case in context of AllForOne restart strategies. +* A reply within postStop must be a safe reply via getContext().replySafe() because a getContext().replyUnsafe() will throw an exception when the actor has been stopped by the application (and not by a supervisor) after successful execution of receive (or no execution at all). + +Handling too many actor restarts within a specific time limit +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you remember, when you define the 'RestartStrategy' you also defined maximum number of restart retries within time in millis. + +.. code-block:: java + + new AllForOneStrategy( // FaultHandlingStrategy policy (AllForOneStrategy or OneForOneStrategy) + new Class[]{MyException.class, IOException.class}, //What types of errors will be handled + 3, // maximum number of restart retries + 5000 // within time in millis + ); + +Now, what happens if this limit is reached? 
What will happen is that the failing actor will send a system message to its supervisor called 'MaximumNumberOfRestartsWithinTimeRangeReached' with the following properties:
This method takes a ‘RestartStrategy’ and an array of ‘Component’ definitions defining the Typed Actors and their ‘LifeCycle’. Finally you call the ‘supervise’ method to start everything up. The Java configuration elements reside in the ‘akka.config.JavaConfig’ class and need to be imported statically. + +Here is an example: + +.. code-block:: java + + import static akka.config.Supervision.*; + import static akka.config.SupervisorConfig.*; + + TypedActorConfigurator manager = new TypedActorConfigurator(); + + manager.configure( + new AllForOneStrategy(new Class[]{Exception.class}, 3, 1000), + new SuperviseTypedActor[] { + new SuperviseTypedActor( + Foo.class, + FooImpl.class, + temporary(), + 1000), + new SuperviseTypedActor( + Bar.class, + BarImpl.class, + permanent(), + 1000) + }).supervise(); + +Then you can retrieve the Typed Actor as follows: + +.. code-block:: java + + Foo foo = (Foo) manager.getInstance(Foo.class); + +Restart callbacks +^^^^^^^^^^^^^^^^^ + +In the supervised TypedActor you can override the ‘preRestart’ and ‘postRestart’ callback methods to add hooks into the restart process. These methods take the reason for the failure, e.g. the exception that caused termination and restart of the actor as argument. It is in these methods that **you** have to add code to do cleanup before termination and initialization after restart. Here is an example: + +.. code-block:: java + + class FaultTolerantService extends TypedActor { + + @Override + public void preRestart(Throwable reason) { + ... // clean up before restart + } + + @Override + public void postRestart(Throwable reason) { + ... // reinit stable state after restart + } + } + +Programatical linking and supervision of TypedActors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +TypedActors can be linked an unlinked just like UntypedActors: + +.. 
code-block:: java + + TypedActor.link(supervisor, supervised); + + TypedActor.unlink(supervisor, supervised); + +If the parent TypedActor (supervisor) wants to be able to do handle failing child TypedActors, e.g. be able restart the linked TypedActor according to a given fault handling scheme then it has to set its ‘trapExit’ flag to an array of Exceptions that it wants to be able to trap: + +.. code-block:: java + + TypedActor.faultHandler(supervisor, new AllForOneStrategy(new Class[]{IOException.class}, 3, 2000)); + +For convenience there is an overloaded link that takes trapExit and faultHandler for the supervisor as arguments. Here is an example: + +.. code-block:: java + import static akka.actor.TypedActor.*; + import static akka.config.Supervision.*; + + foo = newInstance(Foo.class, FooImpl.class, 1000); + bar = newInstance(Bar.class, BarImpl.class, 1000); + + link(foo, bar, new AllForOneStrategy(new Class[]{IOException.class}, 3, 2000)); + + // alternative: chaining + bar = faultHandler(foo, new AllForOneStrategy(new Class[]{IOException.class}, 3, 2000)).newInstance(Bar.class, 1000); + + link(foo, bar); diff --git a/akka-docs/pending/fault-tolerance-scala.rst b/akka-docs/pending/fault-tolerance-scala.rst new file mode 100644 index 0000000000..279e69b849 --- /dev/null +++ b/akka-docs/pending/fault-tolerance-scala.rst @@ -0,0 +1,422 @@ +Fault Tolerance Through Supervisor Hierarchies (Scala) +====================================================== + +Module stability: **SOLID** + +The "let it crash" approach to fault/error handling, implemented by linking actors, is very different to what Java and most non-concurrency oriented languages/frameworks have adopted. It's a way of dealing with failure that is designed for concurrent and distributed systems. + +Concurrency +----------- + +Throwing an exception in concurrent code (let's assume we are using non-linked actors), will just simply blow up the thread that currently executes the actor. 
+ +# There is no way to find out that things went wrong (apart from inspecting the stack trace). +# There is nothing you can do about it. + +Here actors provide a clean way of getting notification of the error and do something about it. + +Linking actors also allow you to create sets of actors where you can be sure that either: + +# All are dead +# None are dead + +This is very useful when you have thousands of concurrent actors. Some actors might have implicit dependencies and together implement a service, computation, user session etc. + +It encourages non-defensive programming. Don't try to prevent things from go wrong, because they will, whether you want it or not. Instead; expect failure as a natural state in the life-cycle of your app, crash early and let someone else (that sees the whole picture), deal with it. + +Distributed actors +------------------ + +You can't build a fault-tolerant system with just one single box - you need at least two. Also, you (usually) need to know if one box is down and/or the service you are talking to on the other box is down. Here actor supervision/linking is a critical tool for not only monitoring the health of remote services, but to actually manage the service, do something about the problem if the actor or node is down. Such as restarting actors on the same node or on another node. + +In short, it is a very different way of thinking, but a way that is very useful (if not critical) to building fault-tolerant highly concurrent and distributed applications, which is as valid if you are writing applications for the JVM or the Erlang VM (the origin of the idea of "let-it-crash" and actor supervision). + +Supervision +----------- + +Supervisor hierarchies originate from `Erlang's OTP framework `_. + +A supervisor is responsible for starting, stopping and monitoring its child processes. The basic idea of a supervisor is that it should keep its child processes alive by restarting them when necessary. 
This makes for a completely different view on how to write fault-tolerant servers. Instead of trying all things possible to prevent an error from happening, this approach embraces failure. It shifts the view to look at errors as something natural and something that **will** happen, instead of trying to prevent it; embraces it. Just "Let It Crash", since the components will be reset to a stable state and restarted upon failure. + +Akka has two different restart strategies; All-For-One and One-For-One. Best explained using some pictures (referenced from `erlang.org `_ ): + +OneForOne +^^^^^^^^^ + +The OneForOne fault handler will restart only the component that has crashed. +``_ + +AllForOne +^^^^^^^^^ + +The AllForOne fault handler will restart all the components that the supervisor is managing, including the one that have crashed. This strategy should be used when you have a certain set of components that are coupled in some way that if one is crashing they all need to be reset to a stable state before continuing. +``_ + +Restart callbacks +^^^^^^^^^^^^^^^^^ + +There are two different callbacks that the Typed Actor and Actor can hook in to: + +* Pre restart +* Post restart + +These are called prior to and after the restart upon failure and can be used to clean up and reset/reinitialize state upon restart. This is important in order to reset the component failure and leave the component in a fresh and stable state before consuming further messages. + +Defining a supervisor's restart strategy +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Both the Typed Actor supervisor configuration and the Actor supervisor configuration take a 'FaultHandlingStrategy' instance which defines the fault management. The different strategies are: + +* AllForOne +* OneForOne + +These have the semantics outlined in the section above. + +Here is an example of how to define a restart strategy: + +.. 
code-block:: scala + + AllForOneStrategy( //FaultHandlingStrategy; AllForOneStrategy or OneForOneStrategy + List(classOf[Exception]), //What exceptions will be handled + 3, // maximum number of restart retries + 5000 // within time in millis + ) + +Defining actor life-cycle +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The other common configuration element is the "LifeCycle' which defines the life-cycle. The supervised actor can define one of two different life-cycle configurations: + +* Permanent: which means that the actor will always be restarted. +* Temporary: which means that the actor will **not** be restarted, but it will be shut down through the regular shutdown process so the 'postStop' callback function will called. + +Here is an example of how to define the life-cycle: + +.. code-block:: scala + + Permanent // means that the component will always be restarted + Temporary // means that it will not be restarted, but it will be shut + // down through the regular shutdown process so the 'postStop' hook will called + +Supervising Actors +------------------ + +Declarative supervisor configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Actor's supervision can be declaratively defined by creating a "Supervisor' factory object. Here is an example: + +.. code-block:: scala + + val supervisor = Supervisor( + SupervisorConfig( + AllForOneStrategy(List(classOf[Exception]), 3, 1000), + Supervise( + actorOf[MyActor1], + Permanent) :: + Supervise( + actorOf[MyActor2], + Permanent) :: + Nil)) + +Supervisors created like this are implicitly instantiated and started. + +You can link and unlink actors from a declaratively defined supervisor using the 'link' and 'unlink' methods: + +.. code-block:: scala + + val supervisor = Supervisor(...) + supervisor.link(..) + supervisor.unlink(..) + +You can also create declarative supervisors through the 'SupervisorFactory' factory object. 
Use this factory instead of the 'Supervisor' factory object if you want to control instantiation and starting of the Supervisor, if not then it is easier and better to use the 'Supervisor' factory object. + +Example usage: + +.. code-block:: scala + + val factory = SupervisorFactory( + SupervisorConfig( + OneForOneStrategy(List(classOf[Exception]), 3, 10), + Supervise( + myFirstActor, + Permanent) :: + Supervise( + mySecondActor, + Permanent) :: + Nil)) + +Then create a new instance our Supervisor and start it up explicitly. + +.. code-block:: scala + + val supervisor = factory.newInstance + supervisor.start // start up all managed servers + +Declaratively define actors as remote services +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can declaratively define an actor to be available as a remote actor by specifying **true** for registerAsRemoteService. + +Here is an example: + +.. code-block:: scala + + val supervisor = Supervisor( + SupervisorConfig( + AllForOneStrategy(List(classOf[Exception]), 3, 1000), + Supervise( + actorOf[MyActor1], + Permanent, + **true**) + :: Nil)) + +Programmatical linking and supervision of Actors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Actors can at runtime create, spawn, link and supervise other actors. Linking and unlinking is done using one of the 'link' and 'unlink' methods available in the 'ActorRef' (therefore prefixed with 'self' in these examples). + +Here is the API and how to use it from within an 'Actor': + +.. code-block:: scala + + // link and unlink actors + self.link(actorRef) + self.unlink(actorRef) + + // starts and links Actors atomically + self.startLink(actorRef) + + // spawns (creates and starts) actors + self.spawn[MyActor] + self.spawnRemote[MyActor] + + // spawns and links Actors atomically + self.spawnLink[MyActor] + self.spawnLinkRemote[MyActor] + +A child actor can tell the supervising actor to unlink him by sending him the 'Unlink(this)' message. 
When the supervisor receives the message he will unlink and shut down the child. The supervisor for an actor is available in the 'supervisor: Option[Actor]' method in the 'ActorRef' class. Here is how it can be used. + +.. code-block:: scala + + if (supervisor.isDefined) supervisor.get ! Unlink(this) + + // Or shorter using 'foreach': + + supervisor.foreach(_ ! Unlink(this)) + +The supervising actor's side of things +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If a linked Actor is failing and throws an exception then an "Exit(deadActor, cause)' message will be sent to the supervisor (however you should never try to catch this message in your own message handler, it is managed by the runtime). + +The supervising Actor also needs to define a fault handler that defines the restart strategy the Actor should accommodate when it traps an "Exit' message. This is done by setting the "faultHandler' field. + +.. code-block:: scala + + protected var faultHandler: FaultHandlingStrategy + +The different options are: + +* AllForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) + * trapExit is a List or Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle +* OneForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) + * trapExit is a List or Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle + +Here is an example: + +.. code-block:: scala + + self.faultHandler = AllForOneStrategy(List(classOf[Throwable]), 3, 1000) + +Putting all this together it can look something like this: + +.. code-block:: scala + + class MySupervisor extends Actor { + self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), 5, 5000) + + def receive = { + case Register(actor) => + self.link(actor) + } + } + +You can also link an actor from outside the supervisor like this: + +.. 
code-block:: scala + + val supervisor = Actor.registry.actorsFor(classOf[MySupervisor]).head + supervisor.link(actor) + +The supervised actor's side of things +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The supervised actor needs to define a life-cycle. This is done by setting the lifeCycle field as follows: + +.. code-block:: scala + + self.lifeCycle = Permanent // Permanent or Temporary or UndefinedLifeCycle + +In the supervised Actor you can override the "preRestart' and "postRestart' callback methods to add hooks into the restart process. These methods take the reason for the failure, e.g. the exception that caused termination and restart of the actor as argument. It is in these methods that **you** have to add code to do cleanup before termination and initialization after restart. Here is an example: + +.. code-block:: scala + + class FaultTolerantService extends Actor { + override def preRestart(reason: Throwable) { + ... // clean up before restart + } + + override def postRestart(reason: Throwable) { + ... // reinit stable state after restart + } + } + +Reply to initial senders +^^^^^^^^^^^^^^^^^^^^^^^^ + +Supervised actors have the option to reply to the initial sender within preRestart, postRestart and postStop. A reply within these methods is possible after receive has thrown an exception. When receive returns normally it is expected that any necessary reply has already been done within receive. Here's an example. + +.. code-block:: scala + + class FaultTolerantService extends Actor { + def receive = { + case msg => { + // do something that may throw an exception + // ... + + self.reply("ok") + } + } + + override def preRestart(reason: scala.Throwable) { + self.reply_?(reason.getMessage) + } + + override def postStop { + self.reply_?("stopped by supervisor") + } + } + +* A reply within preRestart or postRestart must be a safe reply via self.reply_? because an unsafe self.reply will throw an exception when the actor is restarted without having failed. 
This can be the case in context of AllForOne restart strategies. +* A reply within postStop must be a safe reply via self.reply_? because an unsafe self.reply will throw an exception when the actor has been stopped by the application (and not by a supervisor) after successful execution of receive (or no execution at all). + +Handling too many actor restarts within a specific time limit +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you remember, when you define the 'RestartStrategy' you also defined maximum number of restart retries within time in millis. + +.. code-block:: scala + + AllForOneStrategy( //Restart policy, AllForOneStrategy or OneForOneStrategy + List(classOf[Exception]), //What kinds of exception it will handle + 3, // maximum number of restart retries + 5000 // within time in millis + ) + +Now, what happens if this limit is reached? + +What will happen is that the failing actor will send a system message to its supervisor called 'MaximumNumberOfRestartsWithinTimeRangeReached' with the following signature: + +.. code-block:: scala + + case class MaximumNumberOfRestartsWithinTimeRangeReached( + victim: ActorRef, maxNrOfRetries: Int, withinTimeRange: Int, lastExceptionCausingRestart: Throwable) + +If you want to be able to take action upon this event (highly recommended) then you have to create a message handle for it in the supervisor. + +Here is an example: + +.. code-block:: scala + + val supervisor = actorOf(new Actor{ + self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), 5, 5000) + protected def receive = { + case MaximumNumberOfRestartsWithinTimeRangeReached( + victimActorRef, maxNrOfRetries, withinTimeRange, lastExceptionCausingRestart) => + ... // handle the error situation + } + }).start() + +You will also get this log warning similar to this: + +.. code-block:: console + + WAR [20100715-14:05:25.821] actor: Maximum number of restarts [5] within time range [5000] reached. 
+ WAR [20100715-14:05:25.821] actor: Will *not* restart actor [Actor[akka.actor.SupervisorHierarchySpec$CountDownActor:1279195525812]] anymore. + WAR [20100715-14:05:25.821] actor: Last exception causing restart was [akka.actor.SupervisorHierarchySpec$FireWorkerException: Fire the worker!]. + +If you don't define a message handler for this message then you don't get an error but the message is simply not sent to the supervisor. Instead you will get a log warning. + +Supervising Typed Actors +------------------------ + +Declarative supervisor configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To configure Typed Actors for supervision you have to consult the "TypedActorConfigurator' and its "configure' method. This method takes a "RestartStrategy' and an array of "Component' definitions defining the Typed Actors and their "LifeCycle'. Finally you call the "supervise' method to start everything up. The configuration elements reside in the "akka.config.JavaConfig' class and need to be imported statically. + +Here is an example: + +.. code-block:: scala + + import akka.config.Supervision._ + + val manager = new TypedActorConfigurator + + manager.configure( + AllForOneStrategy(List(classOf[Exception]), 3, 1000), + List( + SuperviseTypedActor( + Foo.class, + FooImpl.class, + Permanent, + 1000), + new SuperviseTypedActor( + Bar.class, + BarImpl.class, + Permanent, + 1000) + )).supervise + +Then you can retrieve the Typed Actor as follows: + +.. code-block:: java + + Foo foo = manager.getInstance(classOf[Foo]) + +Restart callbacks +^^^^^^^^^^^^^^^^^ + +Programatical linking and supervision of TypedActors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +TypedActors can be linked an unlinked just like actors - in fact the linking is done on the underlying actor: + +.. 
code-block:: scala + + TypedActor.link(supervisor, supervised) + + TypedActor.unlink(supervisor, supervised) + +If the parent TypedActor (supervisor) wants to be able to do handle failing child TypedActors, e.g. be able restart the linked TypedActor according to a given fault handling scheme then it has to set its 'trapExit' flag to an array of Exceptions that it wants to be able to trap: + +.. code-block:: scala + + TypedActor.faultHandler(supervisor, AllForOneStrategy(Array(classOf[IOException]), 3, 2000)) + +For convenience there is an overloaded link that takes trapExit and faultHandler for the supervisor as arguments. Here is an example: + +.. code-block:: scala + import akka.actor.TypedActor._ + + val foo = newInstance(classOf[Foo], 1000) + val bar = newInstance(classOf[Bar], 1000) + + link(foo, bar, new AllForOneStrategy(Array(classOf[IOException]), 3, 2000)) + + // alternative: chaining + bar = faultHandler(foo, new AllForOneStrategy(Array(classOf[IOException]), 3, 2000)) + .newInstance(Bar.class, 1000) + + link(foo, bar diff --git a/akka-docs/pending/fsm-scala.rst b/akka-docs/pending/fsm-scala.rst new file mode 100644 index 0000000000..9471b39165 --- /dev/null +++ b/akka-docs/pending/fsm-scala.rst @@ -0,0 +1,218 @@ +FSM +=== + +Module stability: **STABLE** + +The FSM (Finite State Machine) is available as a mixin for the akka Actor and is best described in the `Erlang design principals <@http://www.erlang.org/documentation/doc-4.8.2/doc/design_principles/fsm.html>`_ + +A FSM can be described as a set of relations of the form: +> **State(S) x Event(E) -> Actions (A), State(S')** + +These relations are interpreted as meaning: +> *If we are in state S and the event E occurs, we should perform the actions A and make a transition to the state S'.* + +State Definitions +----------------- + +To demonstrate the usage of states we start with a simple state only FSM without state data. The state can be of any type so for this example we create the states A, B and C. 
+ +.. code-block:: scala + + sealed trait ExampleState + case object A extends ExampleState + case object B extends ExampleState + case object C extends ExampleState + +Now lets create an object to influence the FSM and define the states and their behaviour. + +.. code-block:: scala + + import akka.actor.{Actor, FSM} + import FSM._ + import akka.util.duration._ + + case object Move + + class ABC extends Actor with FSM[ExampleState,Unit] { + + startWith(A, Unit) + + when(A) { + case Event(Move, _) => + log.info("Go to B and move on after 5 seconds") + goto(B) forMax (5 seconds) + } + + when(B) { + case Event(StateTimeout, _) => + log.info("Moving to C") + goto(C) + } + + when(C) { + case Event(Move, _) => + log.info("Stopping") + stop + } + + initialize // this checks validity of the initial state and sets up timeout if needed + } + +So we use 'when' to specify a state and define what needs to happen when we receive an event. We use 'goto' to go to another state. We use 'forMax' to tell for how long we maximum want to stay in that state before we receive a timeout notification. We use 'stop' to stop the FSM. And we use 'startWith' to specify which state to start with. The call to 'initialize' should be the last action done in the actor constructor. + +If we want to stay in the current state we can use (I'm hoping you can guess this by now) 'stay'. That can also be combined with the 'forMax' + +.. code-block:: scala + + when(C) { + case Event(unknown, _) => + stay forMax (2 seconds) + } + +The timeout can also be associated with the state itself, the choice depends on whether most of the transitions to the state require the same value for the timeout: + +.. code-block:: scala + + when(A) { + case Ev(Start(msg)) => // convenience extractor when state data not needed + goto(Timer) using msg + } + + when(B, stateTimeout = 12 seconds) { + case Event(StateTimeout, msg) => + target ! 
msg + case Ev(DifferentPause(dur : Duration)) => + stay forMax dur // overrides default state timeout for this single transition + +Unhandled Events +---------------- + +If a state doesn't handle a received event a warning is logged. If you want to do something with these events you can specify that with 'whenUnhandled' + +.. code-block:: scala + + whenUnhandled { + case Event(x, _) => log.info("Received unhandled event: " + x) + } + +Termination +----------- + +You can use 'onTermination' to specify custom code that is executed when the FSM is stopped. A reason is passed to tell how the FSM was stopped. + +.. code-block:: scala + + onTermination { + case Normal => log.info("Stopped normal") + case Shutdown => log.info("Stopped because of shutdown") + case Failure(cause) => log.error("Stopped because of failure: " + cause) + } + +State Transitions +----------------- + +When state transitions to another state we might want to know about this and take action. To specify this we can use 'onTransition' to capture the transitions. + +.. code-block:: scala + + onTransition { + case A -> B => log.info("Moving from A to B") + case _ -> C => log.info("Moving from something to C") + } + +Multiple onTransition blocks may be given and all will be executed while processing a transition. This enables you to associate your Actions either with the initial state of a processing step, or with the transition into the final state of a processing step. + +Transitions occur "between states" conceptually, which means after any actions you have put into the event handling block; this is obvious since the next state is only defined by the value returned by the event handling logic. You do not need to worry about the exact order with respect to setting the internal state variable, as everything within the FSM actor is running single-threaded anyway. 
+ +It is also possible to pass a function object accepting two states to onTransition, in case your state handling logic is implemented as a method: + +.. code-block:: scala + + onTransition(handler _) + + private def handler(from: State, to: State) { + ... + } + +State Data +---------- + +The FSM can also hold state data that is attached to every event. The state data can be of any type but to demonstrate let's look at a lock with a String as state data holding the entered unlock code. +First we need two states for the lock: + +.. code-block:: scala + + sealed trait LockState + case object Locked extends LockState + case object Open extends LockState + +Now we can create a lock FSM that takes LockState as a state and a String as state data: + +.. code-block:: scala + + import akka.actor.{FSM, Actor} + import FSM._ + import akka.util.duration._ + + class Lock(code: String) extends Actor with FSM[LockState, String] { + + val emptyCode = "" + + when(Locked) { + // receive a digit and the code that we have so far + case Event(digit: Char, soFar) => { + // add the digit to what we have + soFar + digit match { + // not enough digits yet so stay using the incomplete code as the new state data + case incomplete if incomplete.length < code.length => + stay using incomplete + // code matched the one from the lock so go to Open state and reset the state data + case `code` => + log.info("Unlocked") + goto(Open) using emptyCode forMax (1 seconds) + // wrong code, stay Locked and reset the state data + case wrong => + log.error("Wrong code " + wrong) + stay using emptyCode + } + } + } + + when(Open) { + // after the timeout, go back to Locked state + case Event(StateTimeout, _) => { + log.info("Locked") + goto(Locked) + } + } + + startWith(Locked, emptyCode) + } + +To use the Lock you can run a small program like this: + +.. code-block:: scala + + object Lock { + + def main(args: Array[String]) { + + val lock = Actor.actorOf(new Lock("1234")).start() + + lock ! '1' + lock ! 
'2' + lock ! '3' + lock ! '4' + + Actor.registry.shutdownAll() + exit + } + } + +Dining Hakkers +-------------- + +A bigger FSM example can be found in the sources. +`Dining Hakkers using FSM <@https://github.com/jboner/akka/blob/master/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala#L1>`_ +`Dining Hakkers using become <@https://github.com/jboner/akka/blob/master/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala#L1>`_ diff --git a/akka-docs/pending/futures-scala.rst b/akka-docs/pending/futures-scala.rst new file mode 100644 index 0000000000..5cbfc08cea --- /dev/null +++ b/akka-docs/pending/futures-scala.rst @@ -0,0 +1,197 @@ +Futures (Scala) +=============== + +Introduction +------------ + +In Akka, a `Future `_ is a data structure used to retrieve the result of some concurrent operation. This operation is usually performed by an ``Actor`` or by the ``Dispatcher`` directly. This result can be accessed synchronously (blocking) or asynchronously (non-blocking). + +Use with Actors +--------------- + +There are generally two ways of getting a reply from an ``Actor``: the first is by a sent message (``actor ! msg``), which only works if the original sender was an ``Actor``, and the second is through a ``Future``. + +Using an ``Actor``\'s ``!!!`` method to send a message will return a Future. To wait for and retrieve the actual result the simplest method is: + +.. code-block:: scala + + val future = actor !!! msg + val result: Any = future.apply + // or more simply + val result: Any = future() + +This will cause the current thread to block and wait for the ``Actor`` to 'complete' the ``Future`` with its reply. Due to the dynamic nature of Akka's ``Actor``\s this result will be untyped and will default to ``Nothing``. The safest way to deal with this is to cast the result to an ``Any`` as is shown in the above example. 
You can also use the expected result type instead of ``Any``, but if an unexpected type were to be returned you will get a ``ClassCastException``. For more elegant ways to deal with this and to use the result without blocking refer to `Functional Futures`_. + +Use Directly +------------ + +A common use case within Akka is to have some computation performed concurrently without needing the extra utility of an ``Actor``. If you find yourself creating a pool of ``Actor``\s for the sole reason of performing a calculation in parallel, there is an easier (and faster) way: + +.. code-block:: scala + + import akka.dispatch.Future + + val future = Future { + "Hello" + "World" + } + val result = future() + +In the above code the block passed to ``Future`` will be executed by the default ``Dispatcher``, with the return value of the block used to complete the ``Future`` (in this case, the result would be the string: "HelloWorld"). Unlike a ``Future`` that is returned from an ``Actor``, this ``Future`` is properly typed, and we also avoid the overhead of managing an ``Actor``. + +Functional Futures +------------------ + +A recent addition to Akka's ``Future`` is several monadic methods that are very similar to the ones used by Scala's collections. These allow you to create 'pipelines' or 'streams' that the result will travel through. + +Future is a Monad +^^^^^^^^^^^^^^^^^ + +The first method for working with ``Future`` functionally is ``map``. This method takes a ``Function`` which performs some operation on the result of the ``Future``, and returning a new result. The return value of the ``map`` method is another ``Future`` that will contain the new result: + +.. code-block:: scala + + val f1 = Future { + "Hello" + "World" + } + + val f2 = f1 map { x => + x.length + } + + val result = f2() + +In this example we are joining two strings together within a Future. 
Instead of waiting for this to complete, we apply our Function that calculates the length of the string using the 'map' method. Now we have a second Future that will contain an Int. When our original Future completes, it will also apply our Function and complete the second Future with that result. When we finally await the result, it will contain the number 10. Our original Future still contains the string "HelloWorld" and is unaffected by the 'map'. + +Something to note when using these methods: if the Future is still being processed when one of these methods is called, it will be the completing thread that actually does the work. If the Future is already complete though, it will be run in our current thread. For example: + +.. code-block:: scala + + val f1 = Future { + Thread.sleep(1000) + "Hello" + "World" + } + + val f2 = f1 map { x => + x.length + } + + val result = f2() + +The original Future will take at least 1 second to execute due to sleep, which means it is still being processed at the time we call 'map'. The Function we provide gets stored within the Future and later executed by the dispatcher when the result is ready. + +If we do the opposite: + +.. code-block:: scala + + val f1 = Future { + "Hello" + "World" + } + + Thread.sleep(1000) + + val f2 = f1 map { x => + x.length + } + + val result = f2() + +Our little string has been processed long before our 1 second sleep has finished. Because of this, the dispatcher has moved on to other messages that need processing and can no longer calculate the length of the string for us, instead it gets calculated in the current thread just as if we weren't using a Future. + +Normally this works quite well for us as it means there is very little overhead to running a quick Function. If there is a possibility of the Function taking a non-trivial amount of time to process it might be better to have this done concurrently, and for that we use 'flatMap': + +.. 
code-block:: scala + + val f1 = Future { + "Hello" + "World" + } + + val f2 = f1 flatMap {x => + Future(x.length) + } + + val result = f2() + +Now our second Future is executed concurrently as well. This technique can also be used to combine the results of several Futures into a single calculation, which will be better explained in the following sections. + +For Comprehensions +^^^^^^^^^^^^^^^^^^ + +Since Future has a 'map' and 'flatMap' method it can be easily used in a for comprehension: + +.. code-block:: scala + + val f = for { + a <- Future(10 / 2) // 10 / 2 = 5 + b <- Future(a + 1) // 5 + 1 = 6 + c <- Future(a - 1) // 5 - 1 = 4 + } yield b * c // 6 * 4 = 24 + + val result = f() + +Something to keep in mind when doing this is even though it looks like parts of the above example can run in parallel, each step of the for comprehension is run sequentially. This will happen on separate threads for each step but there isn't much benefit over running the calculations all within a single Future. The real benefit comes when the Futures are created first, and then combining them. + +Composing Futures +^^^^^^^^^^^^^^^^^ + +The example for comprehension above is an example of composing Futures. A common use case for this is combining the replies of several Actors into a single calculation without resorting to calling 'await' to block for each result. For example: + +.. code-block:: scala + + val f1 = actor1 !!! msg1 + val f2 = actor2 !!! msg2 + + val f3 = for { + a: Int <- f1 + b: Int <- f2 + c: String <- actor3 !!! (a + b) + } yield c + + val result = f3() + +Here we have 2 actors processing a single message each. In the for comprehension we need to add the expected types in order to work with the results. Once the 2 results are available, they are being added together and sent to a third actor, which replies with a String, which we assign to 'result'. 
+ +This is fine when dealing with a known number of Actors, but can grow unwieldy if we have more than a handful. The 'sequence' and 'traverse' helper methods can make it easier to handle more complex use cases. Both of these methods are ways of turning a Traversable[Future[A]] into a Future[Traversable[A]]. For example: + +.. code-block:: scala + + // oddActor returns odd numbers sequentially from 1 + val listOfFutures: List[Future[Int]] = List.fill(100)(oddActor !!! GetNext) + + // now we have a Future[List[Int]] + val futureList = Futures.sequence(listOfFutures) + + // Find the sum of the odd numbers + val oddSum = futureList.map(_.sum).apply + +To better explain what happened in the example, Futures.sequence is taking the List[Future[Int]] and turning it into a Future[List[Int]]. We can then use 'map' to work with the List[Int] directly, and we find the sum of the List. + +The 'traverse' method is similar to 'sequence', but it takes a Traversable[A] and a Function A => Future[B] to return a Future[Traversable[B]]. For example, to use 'traverse' to sum the first 100 odd numbers: + +.. code-block:: scala + + val oddSum = Futures.traverse((1 to 100).toList)(x => Future(x * 2 - 1)).map(_.sum).apply + +This is the same result as this example: + +.. code-block:: scala + + val oddSum = Futures.sequence((1 to 100).toList.map(x => Future(x * 2 - 1))).map(_.sum).apply + +But it may be faster to use 'traverse' as it doesn't have to create an intermediate List[Future[Int]]. + +This is just a sample of what can be done, but to use more advanced techniques it is easier to take advantage of Scalaz, which Akka has support for in its akka-scalaz module. + +Scalaz +^^^^^^ + +Akka also has a `Scalaz module `_ for more complete support of programming in a functional style. + +Exceptions (TODO) +----------------- + +Handling exceptions. 
+ +Fine Tuning (TODO) +------------------ + +Dispatchers and timeouts diff --git a/akka-docs/pending/getting-started.rst b/akka-docs/pending/getting-started.rst new file mode 100644 index 0000000000..8f86f5cfca --- /dev/null +++ b/akka-docs/pending/getting-started.rst @@ -0,0 +1,126 @@ +Getting Started +=============== + +There are several ways to download Akka. You can download the full distribution with microkernel, which includes all modules. You can download just the core distribution. Or you can use a build tool like Maven or SBT to download dependencies from the Akka Maven repository. + +A list of each of the Akka module JARs' dependencies can be found `here `_. + +Using a release distribution +---------------------------- + +Akka is split up into two different parts: + +* Akka - The core modules. Reflects all the sections under 'Scala API' and 'Java API' in the navigation bar. +* Akka Modules - The microkernel and add-on modules. Reflects all the sections under 'Add-on modules' in the navigation bar. + +Download the release you need (Akka core or Akka Modules) from ``_ and unzip it. + +Microkernel +^^^^^^^^^^^ + +The Akka Modules distribution includes the microkernel. To run the microkernel: + +* Set the AKKA_HOME environment variable to the root of the Akka distribution. +* Run ``java -jar akka-modules-1.0.jar``. This will boot up the microkernel and deploy all sample applications from './deploy' dir. + +For example (bash shell): + +:: + + cd akka-modules-1.0 + export AKKA_HOME=`pwd` + java -jar akka-modules-1.0.jar + +Now you can continue with reading the `tutorial `_ and try to build the tutorial sample project step by step. This can be a good starting point before diving into the reference documentation which can be navigated in the left sidebar. + +Using a build tool +------------------ + +Akka can be used with build tools that support Maven repositories. The Akka Maven repository can be found at ``_. 
+ +Using Akka with Maven +^^^^^^^^^^^^^^^^^^^^^ + +If you want to use Akka with Maven then you need to add this repository to your ``pom.xml``: + +.. code-block:: xml + + + Akka + Akka Maven2 Repository + http://akka.io/repository/ + + +Then you can add the Akka dependencies. For example, here is the dependency for Akka Actor 1.0: + +.. code-block:: xml + + + se.scalablesolutions.akka + akka-actor + 1.0 + + +Using Akka with SBT +^^^^^^^^^^^^^^^^^^^ + +Akka has an SBT plugin which makes it very easy to get started with Akka and SBT. + +The Scala version in your SBT project needs to match the version that Akka is built against. For 1.0 this is 2.8.1. + +To use the plugin, first add a plugin definition to your SBT project by creating project/plugins/Plugins.scala with: + +.. code-block:: scala + + import sbt._ + + class Plugins(info: ProjectInfo) extends PluginDefinition(info) { + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.0" + } + +*Note: the plugin version matches the Akka version provided. The current release is 1.0.* + +Then mix the AkkaProject trait into your project definition. For example: + +.. code-block:: scala + + class MyProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject + +*Note: This adds akka-actor as a dependency by default.* + +If you also want to include other Akka modules there is a convenience method: ``akkaModule``. For example, you can add extra Akka modules by adding any of the following lines to your project class: + +.. 
code-block:: scala + + val akkaStm = akkaModule("stm") + val akkaTypedActor = akkaModule("typed-actor") + val akkaRemote = akkaModule("remote") + val akkaHttp = akkaModule("http") + val akkaAmqp = akkaModule("amqp") + val akkaCamel = akkaModule("camel") + val akkaCamelTyped = akkaModule("camel-typed") + val akkaSpring = akkaModule("spring") + val akkaJta = akkaModule("jta") + val akkaCassandra = akkaModule("persistence-cassandra") + val akkaMongo = akkaModule("persistence-mongo") + val akkaRedis = akkaModule("persistence-redis") + +Build from sources +------------------ + +Akka uses Git and is hosted at `Github `_. + +* Akka: clone the Akka repository from ``_ +* Akka Modules: clone the Akka Modules repository from ``_ + +Continue reading the page on `how to build and run Akka `_ + +Need help? +---------- + +If you have questions you can get help on the `Akka Mailing List `_. + +You can also ask for `commercial support `_. + +Thanks for being a part of the Akka community. diff --git a/akka-docs/pending/guice-integration.rst b/akka-docs/pending/guice-integration.rst new file mode 100644 index 0000000000..44a77fd22c --- /dev/null +++ b/akka-docs/pending/guice-integration.rst @@ -0,0 +1,50 @@ +Guice Integration +================= + +Module stability: **STABLE** + +All Typed Actors supports dependency injection using `Guice `_ annotations (such as ‘@Inject’ etc.). +The ‘TypedActorManager’ class understands Guice and will do the wiring for you. + +External Guice modules +---------------------- + +You can also plug in external Guice modules and have not-actors wired up as part of the configuration. +Here is an example: + +.. 
code-block:: java + + import static akka.config.Supervision.*; + import static akka.config.SupervisorConfig.*; + + TypedActorConfigurator manager = new TypedActorConfigurator(); + + manager.configure( + new AllForOneStrategy(new Class[]{Exception.class}, 3, 1000), + new SuperviseTypedActor[] { + new SuperviseTypedActor( + Foo.class, + FooImpl.class, + temporary(), + 1000), + new SuperviseTypedActor( + Bar.class, + BarImpl.class, + permanent(), + 1000) + }) + .addExternalGuiceModule(new AbstractModule() { + protected void configure() { + bind(Ext.class).to(ExtImpl.class).in(Scopes.SINGLETON); + }}) + .configure() + .inject() + .supervise(); + +Retrieve the external Guice dependency +-------------------------------------- + +The external dependency can be retrieved like this: +``_ +Ext ext = manager.getExternalDependency(Ext.class); +``_ diff --git a/akka-docs/pending/http.rst b/akka-docs/pending/http.rst new file mode 100644 index 0000000000..739f443c1d --- /dev/null +++ b/akka-docs/pending/http.rst @@ -0,0 +1,521 @@ +HTTP +==== + +Module stability: **SOLID** + +When using Akkas embedded servlet container: +-------------------------------------------- + +Akka supports the JSR for REST called JAX-RS (JSR-311). It allows you to create interaction with your actors through HTTP + REST + +You can deploy your REST services directly into the Akka kernel. All you have to do is to drop the JAR with your application containing the REST services into the ‘$AKKA_HOME/deploy’ directory and specify in your akka.conf what resource packages to scan for (more on that below) and optionally define a “boot class” (if you need to create any actors or do any config). WAR deployment is coming soon. + +Boot configuration class +------------------------ + +The boot class is needed for Akka to bootstrap the application and should contain the initial supervisor configuration of any actors in the module. 
+ +The boot class should be a regular POJO with a default constructor in which the initial configuration is done. The boot class then needs to be defined in the ‘$AKKA_HOME/config/akka.conf’ config file like this: + +.. code-block:: ruby + + akka { + boot = ["sample.java.Boot", "sample.scala.Boot"] # FQN to the class doing initial actor + # supervisor bootstrap, should be defined in default constructor + ... + } + +After you've placed your service-jar into the $AKKA_HOME/deploy directory, you'll need to tell Akka where to look for your services, and you do that by specifying what packages you want Akka to scan for services, and that's done in akka.conf in the http-section: + +.. code-block:: ruby + + akka { + http { + ... + resource-packages = ["com.bar","com.foo.bar"] # List with all resource packages for your Jersey services + ... + } + +When deploying in another servlet container: +-------------------------------------------- + +If you deploy Akka in another JEE container, don't forget to create an Akka initialization and cleanup hook: + +.. code-block:: scala + + package com.my //<--- your own package + import akka.util.AkkaLoader + import akka.remote.BootableRemoteActorService + import akka.actor.BootableActorLoaderService + import javax.servlet.{ServletContextListener, ServletContextEvent} + + /** + * This class can be added to web.xml mappings as a listener to start and postStop Akka. + * + * ... + * + * com.my.Initializer + * + * ... + * + */ + class Initializer extends ServletContextListener { + lazy val loader = new AkkaLoader + def contextDestroyed(e: ServletContextEvent): Unit = loader.shutdown + def contextInitialized(e: ServletContextEvent): Unit = + loader.boot(true, new BootableActorLoaderService with BootableRemoteActorService) //<--- Important + // loader.boot(true, new BootableActorLoaderService {}) // If you don't need akka-remote + } + +Then you just declare it in your web.xml: + +.. code-block:: xml + + + ... 
+ + your.package.Initializer + + ... + + +Also, you need to map the servlet that will handle your Jersey/JAX-RS calls, you use Jerseys ServletContainer servlet. + +.. code-block:: xml + + + ... + + Akka + com.sun.jersey.spi.container.servlet.ServletContainer + + + com.sun.jersey.config.property.resourceConfigClass + com.sun.jersey.api.core.PackagesResourceConfig + + + com.sun.jersey.config.property.packages + your.resource.package.here;and.another.here;and.so.on + + + + * + Akka + + ... + + +Adapting your own Akka Initializer for the Servlet Container +------------------------------------------------------------ + +If you want to use akka-camel or any other modules that have their own "Bootable"'s you'll need to write your own Initializer, which is _ultra_ simple, see below for an example on how to include Akka-camel. + +.. code-block:: scala + + package com.my //<--- your own package + import akka.remote.BootableRemoteActorService + import akka.actor.BootableActorLoaderService + import akka.camel.CamelService + import javax.servlet.{ServletContextListener, ServletContextEvent} + + /** + * This class can be added to web.xml mappings as a listener to start and postStop Akka. + * + * ... + * + * com.my.Initializer + * + * ... + * + */ + class Initializer extends ServletContextListener { + lazy val loader = new AkkaLoader + def contextDestroyed(e: ServletContextEvent): Unit = loader.shutdown + def contextInitialized(e: ServletContextEvent): Unit = + loader.boot(true, new BootableActorLoaderService with BootableRemoteActorService with CamelService) //<--- Important + } + +Java API: Typed Actors +---------------------- + +`Sample module for REST services with Actors in Java `_ + +Scala API: Actors +----------------- + +`Sample module for REST services with Actors in Scala `_ + +Using Akka with the Pinky REST/MVC framework +-------------------------------------------- + +Pinky has a slick Akka integration. 
Read more `here `_ + +jetty-run in SBT +---------------- + +If you want to use jetty-run in SBT you need to exclude the version of Jetty that is bundled in akka-http: + +.. code-block:: scala + + override def ivyXML = + + + + + + +Mist - Lightweight Asynchronous HTTP +------------------------------------ + +The *Mist* layer was developed to provide a direct connection between the servlet container and Akka actors with the goal of handling the incoming HTTP request as quickly as possible in an asynchronous manner. The motivation came from the simple desire to treat REST calls as completable futures, that is, effectively passing the request along an actor message chain to be resumed at the earliest possible time. The primary constraint was to not block any existing threads and secondarily, not create additional ones. Mist is very simple and works both with Jetty Continuations and with Servlet API 3.0 (tested using Jetty-8.0.0.M1). When the servlet handles a request, a message is created, typed to represent the method (e.g. Get, Post, etc.), the request is suspended and the message is sent (fire-and-forget) to the *root endpoint* actor. That's it. There are no POJOs required to host the service endpoints and the request is treated as any other. The message can be resumed (completed) using a number of helper methods that set the proper HTTP response status code. + +A complete runnable example can be found here: ``_ + +Endpoints +^^^^^^^^^ + +Endpoints are actors that handle request messages. Minimally there must be an instance of the *RootEndpoint* and then at least one more (to implement your services). + +Preparations +^^^^^^^^^^^^ + +In order to use Mist you have to register the MistServlet in *web.xml* or do the equivalent for the embedded server if running in Akka Microkernel: + +.. 
code-block:: xml + + + akkaMistServlet + akka.http.AkkaMistServlet + + + + + akkaMistServlet + /* + + +Then you also have to add the following dependencies to your SBT build definition: + +.. code-block:: scala + + val jettyWebapp = "org.eclipse.jetty" % "jetty-webapp" % "8.0.0.M2" % "test" + val javaxServlet30 = "org.mortbay.jetty" % "servlet-api" % "3.0.20100224" % "provided" + +Attention: You have to use SBT 0.7.5.RC0 or higher in order to be able to work with that Jetty version. + +An Example +^^^^^^^^^^ + +Startup +******* + +In this example, we'll use the built-in *RootEndpoint* class and implement our own service from that. Here the services are started in the boot loader and attached to the top level supervisor. + +.. code-block:: scala + + class Boot { + val factory = SupervisorFactory( + SupervisorConfig( + OneForOneStrategy(List(classOf[Exception]), 3, 100), + // + // in this particular case, just boot the built-in default root endpoint + // + Supervise( + actorOf[RootEndpoint], + Permanent) :: + Supervise( + actorOf[SimpleAkkaAsyncHttpService], + Permanent) + :: Nil)) + factory.newInstance.start + } + +**Defining the Endpoint** +The service is an actor that mixes in the *Endpoint* trait. Here the dispatcher is taken from the Akka configuration file which allows for custom tuning of these actors, though naturally, any dispatcher can be used. + +URI Handling +************ + +Rather than use traditional annotations to pair HTTP request and class methods, Mist uses hook and provide functions. This offers a great deal of flexibility in how a given endpoint responds to a URI. A hook function is simply a filter, returning a Boolean to indicate whether or not the endpoint will handle the URI. This can be as simple as a straight match or as fancy as you need. If a hook for a given URI returns true, the matching provide function is called to obtain an actor to which the message can be delivered. 
Notice in the example below, in one case, the same actor is returned and in the other, a new actor is created and returned. Note that URI hooking is non-exclusive and a message can be delivered to multiple actors (see next example). + +Plumbing +******** + +Hook and provider functions are attached to a parent endpoint, in this case the root, by sending it the **Endpoint.Attach** message. +Finally, bind the *handleHttpRequest* function of the *Endpoint* trait to the actor's *receive* function and we're done. + +.. code-block:: scala + + class SimpleAkkaAsyncHttpService extends Actor with Endpoint { + final val ServiceRoot = "/simple/" + final val ProvideSameActor = ServiceRoot + "same" + final val ProvideNewActor = ServiceRoot + "new" + + // + // use the configurable dispatcher + // + self.dispatcher = Endpoint.Dispatcher + + // + // there are different ways of doing this - in this case, we'll use a single hook function + // and discriminate in the provider; alternatively we can pair hooks & providers + // + def hook(uri: String): Boolean = ((uri == ProvideSameActor) || (uri == ProvideNewActor)) + def provide(uri: String): ActorRef = { + if (uri == ProvideSameActor) same + else actorOf[BoringActor].start() + } + + // + // this is where you want attach your endpoint hooks + // + override def preStart = { + // + // we expect there to be one root and that it's already been started up + // obviously there are plenty of other ways to obtaining this actor + // the point is that we need to attach something (for starters anyway) + // to the root + // + val root = Actor.registry.actorsFor(classOf[RootEndpoint]).head + root ! Endpoint.Attach(hook, provide) + } + + // + // since this actor isn't doing anything else (i.e. not handling other messages) + // just assign the receive func like so... 
+ // otherwise you could do something like: + // def myrecv = {...} + // def receive = myrecv orElse _recv + // + def receive = handleHttpRequest + + // + // this will be our "same" actor provided with ProvideSameActor endpoint is hit + // + lazy val same = actorOf[BoringActor].start() + } + +Handling requests +***************** + +Messages are handled just as any other that are received by your actor. The servlet requests and response are not hidden and can be accessed directly as shown below. + +.. code-block:: scala + + /** + * Define a service handler to respond to some HTTP requests + */ + class BoringActor extends Actor { + import java.util.Date + import javax.ws.rs.core.MediaType + + var gets = 0 + var posts = 0 + var lastget: Option[Date] = None + var lastpost: Option[Date] = None + + def receive = { + // handle a get request + case get: Get => + // the content type of the response. + // similar to @Produces annotation + get.response.setContentType(MediaType.TEXT_HTML) + + // + // "work" + // + gets += 1 + lastget = Some(new Date) + + // + // respond + // + val res = "

Gets: "+gets+" Posts: "+posts+"

Last Get: "+lastget.getOrElse("Never").toString+" Last Post: "+lastpost.getOrElse("Never").toString+"

" + get.OK(res) + + // handle a post request + case post:Post => + // the expected content type of the request + // similar to @Consumes + if (post.request.getContentType startsWith MediaType.APPLICATION_FORM_URLENCODED) { + // the content type of the response. + // similar to @Produces annotation + post.response.setContentType(MediaType.TEXT_HTML) + + // "work" + posts += 1 + lastpost = Some(new Date) + + // respond + val res = "

Gets: "+gets+" Posts: "+posts+"

Last Get: "+lastget.getOrElse("Never").toString+" Last Post: "+lastpost.getOrElse("Never").toString+"

" + post.OK(res) + } else { + post.UnsupportedMediaType("Content-Type request header missing or incorrect (was '" + post.request.getContentType + "' should be '" + MediaType.APPLICATION_FORM_URLENCODED + "')") + } + } + + case other: RequestMethod => + other.NotAllowed("Invalid method for this endpoint") + } + } + +**Timeouts** +Messages will expire according to the default timeout (specified in akka.conf). Individual messages can also be updated using the *timeout* method. One thing that may seem unexpected is that when an expired request returns to the caller, it will have a status code of OK (200). Mist will add an HTTP header to such responses to help clients, if applicable. By default, the header will be named "Async-Timeout" with a value of "expired" - both of which are configurable. + +Another Example - multiplexing handlers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As noted above, hook functions are non-exclusive. This means multiple actors can handle the same request if desired. In this next example, the hook functions are identical (yes, the same one could have been reused) and new instances of both A and B actors will be created to handle the Post. A third mediator is inserted to coordinate the results of these actions and respond to the caller. + +.. 
code-block:: scala + + package sample.mist + + import akka.actor._ + import akka.actor.Actor._ + import akka.http._ + + import javax.servlet.http.HttpServletResponse + + class InterestingService extends Actor with Endpoint { + final val ServiceRoot = "/interesting/" + final val Multi = ServiceRoot + "multi/" + // use the configurable dispatcher + self.dispatcher = Endpoint.Dispatcher + + // + // The "multi" endpoint shows forking off multiple actions per request + // It is triggered by POSTing to http://localhost:9998/interesting/multi/{foo} + // Try with/without a header named "Test-Token" + // Try with/without a form parameter named "Data" + def hookMultiActionA(uri: String): Boolean = uri startsWith Multi + def provideMultiActionA(uri: String): ActorRef = actorOf(new ActionAActor(complete)).start() + + def hookMultiActionB(uri: String): Boolean = uri startsWith Multi + def provideMultiActionB(uri: String): ActorRef = actorOf(new ActionBActor(complete)).start() + + // + // this is where you want attach your endpoint hooks + // + override def preStart = { + // + // we expect there to be one root and that it's already been started up + // obviously there are plenty of other ways to obtaining this actor + // the point is that we need to attach something (for starters anyway) + // to the root + // + val root = Actor.registry.actorsFor(classOf[RootEndpoint]).head + root ! Endpoint.Attach(hookMultiActionA, provideMultiActionA) + root ! Endpoint.Attach(hookMultiActionB, provideMultiActionB) + } + + // + // since this actor isn't doing anything else (i.e. not handling other messages) + // just assign the receive func like so... 
+ // otherwise you could do something like: + // def myrecv = {...} + // def receive = myrecv orElse handleHttpRequest + // + def receive = handleHttpRequest + + // + // this guy completes requests after other actions have occurred + // + lazy val complete = actorOf[ActionCompleteActor].start() + } + + class ActionAActor(complete:ActorRef) extends Actor { + import javax.ws.rs.core.MediaType + + def receive = { + // handle a post request + case post: Post => + // the expected content type of the request + // similar to @Consumes + if (post.request.getContentType startsWith MediaType.APPLICATION_FORM_URLENCODED) { + // the content type of the response. + // similar to @Produces annotation + post.response.setContentType(MediaType.TEXT_HTML) + + // get the resource name + val name = post.request.getRequestURI.substring("/interesting/multi/".length) + if (name.length % 2 == 0) post.response.getWriter.write("

Action A verified request.

") + else post.response.getWriter.write("

Action A could not verify request.

") + + // notify the next actor to coordinate the response + complete ! post + } else post.UnsupportedMediaType("Content-Type request header missing or incorrect (was '" + post.request.getContentType + "' should be '" + MediaType.APPLICATION_FORM_URLENCODED + "')") + } + } + } + + class ActionBActor(complete:ActorRef) extends Actor { + import javax.ws.rs.core.MediaType + + def receive = { + // handle a post request + case post: Post => + // the expected content type of the request + // similar to @Consumes + if (post.request.getContentType startsWith MediaType.APPLICATION_FORM_URLENCODED) { + // pull some headers and form params + def default(any: Any): String = "" + + val token = post.getHeaderOrElse("Test-Token", default) + val data = post.getParameterOrElse("Data", default) + + val (resp, status) = (token, data) match { + case ("", _) => ("No token provided", HttpServletResponse.SC_FORBIDDEN) + case (_, "") => ("No data", HttpServletResponse.SC_ACCEPTED) + case _ => ("Data accepted", HttpServletResponse.SC_OK) + } + + // update the response body + post.response.getWriter.write(resp) + + // notify the next actor to coordinate the response + complete ! 
(post, status) + } else post.UnsupportedMediaType("Content-Type request header missing or incorrect (was '" + post.request.getContentType + "' should be '" + MediaType.APPLICATION_FORM_URLENCODED + "')") + } + + case other: RequestMethod => + other.NotAllowed("Invalid method for this endpoint") + } + } + + class ActionCompleteActor extends Actor { + import collection.mutable.HashMap + + val requests = HashMap.empty[Int, Int] + + def receive = { + case req: RequestMethod => + if (requests contains req.hashCode) complete(req) + else requests += (req.hashCode -> 0) + + case t: Tuple2[RequestMethod, Int] => + if (requests contains t._1.hashCode) complete(t._1) + else requests += (t._1.hashCode -> t._2) + } + + def complete(req: RequestMethod) = requests.remove(req.hashCode) match { + case Some(HttpServletResponse.SC_FORBIDDEN) => req.Forbidden("") + case Some(HttpServletResponse.SC_ACCEPTED) => req.Accepted("") + case Some(_) => req.OK("") + case _ => {} + } + } + +Examples +^^^^^^^^ + +Using the Akka Mist module with OAuth +************************************* + +``_ + +Using the Akka Mist module with the Facebook Graph API and WebGL +**************************************************************** + +Example project using Akka Mist with the Facebook Graph API and WebGL +``_ diff --git a/akka-docs/pending/issue-tracking.rst b/akka-docs/pending/issue-tracking.rst new file mode 100644 index 0000000000..fcff2e2c94 --- /dev/null +++ b/akka-docs/pending/issue-tracking.rst @@ -0,0 +1,41 @@ +Issue Tracking +============== + +Akka is using Assembla as issue tracking system. + +Browsing +-------- + +You can find the Akka tickets here: ``_ +The roadmap for each milestone is here: ``_ + +Creating tickets +---------------- + +In order to create tickets you need to do the following: + +# Register here: ``_ +# Log in +# Create the ticket: ``_ + +Thanks a lot for reporting bugs and suggesting features. 
+ +Failing test +------------ + +Please submit a failing test on the following format: + +``_ + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers + +class Ticket001Spec extends WordSpec with MustMatchers { + + "An XXX" should { + "do YYY" in { + 1 must be (1) + } + } +} +``_ diff --git a/akka-docs/pending/language-bindings.rst b/akka-docs/pending/language-bindings.rst new file mode 100644 index 0000000000..2f44df50e2 --- /dev/null +++ b/akka-docs/pending/language-bindings.rst @@ -0,0 +1,24 @@ +Other Language Bindings +======================= + +JRuby +----- + +High level concurrency using Akka actors and JRuby. + +``_ + +If you are using STM with JRuby then you need to unwrap the Multiverse control flow exception as follows: + +.. code-block:: ruby + + begin + ... atomic stuff + rescue NativeException => e + raise e.cause if e.cause.java_class.package.name.include? "org.multiverse" + end + +Groovy/Groovy++ +--------------- + +``_ diff --git a/akka-docs/pending/licenses.rst b/akka-docs/pending/licenses.rst new file mode 100644 index 0000000000..02a3a10cec --- /dev/null +++ b/akka-docs/pending/licenses.rst @@ -0,0 +1,197 @@ +Licenses +======== + +.. highlight:: text + +Akka License +------------ + +:: + + This software is licensed under the Apache 2 license, quoted below. + + Copyright 2009-2011 Scalable Solutions AB + + Licensed under the Apache License, Version 2.0 (the "License"); you may not + use this file except in compliance with the License. You may obtain a copy of + the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations under + the License. 
+ +Akka Committer License Agreement +-------------------------------- + +All committers have signed this CLA + +:: + + Based on: http://www.apache.org/licenses/icla.txt + + Scalable Solutions AB + Individual Contributor License Agreement ("Agreement") V2.0 + http://www.scalablesolutions.se/licenses/ + + Thank you for your interest in Akka, a Scalable Solutions AB (the + "Company") Open Source project. In order to clarify the intellectual + property license granted with Contributions from any person or entity, + the Company must have a Contributor License Agreement ("CLA") on file + that has been signed by each Contributor, indicating agreement to the + license terms below. This license is for your protection as a + Contributor as well as the protection of the Company and its users; + it does not change your rights to use your own Contributions for any + other purpose. + + Full name: ______________________________________________________ + + Mailing Address: ________________________________________________ + + _________________________________________________________________ + + _________________________________________________________________ + + Country: ______________________________________________________ + + Telephone: ______________________________________________________ + + Facsimile: ______________________________________________________ + + E-Mail: ______________________________________________________ + + You accept and agree to the following terms and conditions for Your + present and future Contributions submitted to the Company. In + return, the Company shall not use Your Contributions in a way that + is contrary to the public benefit or inconsistent with its nonprofit + status and bylaws in effect at the time of the Contribution. Except + for the license granted herein to the Company and recipients of + software distributed by the Company, You reserve all right, title, + and interest in and to Your Contributions. + + 1. Definitions. 
+ + "You" (or "Your") shall mean the copyright owner or legal entity + authorized by the copyright owner that is making this Agreement + with the Company. For legal entities, the entity making a + Contribution and all other entities that control, are controlled + by, or are under common control with that entity are considered to + be a single Contributor. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "Contribution" shall mean any original work of authorship, + including any modifications or additions to an existing work, that + is intentionally submitted by You to the Company for inclusion + in, or documentation of, any of the products owned or managed by + the Company (the "Work"). For the purposes of this definition, + "submitted" means any form of electronic, verbal, or written + communication sent to the Company or its representatives, + including but not limited to communication on electronic mailing + lists, source code control systems, and issue tracking systems that + are managed by, or on behalf of, the Company for the purpose of + discussing and improving the Work, but excluding communication that + is conspicuously marked or otherwise designated in writing by You + as "Not a Contribution." + + 2. Grant of Copyright License. Subject to the terms and conditions of + this Agreement, You hereby grant to the Company and to + recipients of software distributed by the Company a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare derivative works of, + publicly display, publicly perform, sublicense, and distribute Your + Contributions and such derivative works. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this Agreement, You hereby grant to the Company and to + recipients of software distributed by the Company a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have + made, use, offer to sell, sell, import, and otherwise transfer the + Work, where such license applies only to those patent claims + licensable by You that are necessarily infringed by Your + Contribution(s) alone or by combination of Your Contribution(s) + with the Work to which such Contribution(s) was submitted. If any + entity institutes patent litigation against You or any other entity + (including a cross-claim or counterclaim in a lawsuit) alleging + that your Contribution, or the Work to which you have contributed, + constitutes direct or contributory patent infringement, then any + patent licenses granted to that entity under this Agreement for + that Contribution or Work shall terminate as of the date such + litigation is filed. + + 4. You agree that all Contributions are and will be given entirely + voluntarily. Company will not be required to use, or to refrain + from using, any Contributions that You, will not, absent a + separate written agreement signed by Company, create any + confidentiality obligation of Company, and Company has not + undertaken any obligation to treat any Contributions or other + information You have given Company or will give Company in the + future as confidential or proprietary information. Furthermore, + except as otherwise provided in a separate subsequence written + agreement between You and Company, Company will be free to use, + disclose, reproduce, license or otherwise distribute, and exploit + the Contributions as it sees fit, entirely without obligation or + restriction of any kind on account of any proprietary or + intellectual property rights or otherwise. + + 5. 
You represent that you are legally entitled to grant the above + license. If your employer(s) has rights to intellectual property + that you create that includes your Contributions, you represent + that you have received permission to make Contributions on behalf + of that employer, that your employer has waived such rights for + your Contributions to the Company, or that your employer has + executed a separate Corporate CLA with the Company. + + 6. You represent that each of Your Contributions is Your original + creation (see section 7 for submissions on behalf of others). You + represent that Your Contribution submissions include complete + details of any third-party license or other restriction (including, + but not limited to, related patents and trademarks) of which you + are personally aware and which are associated with any part of Your + Contributions. + + 7. You are not expected to provide support for Your Contributions, + except to the extent You desire to provide support. You may provide + support for free, for a fee, or not at all. Unless required by + applicable law or agreed to in writing, You provide Your + Contributions on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + OF ANY KIND, either express or implied, including, without + limitation, any warranties or conditions of TITLE, NON- + INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. + + 8. Should You wish to submit work that is not Your original creation, + You may submit it to the Company separately from any + Contribution, identifying the complete details of its source and of + any license or other restriction (including, but not limited to, + related patents, trademarks, and license agreements) of which you + are personally aware, and conspicuously marking the work as + "Submitted on behalf of a third-party: [named here]". + + 9. 
You agree to notify the Company of any facts or circumstances of + which you become aware that would make these representations + inaccurate in any respect. + + 9. The validity of the interpretation of this Agreements shall be + governed by, and constructed and enforced in accordance with, the + laws of Sweden, applicable to the agreements made there (excluding + the conflict of law rules). This Agreement embodies the entire + agreement and understanding of the parties hereto and supersedes + any and all prior agreements, arrangements and understandings + relating to the matters provided for herein. No alteration, waiver, + amendment changed or supplement hereto shall be binding more + effective unless the same as set forth in writing signed by both + parties. + + Please sign: __________________________________ Date: ________________ + +Licenses for Dependency Libraries +--------------------------------- + +Each dependency and its license can be seen in the project build file (the comment on the side of each dependency): +``_ diff --git a/akka-docs/pending/logging.rst b/akka-docs/pending/logging.rst new file mode 100644 index 0000000000..833f2f419b --- /dev/null +++ b/akka-docs/pending/logging.rst @@ -0,0 +1,4 @@ +Logging +======= + +Logging has been removed. See the `Event Handler `_. diff --git a/akka-docs/pending/migration-guide-0.10.x-1.0.x.rst b/akka-docs/pending/migration-guide-0.10.x-1.0.x.rst new file mode 100644 index 0000000000..300100941f --- /dev/null +++ b/akka-docs/pending/migration-guide-0.10.x-1.0.x.rst @@ -0,0 +1,432 @@ +Migration guide from 0.10.x to 1.0.x +==================================== + +---- + +Akka & Akka Modules separated into two different repositories and distributions +------------------------------------------------------------------------------- + +Akka is split up into two different parts: +* Akka - Reflects all the sections under 'Scala API' and 'Java API' in the navigation bar. 
+* Akka Modules - Reflects all the sections under 'Add-on modules' in the navigation bar. + +Download the release you need (Akka core or Akka Modules) from ``_ and unzip it. + +---- + +Changed Akka URI +---------------- + +http:*akkasource.org changed to http:*akka.io + +Reflects XSDs, Maven repositories, ScalaDoc etc. + +---- + +Removed 'se.scalablesolutions' prefix +------------------------------------- + +We have removed some boilerplate by shortening the Akka package from +**se.scalablesolutions.akka** to just **akka** so just do a search-replace in your project, +we apologize for the inconvenience, but we did it for our users. + +---- + +Akka-core is no more +-------------------- + +Akka-core has been split into akka-actor, akka-stm, akka-typed-actor & akka-remote this means that you need to update any deps you have on akka-core. + +---- + +Config +------ + +Turning on/off modules +^^^^^^^^^^^^^^^^^^^^^^ + +All the 'service = on' elements for turning modules on and off have been replaced by a top-level list of the enabled services. + +Services available for turning on/off are: +* "remote" +* "http" +* "camel" + +**All** services are **OFF** by default. Enable the ones you are using. + +.. code-block:: ruby + + akka { + enabled-modules = [] # Comma separated list of the enabled modules. Options: ["remote", "camel", "http"] + } + +Renames +^^^^^^^ + +* 'rest' section - has been renamed to 'http' to align with the module name 'akka-http'. +* 'storage' section - has been renamed to 'persistence' to align with the module name 'akka-persistence'. + +.. code-block:: ruby + + akka { + http { + .. + } + + persistence { + .. 
+ } + } + +---- + +Important changes from RC2-RC3 +------------------------------ + +**akka.config.Supervision.Supervise** +def apply(actorRef: ActorRef, lifeCycle: LifeCycle, registerAsRemoteService: Boolean = false) +- boolean instead of remoteAddress, registers that actor with its id as service name on the local server + +**akka.actor.Actors now is the API for Java to interact with Actors, Remoting and ActorRegistry:** + +import static akka.actor.Actors.*; +*actorOf()..* +remote().actorOf()... +*registry().actorsFor("foo")...* + +***akka.actor.Actor now is the API for Scala to interact with Actors, Remoting and ActorRegistry:*** + +*import akka.actor.Actor._* +actorOf()... +*remote.actorOf()...* +registry.actorsFor("foo") + +**object UntypedActor has been deleted and replaced with akka.actor.Actors/akka.actor.Actor (Java/Scala)** +UntypedActor.actorOf -> Actors.actorOf (Java) or Actor.actorOf (Scala) + +**object ActorRegistry has been deleted and replaced with akka.actor.Actors.registry()/akka.actor.Actor.registry (Java/Scala)** +ActorRegistry. -> Actors.registry(). (Java) or Actor.registry.
(Scala) + +**object RemoteClient has been deleted and replaced with akka.actor.Actors.remote()/akka.actor.Actor.remote (Java/Scala)** +RemoteClient -> Actors.remote() (Java) or Actor.remote (Scala) + +**object RemoteServer has been deleted and replaced with akka.actor.Actors.remote()/akka.actor.Actor.remote (Java/Scala)** +RemoteServer - deleted -> Actors.remote() (Java) or Actor.remote (Scala) + +**classes RemoteActor, RemoteUntypedActor and RemoteUntypedConsumerActors have been deleted and replaced** +**with akka.actor.Actors.remote().actorOf(x, host, port)/akka.actor.Actor.remote.actorOf(x, host, port)** +RemoteActor, RemoteUntypedActor - deleted, use: remote().actorOf(YourActor.class, host, port) (Java) or remote.actorOf[YourActor](host, port) + +**Remoted spring-actors now default to spring id as service-name, use "service-name" attribute on "remote"-tag to override** + +**Listeners for RemoteServer and RemoteClient** are now registered on Actors.remote().addListener (Java) or Actor.remote.addListener (Scala), +this means that all listeners get all remote events, both remote server events and remote client events, **so adjust your code accordingly.** + +**ActorRef.startLinkRemote has been removed since one specified on creation whether the actor is client-managed or not.** + +Important change from RC3 to RC4 +-------------------------------- + +The Akka-Spring namespace has changed from akkasource.org and scalablesolutions.se to http://akka.io/schema and http://akka.io/akka-.xsd + +---- + +Module akka-actor +----------------- + +The Actor.init callback has been renamed to "preStart" to align with the general callback naming and is more clear about when it's called. + +The Actor.shutdown callback has been renamed to "postStop" to align with the general callback naming and is more clear about when it's called.
+ +The Actor.initTransactionalState callback has been removed, logic should be moved to preStart and be wrapped in an atomic block + +**se.scalablesolutions.akka.config.ScalaConfig** and **se.scalablesolutions.akka.config.JavaConfig** have been merged into **akka.config.Supervision** + +**RemoteAddress** has moved from **se.scalablesolutions.akka.config.ScalaConfig** to **akka.config** + +The ActorRef.lifeCycle has changed signature from Option[LifeCycle] to LifeCycle, this means you need to change code that looks like this: +**self.lifeCycle = Some(LifeCycle(Permanent))** to **self.lifeCycle = Permanent** + +The equivalent to **self.lifeCycle = None** is **self.lifeCycle = UndefinedLifeCycle** +**LifeCycle(Permanent)** becomes **Permanent** +**new LifeCycle(permanent())** becomes **permanent()** (need to do: import static se.scalablesolutions.akka.config.Supervision.*; first) + +**JavaConfig.Component** and **ScalaConfig.Component** have been consolidated and renamed as **Supervision.SuperviseTypedActor** + +**self.trapExit** has been moved into the FaultHandlingStrategy, and **ActorRef.faultHandler** has switched type from Option[FaultHandlingStrategy] +to FaultHandlingStrategy: + +|| **Scala** || +|| +``_ +import akka.config.Supervision._ + +self.faultHandler = OneForOneStrategy(List(classOf[Exception]), 3, 5000) + +``_ || +|| **Java** || +|| +``_ +import static akka.Supervision.*; + +getContext().setFaultHandler(new OneForOneStrategy(new Class[] { Exception.class },50,1000)) + +``_ || + +**RestartStrategy, AllForOne, OneForOne** have been replaced with **AllForOneStrategy** and **OneForOneStrategy** in **se.scalablesolutions.akka.config.Supervision** + +|| **Scala** || +|| +``_ +import akka.config.Supervision._ +SupervisorConfig( + OneForOneStrategy(List(classOf[Exception]), 3, 5000), + Supervise(pingpong1,Permanent) :: Nil +) + +``_ || +|| **Java** || +|| +``_ +import static akka.Supervision.*; + +new SupervisorConfig( + new OneForOneStrategy(new Class[] { 
Exception.class },50,1000), + new Server[] { new Supervise(pingpong1, permanent()) } +) + +``_ || + +We have removed the following factory methods: + +**Actor.actor { case foo => bar }** +**Actor.transactor { case foo => bar }** +**Actor.temporaryActor { case foo => bar }** +**Actor.init {} receive { case foo => bar }** + +They started the actor and no config was possible, it was inconsistent and irreparable. + +replace with your own factories, or: + +**actorOf( new Actor { def receive = { case foo => bar } } ).start** +**actorOf( new Actor { self.lifeCycle = Temporary; def receive = { case foo => bar } } ).start** + +ReceiveTimeout is now rescheduled after every message, before there was only an initial timeout. +To stop rescheduling of ReceiveTimeout, set **receiveTimeout = None** + +HotSwap +------- + +HotSwap does no longer use behavior stacking by default, but that is an option to both "become" and HotSwap. + +HotSwap now takes for Scala a Function from ActorRef to a Receive, the ActorRef passed in is the reference to self, so you can do self.reply() etc. + +---- + +Module akka-stm +--------------- + +The STM stuff is now in its own module. This means that there is no support for transactions or transactors in akka-actor. + +Local and global +^^^^^^^^^^^^^^^^ + +The **local/global** distinction has been dropped. This means that if the following general import was being used: + +.. code-block:: scala + + import akka.stm.local._ + +this is now just: + +.. code-block:: scala + + import akka.stm._ + +Coordinated is the new global +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +There is a new explicit mechanism for coordinated transactions. See the `Scala Transactors `_ and `Java Transactors `_ documentation for more information. Coordinated transactions and transactors are found in the ``akka.transactor`` package now. The usage of transactors has changed. + +Agents +^^^^^^ + +Agent is now in the akka-stm module and has moved to the ``akka.agent`` package. 
The implementation has been reworked and is now closer to Clojure agents. There is not much difference in general usage, the main changes involve interaction with the STM. + +While updates to Agents are asynchronous, the state of an Agent is always immediately available for reading by any thread. Agents are integrated with the STM - any dispatches made in a transaction are held until that transaction commits, and are discarded if it is retried or aborted. There is a new ``sendOff`` method for long-running or blocking update functions. + +---- + +Module akka-camel +----------------- + +Access to the CamelService managed by CamelServiceManager has changed: + +* Method service renamed to mandatoryService (Scala) +* Method service now returns Option[CamelService] (Scala) +* Introduced method getMandatoryService() (Java) +* Introduced method getService() (Java) + +|| **Scala** || +|| +``_ +import se.scalablesolutions.akka.camel.CamelServiceManager._ +import se.scalablesolutions.akka.camel.CamelService + +val o: Option[CamelService] = service +val s: CamelService = mandatoryService + +``_ || +|| **Java** || +|| +``_ +import se.scalablesolutions.akka.camel.CamelService; +import se.scalablesolutions.akka.japi.Option; +import static se.scalablesolutions.akka.camel.CamelServiceManager.*; + +Option o = getService(); +CamelService s = getMandatoryService(); + +``_ || + +Access to the CamelContext and ProducerTemplate managed by CamelContextManager has changed: + +* Method context renamed to mandatoryContext (Scala) +* Method template renamed to mandatoryTemplate (Scala) +* Method service now returns Option[CamelContext] (Scala) +* Method template now returns Option[ProducerTemplate] (Scala) +* Introduced method getMandatoryContext() (Java) +* Introduced method getContext() (Java) +* Introduced method getMandatoryTemplate() (Java) +* Introduced method getTemplate() (Java) + +|| **Scala** || +|| +``_ +import org.apache.camel.CamelContext +import org.apache.camel.ProducerTemplate 
+ +import se.scalablesolutions.akka.camel.CamelContextManager._ + +val co: Option[CamelContext] = context +val to: Option[ProducerTemplate] = template + +val c: CamelContext = mandatoryContext +val t: ProducerTemplate = mandatoryTemplate + +``_ || +|| **Java** || +|| +``_ +import org.apache.camel.CamelContext; +import org.apache.camel.ProducerTemplate; + +import se.scalablesolutions.akka.japi.Option; +import static se.scalablesolutions.akka.camel.CamelContextManager.*; + +Option co = getContext(); +Option to = getTemplate(); + +CamelContext c = getMandatoryContext(); +ProducerTemplate t = getMandatoryTemplate(); + +``_ || + +The following methods have been renamed on class se.scalablesolutions.akka.camel.Message: + +* bodyAs(Class) has been renamed to getBodyAs(Class) +* headerAs(String, Class) has been renamed to getHeaderAs(String, Class) + +The API for waiting for consumer endpoint activation and de-activation has been changed + +* CamelService.expectEndpointActivationCount has been removed and replaced by CamelService.awaitEndpointActivation +* CamelService.expectEndpointDeactivationCount has been removed and replaced by CamelService.awaitEndpointDeactivation + +|| **Scala** || +|| +``_ +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.camel.CamelServiceManager._ + +val s = startCamelService +val actor = Actor.actorOf[SampleConsumer] + +// wait for 1 consumer being activated +s.awaitEndpointActivation(1) { + actor.start +} + +// wait for 1 consumer being de-activated +s.awaitEndpointDeactivation(1) { + actor.stop +} + +s.stop + +``_ || +|| **Java** || +|| +``_ +import java.util.concurrent.TimeUnit; +import se.scalablesolutions.akka.actor.ActorRef; +import se.scalablesolutions.akka.actor.Actors; +import se.scalablesolutions.akka.camel.CamelService; +import se.scalablesolutions.akka.japi.SideEffect; +import static se.scalablesolutions.akka.camel.CamelServiceManager.*; + +CamelService s = startCamelService(); +final ActorRef actor = 
Actors.actorOf(SampleUntypedConsumer.class); + +// wait for 1 consumer being activated +s.awaitEndpointActivation(1, new SideEffect() { + public void apply() { + actor.start(); + } +}); + +// wait for 1 consumer being de-activated +s.awaitEndpointDeactivation(1, new SideEffect() { + public void apply() { + actor.stop(); + } +}); + +s.stop(); + +``_ || + +- + +Module Akka-Http +---------------- + +Atmosphere support has been removed. If you were using akka.comet.AkkaServlet for Jersey support only, +you can switch that to: akka.http.AkkaRestServlet and it should work just like before. + +Atmosphere has been removed because we have a new async http support in the form of Akka Mist, a very thin bridge +between Servlet3.0/JettyContinuations and Actors, enabling Http-as-messages, read more about it here: +http://doc.akka.io/http#Mist%20-%20Lightweight%20Asynchronous%20HTTP + +If you really need Atmosphere support, you can add it yourself by following the steps listed at the start of: +http://doc.akka.io/comet + +Module akka-spring +------------------ + +The Akka XML schema URI has changed to http://akka.io/schema/akka + +``_ + + + + + + +``_ diff --git a/akka-docs/pending/migration-guide-0.7.x-0.8.x.rst b/akka-docs/pending/migration-guide-0.7.x-0.8.x.rst new file mode 100644 index 0000000000..5c45eb76c1 --- /dev/null +++ b/akka-docs/pending/migration-guide-0.7.x-0.8.x.rst @@ -0,0 +1,94 @@ +Migrate from 0.7.x to 0.8.x +=========================== + +This is a case-by-case migration guide from Akka 0.7.x (on Scala 2.7.7) to Akka 0.8.x (on Scala 2.8.x) +------------------------------------------------------------------------------------------------------ + +Cases: +====== + +Actor.send is removed and replaced in full with Actor.! +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + myActor send "test" + +becomes + +.. code-block:: scala + + myActor ! "test" + +Actor.! 
now has it's implicit sender defaulted to None +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + def !(message: Any)(implicit sender: Option[Actor] = None) + +"import Actor.Sender.Self" has been removed because it's not needed anymore +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Remove + +.. code-block:: scala + + import Actor.Sender.Self + +Actor.spawn now uses manifests instead of concrete class types +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + val someActor = spawn(classOf[MyActor]) + +becomes + +.. code-block:: scala + + val someActor = spawn[MyActor] + +Actor.spawnRemote now uses manifests instead of concrete class types +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + val someActor = spawnRemote(classOf[MyActor],"somehost",1337) + +becomes + +.. code-block:: scala + + val someActor = spawnRemote[MyActor]("somehost",1337) + +Actor.spawnLink now uses manifests instead of concrete class types +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + val someActor = spawnLink(classOf[MyActor]) + +becomes + +.. code-block:: scala + + val someActor = spawnLink[MyActor] + +Actor.spawnLinkRemote now uses manifests instead of concrete class types +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + val someActor = spawnLinkRemote(classOf[MyActor],"somehost",1337) + +becomes + +.. code-block:: scala + + val someActor = spawnLinkRemote[MyActor]("somehost",1337) + +**Transaction.atomic and friends are moved into Transaction.Local._ and Transaction.Global._** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We now make a difference between transaction management that are local within a thread and global across many threads (and actors). 
diff --git a/akka-docs/pending/migration-guide-0.8.x-0.9.x.rst b/akka-docs/pending/migration-guide-0.8.x-0.9.x.rst new file mode 100644 index 0000000000..81866e1993 --- /dev/null +++ b/akka-docs/pending/migration-guide-0.8.x-0.9.x.rst @@ -0,0 +1,169 @@ +**This document describes between the 0.8.x and the 0.9 release.** + +Background for the new ActorRef +=============================== + +In the work towards 0.9 release we have now done a major change to how Actors are created. In short we have separated identity and value, created an 'ActorRef' that holds the actual Actor instance. This allows us to do many great things such as for example: + +* Create serializable, immutable, network-aware Actor references that can be freely shared across the network. They "remember" their origin and will always work as expected. +* Not only kill and restart the same supervised Actor instance when it has crashed (as we do now), but dereference it, throw it away and make it eligible for garbage collection. +* etc. much more + +These work very much like the 'PID' (process id) in Erlang. + +These changes means that there is no difference in defining Actors. You still use the old Actor trait, all methods are there etc. But you can't just new this Actor up and send messages to it since all its public API methods are gone. They now reside in a new class; 'ActorRef' and use need to use instances of this class to interact with the Actor (sending messages etc.). + +Here is a short migration guide with the things that you have to change. It is a big conceptual change but in practice you don't have to change much. + +Migration Guide +=============== + +Creating Actors with default constructor +---------------------------------------- + +From: + +.. code-block:: scala + + val a = new MyActor + a ! msg + +To: + +.. code-block:: scala + + import Actor._ + val a = actorOf[MyActor] + a ! msg + +You can also start it in the same statement: + +.. 
code-block:: scala + + val a = actorOf[MyActor].start + +Creating Actors with non-default constructor +-------------------------------------------- + +From: + +.. code-block:: scala + + val a = new MyActor(..) + a ! msg + +To: + +.. code-block:: scala + + import Actor._ + val a = actorOf(new MyActor(..)) + a ! msg + +Use of 'self' ActorRef API +-------------------------- + +Where you have used 'this' to refer to the Actor from within itself now use 'self': + +.. code-block:: scala + + self ! MessageToMe + +Now the Actor trait only has the callbacks you can implement: +* receive +* postRestart/preRestart +* init/shutdown + +It has no state at all. + +All API has been moved to ActorRef. The Actor is given its ActorRef through the 'self' member variable. +Here you find functions like: +* !, !!, !!! and forward +* link, unlink, startLink, spawnLink etc +* makeTransactional, makeRemote etc. +* start, stop +* etc. + +Here you also find fields like +* dispatcher = ... +* id = ... +* lifeCycle = ... +* faultHandler = ... +* trapExit = ... +* etc. + +This means that to use them you have to prefix them with 'self', like this: + +.. code-block:: scala + + self ! Message + +However, for convenience you can import these functions and fields like below, which will allow you do drop the 'self' prefix: + +.. code-block:: scala + + class MyActor extends Actor { + import self._ + id = ... + dispatcher = ... + spawnLink[OtherActor] + ... + } + +Serialization +============= + +If you want to serialize it yourself, here is how to do it: + +.. code-block:: scala + + val actorRef1 = actorOf[MyActor] + + val bytes = actorRef1.toBinary + + val actorRef2 = ActorRef.fromBinary(bytes) + +If you are also using Protobuf then you can use the methods that work with Protobuf's Messages directly. + +.. 
code-block:: scala + + val actorRef1 = actorOf[MyActor] + + val protobufMessage = actorRef1.toProtocol + + val actorRef2 = ActorRef.fromProtocol(protobufMessage) + + Camel +====== + +Some methods of the se.scalablesolutions.akka.camel.Message class have been deprecated in 0.9. These are + +.. code-block:: scala + + package se.scalablesolutions.akka.camel + + case class Message(...) { + // ... + @deprecated def bodyAs[T](clazz: Class[T]): T + @deprecated def setBodyAs[T](clazz: Class[T]): Message + // ... + } + +They will be removed in 1.0. Instead use + +.. code-block:: scala + + package se.scalablesolutions.akka.camel + + case class Message(...) { + // ... + def bodyAs[T](implicit m: Manifest[T]): T = + def setBodyAs[T](implicit m: Manifest[T]): Message + // ... + } + +Usage example: +``_ +val m = Message(1.4) +val b = m.bodyAs[String] +``_ diff --git a/akka-docs/pending/migration-guide-0.9.x-0.10.x.rst b/akka-docs/pending/migration-guide-0.9.x-0.10.x.rst new file mode 100644 index 0000000000..68ec0cb087 --- /dev/null +++ b/akka-docs/pending/migration-guide-0.9.x-0.10.x.rst @@ -0,0 +1,45 @@ +Migration Guide from Akka 0.9.x to Akka 0.10.x +============================================== + +Module akka-camel +----------------- + +The following list summarizes the breaking changes since Akka 0.9.1. + +* CamelService moved from package se.scalablesolutions.akka.camel.service one level up to se.scalablesolutions.akka.camel. +* CamelService.newInstance removed. For starting and stopping a CamelService, applications should use +** CamelServiceManager.startCamelService and +** CamelServiceManager.stopCamelService. +* Existing def receive = produce method definitions from Producer implementations must be removed (resolves compile error: method receive needs override modifier). +* The Producer.async method and the related Sync trait have been removed. This is now fully covered by Camel's `asynchronous routing engine `_. 
+* @consume annotation can not placed any longer on actors (i.e. on type-level), only on typed actor methods. Consumer actors must mixin the Consumer trait. +* @consume annotation moved to package se.scalablesolutions.akka.camel. + +Logging +------- + +We've switched to Logback (SLF4J compatible) for the logging, if you're having trouble seeing your log output you'll need to make sure that there's a logback.xml available on the classpath or you'll need to specify the location of the logback.xml file via the system property, ex: -Dlogback.configurationFile=/path/to/logback.xml + +Configuration +------------- + +* The configuration is now JSON-style (see below). +* Now you can define the time-unit to be used throughout the config file: + +.. code-block:: ruby + + akka { + version = "0.10" + time-unit = "seconds" # default timeout time unit for all timeout properties throughout the config + + actor { + timeout = 5 # default timeout for future based invocations + throughput = 5 # default throughput for ExecutorBasedEventDrivenDispatcher + } + ... + } + +RemoteClient events +------------------- + +All events now has a reference to the RemoteClient instance instead of 'hostname' and 'port'. This is more flexible. Enables simpler reconnecting etc. 
diff --git a/akka-docs/pending/migration-guides.rst b/akka-docs/pending/migration-guides.rst new file mode 100644 index 0000000000..4c44977d2f --- /dev/null +++ b/akka-docs/pending/migration-guides.rst @@ -0,0 +1,8 @@ +Here are migration guides for the latest releases +================================================= + +* `Migrate 0.7.x -> 0.8.x `_ +* `Migrate 0.8.x -> 0.9.x `_ +* `Migrate 0.9.x -> 0.10.x `_ +* `Migrate 0.10.x -> 1.0.x `_ +* `Migrate 1.0.x -> 1.1.x `_ diff --git a/akka-docs/pending/release-notes.rst b/akka-docs/pending/release-notes.rst new file mode 100644 index 0000000000..2000b5a1d6 --- /dev/null +++ b/akka-docs/pending/release-notes.rst @@ -0,0 +1,656 @@ +Release Notes +============== + +Changes listed in no particular order. + +Current Development 1.1-SNAPSHOT +================================ + +||~ =Type= ||~ =Changes= ||~ =By= || +|| **UPD** || improve FSM DSL: make onTransition syntax nicer || Roland Kuhn || + +Release 1.1-M1 +============== + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +|| **ADD** || #647 Extract an akka-camel-typed module out of akka-camel for optional typed actor support || Martin Krasser || +|| **ADD** || #654 Allow consumer actors to acknowledge in-only message exchanges || Martin Krasser || +|| **ADD** || #669 Support self.reply in preRestart and postStop after exception in receive || Martin Krasser || +|| **ADD** || #682 Support for fault-tolerant Producer actors || Martin Krasser || +|| **ADD** || Move TestKit to akka-testkit and add CallingThreadDispatcher || Roland Kuhn || +|| **ADD** || Remote Client message buffering transaction log for buffering messages failed to send due to network problems. Flushes the buffer on reconnect. 
|| Jonas Bonér ||
+|| **ADD** || Added trait to simulate network problems/errors to be used for remote actor testing || Jonas Bonér ||
+|| **ADD** || Add future and await methods to Agent || Peter Vlugter ||
+|| **ADD** || #586 Allow explicit reconnect for RemoteClient || Viktor Klang ||
+|| **ADD** || #587 Dead letter sink queue for messages sent through RemoteClient that didn't get sent due to connection failure || Viktor Klang ||
+|| **ADD** || #598 actor.id when using akka-spring should be the id of the spring bean || Viktor Klang ||
+|| **ADD** || #652 Reap expired futures from ActiveRemoteClientHandler || Viktor Klang ||
+|| **ADD** || #656 Squeeze more out of EBEDD? || Viktor Klang ||
+|| **ADD** || #715 EventHandler.error should be usable without Throwable || Viktor Klang ||
+|| **ADD** || #717 Add ExecutionHandler to NettyRemoteServer for more performance and scalability || Viktor Klang ||
+|| **ADD** || #497 Optimize remote sends done in local scope || Viktor Klang ||
+|| **ADD** || #633 Add support for Scalaz in akka-modules || Derek Williams ||
+|| **ADD** || #677 Add map, flatMap, foreach, and filter to Future || Derek Williams ||
+|| **ADD** || #661 Optimized Future's internals || Derek Williams ||
+|| **ADD** || #685 Optimize execution of Futures || Derek Williams ||
+|| **ADD** || #711 Make Future.completeWith work with an uncompleted Future || Derek Williams ||
+|| **UPD** || #667 Upgrade to Camel 2.7.0 || Martin Krasser ||
+|| **UPD** || Updated HawtDispatch to 1.1 || Hiram Chirino ||
+|| **UPD** || #688 Update Akka 1.1-SNAPSHOT to Scala 2.9.0-RC1 || Viktor Klang ||
+|| **UPD** || #718 Add HawtDispatcher to akka-modules || Viktor Klang ||
+|| **UPD** || #698 Deprecate client-managed actors || Viktor Klang ||
+|| **UPD** || #730 Update Akka and Akka Modules to SBT 0.7.6-RC0 || Viktor Klang ||
+|| **UPD** || #663 Update to latest scalatest || Derek Williams ||
+|| **FIX** || Misc cleanup, API changes and refactorings || Jonas Bonér ||
+|| **FIX** || 
#675 preStart() is called twice when creating new instance of TypedActor || Debasish Ghosh || +|| **FIX** || #704 Write docs for Java Serialization || Debasish Ghosh || +|| **FIX** || #645 Change Futures.awaitAll to not throw FutureTimeoutException but return a List[Option[Any]] || Viktor Klang || +|| **FIX** || #681 Clean exit using server-managed remote actor via client || Viktor Klang || +|| **FIX** || #720 Connection loss when sending to a dead remote actor || Viktor Klang || +|| **FIX** || #593 Move Jetty specific stuff (with deps) from akka-http to akka-kernel || Viktor Klang || +|| **FIX** || #638 ActiveRemoteClientHandler - Unexpected exception from downstream in remote client || Viktor Klang || +|| **FIX** || #655 Remote actors with non-uuid names doesn't work for req./reply-pattern || Viktor Klang || +|| **FIX** || #588 RemoteClient.shutdown does not remove client from Map with clients || Viktor Klang || +|| **FIX** || #672 Remoting breaks if mutual DNS lookup isn't possible || Viktor Klang || +|| **FIX** || #699 Remote typed actor per-session server won't start if called method has no result || Viktor Klang || +|| **FIX** || #702 Handle ReadTimeoutException in akka-remote || Viktor Klang || +|| **FIX** || #708 Fall back to Akka classloader if event-handler class cannot be found. 
|| Viktor Klang || +|| **FIX** || #716 Split akka-http and clean-up dependencies || Viktor Klang || +|| **FIX** || #721 Inability to parse/load the Config should do a System.exit(-1) || Viktor Klang || +|| **FIX** || #722 Race condition in Actor hotswapping || Viktor Klang || +|| **FIX** || #723 MessageSerializer CNFE regression || Viktor Klang || +|| **FIX** || #680 Remote TypedActor behavior differs from local one when sending to generic interfaces || Viktor Klang || +|| **FIX** || #659 Calling await on a Future that is expired and uncompleted should throw an exception || Derek Williams || +|| **REM** || #626 Update and clean up dependencies || Viktor Klang || +|| **REM** || #623 Remove embedded-repo (Akka + Akka Modules) || Viktor Klang || +|| **REM** || #686 Remove SBinary || Viktor Klang || + +Release 1.0-RC6 +=============== + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +|| **FIX** || #628 Supervied TypedActors fails to restart || Viktor Klang || +|| **FIX** || #629 Stuck upon actor invocation || Viktor Klang || + +Release 1.0-RC5 +=============== + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +|| **FIX** || Source JARs published to 'src' instead of 'source' || Odd Moller || +|| **FIX** || #612 Conflict between Spring autostart=true for Consumer actors and || Martin Krasser || +|| **FIX** || #613 Change Akka XML schema URI to http://akka.io/schema/akka || Martin Krasser || +|| **FIX** || Spring XSD namespace changed from 'akkasource.org' to 'akka.io' || Viktor Klang || +|| **FIX** || Checking for remote secure cookie is disabled by default if no akka.conf is loaded || Viktor Klang || +|| **FIX** || Changed Casbah to ScalaToolsRepo for akka-sbt-plugin || Viktor Klang || +|| **FIX** || ActorRef.forward now doesn't require the sender to be set on the message || Viktor Klang || + +Release 1.0-RC3 +=============== + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +|| **ADD** || #568 Add 
autostart attribute to Spring actor configuration || Viktor Klang || +|| **ADD** || #586 Allow explicit reconnect for remote clients || Viktor Klang || +|| **ADD** || #587 Add possibility for dead letter queues for failed remote sends || Viktor Klang || +|| **ADD** || #497 Optimize remote send in local scope || Viktor Klang || +|| **ADD** || Improved Java Actor API: akka.actor.Actors || Viktor Klang || +|| **ADD** || Improved Scala Actor API: akka.actor.Actor || Viktor Klang || +|| **ADD** || #148 Create a testing framework for testing Actors || Roland Kuhn || +|| **ADD** || Support Replica Set/Replica Pair connection modes with MongoDB Persistence || Brendan McAdams || +|| **ADD** || User configurable Write Concern settings for MongoDB Persistence || Brendan McAdams || +|| **ADD** || Support for configuring MongoDB Persistence with MongoDB's URI Connection String || Brendan McAdams || +|| **ADD** || Support for Authentication with MongoDB Persistence || Brendan McAdams || +|| **FIX** || Misc bug fixes || Team || +|| **FIX** || #603 Race condition in Remote send || Viktor Klang || +|| **FIX** || #594 Log statement in RemoteClientHandler was wrongly formatted || Viktor Klang || +|| **FIX** || #580 Message uuids must be generated || Viktor Klang || +|| **FIX** || #583 Serialization classloader has a visibility issue || Viktor Klang || +|| **FIX** || #598 By default the bean ID should become the actor id for Spring actor configuration || Viktor Klang || +|| **FIX** || #577 RemoteClientHandler swallows certain exceptions || Viktor Klang || +|| **FIX** || #581 Fix edgecase where an exception could not be deserialized || Viktor Klang || +|| **FIX** || MongoDB write success wasn't being properly checked; fixed (integrated w/ new write concern features) || Brendan McAdams || +|| **UPD** || Improvements to FSM module akka.actor.FSM || Manie & Kuhn || +|| **UPD** || Changed Akka URI to http://akka.io. Reflects both XSDs, Maven repositories etc. 
|| Jonas Bonér || +|| **REM** || #574 Remove RemoteClient, RemoteServer and RemoteNode || Viktor Klang || +|| **REM** || object UntypedActor, object ActorRegistry, class RemoteActor, class RemoteUntypedActor, class RemoteUntypedConsumerActor || Viktor Klang || + +Release 1.0-RC1 +=============== + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +|| **ADD** || #477 Added support for Remote Agents || Viktor Klang || +|| **ADD** || #460 Hotswap for Java API (UntypedActor) || Viktor Klang || +|| **ADD** || #471 Added support for TypedActors to return Java Option || Viktor Klang || +|| **ADD** || New design and API for more fluent and intuitive FSM module || Roland Kuhn || +|| **ADD** || Added secure cookie based remote node authentication || Jonas Bonér || +|| **ADD** || Untrusted safe mode for remote server || Jonas Bonér || +|| **ADD** || Refactored config file format - added list of enabled modules etc. || Jonas Bonér || +|| **ADD** || Docs for Dataflow Concurrency || Jonas Bonér || +|| **ADD** || Made remote message frame size configurable || Jonas Bonér || +|| **ADD** || #496 Detect when Remote Client disconnects || Jonas Bonér || +|| **ADD** || #472 Improve API to wait for endpoint activation/deactivation (`more `_ ...) || Martin Krasser || +|| **ADD** || #473 Allow consumer actors to customize their own routes (`more `_ ...) 
|| Martin Krasser || +|| **ADD** || #504 Add session bound server managed remote actors || Paul Pach || +|| **ADD** || DSL for FSM || Irmo Manie || +|| **ADD** || Shared unit test for all dispatchers to enforce Actor Model || Viktor Klang || +|| **ADD** || #522 Make stacking optional for become and HotSwap || Viktor Klang || +|| **ADD** || #524 Make frame size configurable for client&server || Bonér & Klang || +|| **ADD** || #526 Add onComplete callback to Future || Viktor Klang || +|| **ADD** || #536 Document Channel-abstraction for later replies || Viktor Klang || +|| **ADD** || #540 Include self-reference as parameter to HotSwap || Viktor Klang || +|| **ADD** || #546 Include Garrick Evans' Akka-mist into master || Viktor Klang || +|| **ADD** || #438 Support remove operation in PersistentVector || Scott Clasen || +|| **ADD** || #229 Memcached protocol support for Persistence module || Scott Clasen || +|| **ADD** || Amazon SimpleDb support for Persistence module || Scott Clasen || +|| **FIX** || #518 refactor common storage bakend to use bulk puts/gets where possible || Scott Clasen || +|| **FIX** || #532 Prevent persistent datatypes with same uuid from corrupting a TX || Scott Clasen || +|| **FIX** || #464 ThreadPoolBuilder should be rewritten to be an immutable builder || Viktor Klang || +|| **FIX** || #449 Futures.awaitOne now uses onComplete listeners || Viktor Klang || +|| **FIX** || #486 Fixed memory leak caused by Configgy that prevented full unload || Viktor Klang || +|| **FIX** || #488 Fixed race condition in EBEDD restart || Viktor Klang || +|| **FIX** || #492 Fixed race condition in Scheduler || Viktor Klang || +|| **FIX** || #493 Switched to non-https repository for JBoss artifacts || Viktor Klang || +|| **FIX** || #481 Exception when creating an actor now behaves properly when supervised || Viktor Klang || +|| **FIX** || #498 Fixed no-op in supervision DSL || Viktor Klang || +|| **FIX** || #491 reply and reply_? 
now sets a sender reference || Viktor Klang || +|| **FIX** || #519 NotSerializableError when using Remote Typed Actors || Viktor Klang || +|| **FIX** || #523 Message.toString is called all the time for incoming messages, expensive || Viktor Klang || +|| **FIX** || #537 Make sure top folder is included in sources jar || Viktor Klang || +|| **FIX** || #529 Remove Scala version number from Akka artifact ids || Viktor Klang || +|| **FIX** || #533 Can't set LifeCycle from the Java API || Viktor Klang || +|| **FIX** || #542 Make Future-returning Remote Typed Actor methods use onComplete || Viktor Klang || +|| **FIX** || #479 Do not register listeners when CamelService is turned off by configuration || Martin Krasser || +|| **FIX** || Fixed bug with finding TypedActor by type in ActorRegistry || Jonas Bonér || +|| **FIX** || #515 race condition in FSM StateTimeout Handling || Irmo Manie || +|| **UPD** || Akka package from "se.scalablesolutions.akka" to "akka" || Viktor Klang || +|| **UPD** || Update Netty to 3.2.3.Final || Viktor Klang || +|| **UPD** || #458 Camel to 2.5.0 || Martin Krasser || +|| **UPD** || #458 Spring to 3.0.4.RELEASE || Martin Krasser || +|| **UPD** || #458 Jetty to 7.1.6.v20100715 || Martin Krasser || +|| **UPD** || Update to Scala 2.8.1 || Jonas Bonér || +|| **UPD** || Changed remote server default port to 2552 (AKKA) || Jonas Bonér || +|| **UPD** || Cleaned up and made remote protocol more efficient || Jonas Bonér || +|| **UPD** || #528 RedisPersistentRef should not throw in case of missing key || Debasish Ghosh || +|| **UPD** || #531 Fix RedisStorage add() method in Java API || Debasish Ghosh || +|| **UPD** || #513 Implement snapshot based persistence control in SortedSet || Debasish Ghosh || +|| **UPD** || #547 Update FSM docs || Irmo Manie || +|| **UPD** || #548 Update AMQP docs || Irmo Manie || +|| **REM** || Atmosphere integration, replace with Mist || Klang @ Evans || +|| **REM** || JGroups integration, doesn't play with cloud services :/ || 
Viktor Klang || + +Release 1.0-MILESTONE1 +====================== + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +|| **ADD** || Splitted akka-core up in akka-actor, akka-typed-actor & akka-remote || Jonas Bonér || +|| **ADD** || Added meta-data to network protocol || Jonas Bonér || +|| **ADD** || HotSwap and actor.become now uses a stack of PartialFunctions with API for pushing and popping the stack || Jonas Bonér || +|| **ADD** || #440 Create typed actors with constructor args || Michael Kober || +|| **ADD** || #322 Abstraction for unification of sender and senderFuture for later reply || Michael Kober || +|| **ADD** || #364 Serialization for TypedActor proxy reference || Michael Kober || +|| **ADD** || #423 Support configuration of Akka via Spring || Michael Kober || +|| **FIX** || #426 UUID wrong for remote proxy for server managed actor || Michael Kober || +|| **ADD** || #378 Support for server initiated remote TypedActor and UntypedActor in Spring config || Michael Kober || +||< **ADD** ||< #194 Support for server-managed typed actor ||< Michael Kober || +|| **ADD** || #447 Allow Camel service to be turned off by configuration || Martin Krasser || +|| **ADD** || #457 JavaAPI improvements for akka-camel (please read the `migration guide `_) || Martin Krasser || +|| **ADD** || #465 Dynamic message routing to actors (`more `_ ...) 
|| Martin Krasser || +|| **FIX** || #410 Use log configuration from config directory || Martin Krasser || +|| **FIX** || #343 Some problems with persistent structures || Debasish Ghosh || +|| **FIX** || #430 Refactor / re-implement MongoDB adapter so that it conforms to the guidelines followed in Redis and Cassandra modules || Debasish Ghosh || +|| **FIX** || #436 ScalaJSON serialization does not map Int data types properly when used within a Map || Debasish Ghosh || +|| **ADD** || #230 Update redisclient to be Redis 2.0 compliant || Debasish Ghosh || +|| **FIX** || #435 Mailbox serialization does not retain messages || Debasish Ghosh || +|| **ADD** || #445 Integrate type class based serialization of sjson into Akka || Debasish Ghosh || +|| **FIX** || #480: Regression multibulk replies redis client || Debasish Ghosh || +|| **FIX** || #415 Publish now generate source and doc jars || Viktor Klang || +|| **FIX** || #420 REST endpoints should be able to be processed in parallel || Viktor Klang || +|| **FIX** || #422 Dispatcher config should work for ThreadPoolBuilder-based dispatchers || Viktor Klang || +|| **FIX** || #401 ActorRegistry should not leak memory || Viktor Klang || +|| **FIX** || #250 Performance optimization for ExecutorBasedEventDrivenDispatcher || Viktor Klang || +|| **FIX** || #419 Rename init and shutdown callbacks to preStart and postStop, and remove initTransactionalState || Viktor Klang || +|| **FIX** || #346 Make max no of restarts (and within) are now both optional || Viktor Klang || +|| **FIX** || #424 Actors self.supervisor not set by the time init() is called when started by startLink() || Viktor Klang || +|| **FIX** || #427 spawnLink and startLink now has the same dispatcher semantics || Viktor Klang || +|| **FIX** || #413 Actor shouldn't process more messages when waiting to be restarted (HawtDispatcher still does) || Viktor Klang || +|| **FIX** || !! and !!! 
now do now not block the actor when used in remote actor || Viktor Klang || +|| **FIX** || RemoteClient now reconnects properly || Viktor Klang || +|| **FIX** || Logger.warn now properly works with varargs || Viktor Klang || +|| **FIX** || #450 Removed ActorRef lifeCycle boilerplate: Some(LifeCycle(Permanent)) => Permanent || Viktor Klang || +|| **FIX** || Moved ActorRef.trapExit into ActorRef.faultHandler and removed Option-boilerplate from faultHandler || Viktor Klang || +|| **FIX** || ThreadBasedDispatcher cheaper for idling actors, also benefits from all that is ExecutorBasedEventDrivenDispatcher || Viktor Klang || +|| **FIX** || Fixing Futures.future, uses Actor.spawn under the hood, specify dispatcher to control where block is executed || Viktor Klang || +|| **FIX** || #469 Akka "dist" now uses a root folder to avoid loitering if unzipped in a folder || Viktor Klang || +|| **FIX** || Removed ScalaConfig, JavaConfig and rewrote Supervision configuration || Viktor Klang || +|| **UPD** || Jersey to 1.3 || Viktor Klang || +|| **UPD** || Atmosphere to 0.6.2 || Viktor Klang || +|| **UPD** || Netty to 3.2.2.Final || Viktor Klang || +|| **ADD** || Changed config file priority loading and added config modes. 
|| Viktor Klang || +|| **ADD** || #411 Bumped Jetty to v 7 and migrated to it's eclipse packages || Viktor Klang || +|| **ADD** || #414 Migrate from Grizzly to Jetty for Akka Microkernel || Viktor Klang || +|| **ADD** || #261 Add Java API for 'routing' module || VIktor Klang || +|| **ADD** || #262 Add Java API for Agent || Viktor Klang || +|| **ADD** || #264 Add Java API for Dataflow || Viktor Klang || +|| **ADD** || Using JerseySimpleBroadcaster instead of JerseyBroadcaster in AkkaBroadcaster || Viktor Klang || +|| **ADD** || #433 Throughput deadline added for ExecutorBasedEventDrivenDispatcher || Viktor Klang || +|| **ADD** || Add possibility to set default cometSupport in akka.conf || Viktor Klang || +|| **ADD** || #451 Added possibility to use akka-http as a standalone REST server || Viktor Klang || +|| **ADD** || #446 Added support for Erlang-style receiveTimeout || Viktor Klang || +|| **ADD** || #462 Added support for suspend/resume of processing individual actors mailbox, should give clearer restart semantics || Viktor Klang || +|| **ADD** || #466 Actor.spawn now takes an implicit dispatcher to specify who should run the block || Viktor Klang || +|| **ADD** || #456 Added map to Future and Futures.awaitMap || Viktor Klang || +|| **REM** || #418 Remove Lift sample module and docs || Viktor Klang || +|| **REM** || Removed all Reactor-based dispatchers || Viktor Klang || +|| **REM** || Removed anonymous actor factories || Viktor Klang || +|| **ADD** || Voldemort support for akka-persistence || Scott Clasen || +|| **ADD** || HBase support for akka-persistence || David Greco || +|| **ADD** || CouchDB support for akka-persistence || Yung-Luen Lan & Kahlen || +|| **ADD** || #265 Java API for AMQP module || Irmo Manie || + +Release 0.10 - Aug 21 2010 +========================== + +``_ + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +||< **ADD** ||< Added new Actor type: UntypedActor for Java API ||< Jonas Bonér || +||< **ADD** ||< #26 Deep 
serialization of Actor including its mailbox ||< Jonas Bonér || +||< **ADD** ||< Rewritten network protocol. More efficient and cleaner. ||< Jonas Bonér || +||< **ADD** ||< Rewritten Java Active Object tests into Scala to be able to run the in SBT. ||< Jonas Bonér || +||< **ADD** ||< Added isDefinedAt method to Actor for checking if it can receive a certain message ||< Jonas Bonér || +||< **ADD** ||< Added caching of Active Object generated class bytes, huge perf improvement ||< Jonas Bonér || +||< **ADD** ||< Added RemoteClient Listener API ||< Jonas Bonér || +||< **ADD** ||< Added methods to retreive children from a Supervisor ||< Jonas Bonér || +||< **ADD** ||< Rewritten Supervisor to become more clear and "correct" ||< Jonas Bonér || +||< **ADD** ||< Added options to configure a blocking mailbox with custom capacity ||< Jonas Bonér || +||< **ADD** ||< Added RemoteClient reconnection time window configuration option ||< Jonas Bonér || +||< **ADD** ||< Added ActiveObjectContext with sender reference etc ||< Jonas Bonér || +||< **ADD** ||< #293 Changed config format to JSON-style ||< Jonas Bonér || +||< **ADD** ||< #302: Incorporate new ReceiveTimeout in Actor serialization ||< Jonas Bonér || +||< **ADD** ||< Added Java API docs and made it comparable with Scala API docs. 1-1 mirroring ||< Jonas Bonér || +||< **ADD** ||< Renamed Active Object to Typed Actor ||< Jonas Bonér || +||< **ADD** ||< Enhanced Typed Actor: remoting, "real" restart upon failure etc. ||< Jonas Bonér || +||< **ADD** ||< Typed Actor now inherits Actor and is a full citizen in the Actor world. ||< Jonas Bonér || +||< **ADD** ||< Added support for remotely shutting down a remote actor ||< Jonas Bonér || +||< **ADD** ||< #224 Add support for Camel in typed actors (`more `_ ...) ||< Martin Krasser || +||< **ADD** || #282 Producer trait should implement Actor.receive (`more `_ ...) 
|| Martin Krasser || +||< **ADD** || #271 Support for bean scope prototype in akka-spring || Johan Rask || +||< **ADD** || Support for DI of values and bean references on target instance in akka-spring || Johan Rask || +||< **ADD** || #287 Method annotated with @postrestart in ActiveObject is not called during restart || Johan Rask || +|| **ADD** || Support for ApplicationContextAware in akka-spring || Johan Rask || +|| **ADD** || #199 Support shutdown hook in TypedActor || Martin Krasser || +|| **ADD** || #266 Access to typed actors from user-defined Camel routes (`more `_ ...) || Martin Krasser || +|| **ADD** || #268 Revise akka-camel documentation (`more `_ ...) || Martin Krasser || +|| **ADD** || #289 Support for Spring configuration element (`more `_ ...) || Martin Krasser || +|| **ADD** || #296 TypedActor lifecycle management || Martin Krasser || +|| **ADD** || #297 Shutdown routes to typed actors (`more `_ ...) || Martin Krasser || +|| **ADD** || #314 akka-spring to support typed actor lifecycle management (`more `_ ...) || Martin Krasser || +|| **ADD** || #315 akka-spring to support configuration of shutdown callback method (`more `_ ...) || Martin Krasser || +|| **ADD** || Fault-tolerant consumer actors and typed consumer actors (`more `_ ...) || Martin Krasser || +|| **ADD** || #320 Leverage Camel's non-blocking routing engine (`more `_ ...) || Martin Krasser || +|| **ADD** || #335 Producer trait should allow forwarding of results || Martin Krasser || +|| **ADD** || #339 Redesign of Producer trait (pre/post processing hooks, async in-out) (`more `_ ...) || Martin Krasser || +|| **ADD** || Non-blocking, asynchronous routing example for akka-camel (`more `_ ...) || Martin Krasser || +|| **ADD** || #333 Allow applications to wait for endpoints being activated (`more `_ ...) 
|| Martin Krasser || +|| **ADD** || #356 Support @consume annotations on typed actor implementation class || Martin Krasser || +|| **ADD** || #357 Support untyped Java actors as endpoint consumer || Martin Krasser || +|| **ADD** || #366 CamelService should be a singleton || Martin Krasser || +|| **ADD** || #392 Support untyped Java actors as endpoint producer || Martin Krasser || +|| **ADD** || #393 Redesign CamelService singleton to be a CamelServiceManager (`more `_ ...) || Martin Krasser || +|| **ADD** || #295 Refactoring Actor serialization to type classes || Debasish Ghosh || +|| **ADD** || #317 Change documentation for Actor Serialization || Debasish Ghosh || +|| **ADD** || #388 Typeclass serialization of ActorRef/UntypedActor isn't Java friendly || Debasish Ghosh || +|| **ADD** || #292 Add scheduleOnce to Scheduler || Irmo Manie || +|| **ADD** || #308 Initial receive timeout on actor || Irmo Manie || +|| **ADD** || Redesign of AMQP module (`more `_ ...) || Irmo Manie || +|| **ADD** || Added "become(behavior: Option[Receive])" to Actor || Viktor Klang || +|| **ADD** || Added "find[T](f: PartialFunction[ActorRef,T]) : Option[T]" to ActorRegistry || Viktor Klang || +|| **ADD** || #369 Possibility to configure dispatchers in akka.conf || Viktor Klang || +|| **ADD** || #395 Create ability to add listeners to RemoteServer || Viktor Klang || +|| **ADD** || #225 Add possibility to use Scheduler from TypedActor || Viktor Klang || +|| **ADD** || #61 Integrate new persistent datastructures in Scala 2.8 || Peter Vlugter || +|| **ADD** || Expose more of what Multiverse can do || Peter Vlugter || +|| **ADD** || #205 STM transaction settings || Peter Vlugter || +|| **ADD** || #206 STM transaction deferred and compensating || Peter Vlugter || +|| **ADD** || #232 Expose blocking transactions || Peter Vlugter || +|| **ADD** || #249 Expose Multiverse Refs for primitives || Peter Vlugter || +|| **ADD** || #390 Expose transaction propagation level in multiverse || Peter Vlugter 
|| +|| **ADD** || Package objects for importing local/global STM || Peter Vlugter || +|| **ADD** || Java API for the STM || Peter Vlugter || +|| **ADD** || #379 Create STM Atomic templates for Java API || Peter Vlugter || +|| **ADD** || #270 SBT plugin for Akka || Peter Vlugter || +|| **ADD** || #198 support for ThreadBasedDispatcher in Spring config || Michael Kober || +|| **ADD** || #377 support HawtDispatcher in Spring config || Michael Kober || +|| **ADD** || #376 support Spring config for untyped actors || Michael Kober || +|| **ADD** || #200 support WorkStealingDispatcher in Spring config || Michael Kober || +|| **UPD** || #336 RabbitMQ 1.8.1 || Irmo Manie || +|| **UPD** || #288 Netty to 3.2.1.Final || Viktor Klang || +|| **UPD** || Atmosphere to 0.6.1 || Viktor Klang || +|| **UPD** || Lift to 2.8.0-2.1-M1 || Viktor Klang || +|| **UPD** || Camel to 2.4.0 || Martin Krasser || +|| **UPD** || Spring to 3.0.3.RELEASE || Martin Krasser || +|| **UPD** || Multiverse to 0.6 || Peter Vlugter || +|| **FIX** || Fixed bug with stm not being enabled by default when no AKKA_HOME is set || Jonas Bonér || +|| **FIX** || Fixed bug in network manifest serialization || Jonas Bonér || +|| **FIX** || Fixed bug Remote Actors || Jonas Bonér || +|| **FIX** || Fixed memory leak in Active Objects || Jonas Bonér || +|| **FIX** || Fixed indeterministic deadlock in Transactor restart || Jonas Bonér || +|| **FIX** || #325 Fixed bug in STM with dead hanging CountDownCommitBarrier || Jonas Bonér || +|| **FIX** || #316: NoSuchElementException during ActiveObject restart || Jonas Bonér || +|| **FIX** || #256: Tests for ActiveObjectContext || Jonas Bonér || +|| **FIX** || Fixed bug in restart of Actors with 'Temporary' life-cycle || Jonas Bonér || +|| **FIX** || #280 Tests fail if there is no akka.conf set || Jonas Bonér || +|| **FIX** || #286 unwanted transitive dependencies from Geronimo project || Viktor Klang || +|| **FIX** || Atmosphere comet comment to use stream instead of writer || 
Viktor Klang || +|| **FIX** || #285 akka.conf is now used as defaults for Akka REST servlet init parameters || Viktor Klang || +|| **FIX** || #321 fixed performance regression in ActorRegistry || Viktor Klang || +|| **FIX** || #286 geronimo servlet 2.4 dep is no longer transitively loaded || Viktor Klang || +|| **FIX** || #334 partial lift sample rewrite to fix breakage || Viktor Klang || +|| **FIX** || Fixed a memory leak in ActorRegistry || Viktor Klang || +|| **FIX** || Fixed a race-condition in Cluster || Viktor Klang || +|| **FIX** || #355 Switched to Array instead of List on ActorRegistry return types || Viktor Klang || +|| **FIX** || #352 ActorRegistry.actorsFor(class) now checks isAssignableFrom || Viktor Klang || +|| **FIX** || Fixed a race condition in ActorRegistry.register || Viktor Klang || +|| **FIX** || #337 Switched from Configgy logging to SLF4J, better for OSGi || Viktor Klang || +|| **FIX** || #372 Scheduler now returns Futures to cancel tasks || Viktor Klang || +|| **FIX** || #306 JSON serialization between remote actors is not transparent || Debasish Ghosh || +|| **FIX** || #204 Reduce object creation in STM || Peter Vlugter || +|| **FIX** || #253 Extend Multiverse BasicRef rather than wrap ProgrammaticRef || Peter Vlugter || +|| **REM** || Removed pure POJO-style Typed Actor (old Active Object) || Jonas Bonér || +|| **REM** || Removed Lift as a dependency for Akka-http || Viktor Klang || +|| **REM** || #294 Remove reply and reply_? from Actor || Viktor Klang || +|| **REM** || Removed one field in Actor, should be a minor memory reduction for high actor quantities || Viktor Klang || +|| **FIX** || #301 DI does not work in akka-spring when specifying an interface || Johan Rask || +|| **FIX** || #328 +trapExit should pass through self with Exit to supervisor || Irmo Manie || +|| **FIX** || Fixed warning when deregistering listeners || Martin Krasser || +|| **FIX** || Added camel-jetty-2.4.0.1 to Akka's embedded-repo. 
+(fixes a concurrency bug in camel-jetty-2.4.0, to be officially released in Camel 2.5.0) || Martin Krasser || +|| **FIX** || #338 RedisStorageBackend fails when redis closes connection to idle client || Debasish Ghosh || +|| **FIX** || #340 RedisStorage Map.get does not throw exception when disconnected from redis but returns None || Debasish Ghosh || + +Release 0.9 - June 2nd 2010 +=========================== + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +|| || || || +|| **ADD** || Serializable, immutable, network-aware ActorRefs || Jonas Bonér || +|| **ADD** || Optionally JTA-aware STM transactions || Jonas Bonér || +|| **ADD** || Rewritten supervisor management, making use of ActorRef, now really kills the Actor instance and replaces it || Jonas Bonér || +|| **ADD** || Allow linking and unlinking a declaratively configured Supervisor || Jonas Bonér || +|| **ADD** || Remote protocol rewritten to allow passing along sender reference in all situations || Jonas Bonér || +|| **ADD** || #37 API for JTA usage || Jonas Bonér || +|| **ADD** || Added user accessible 'sender' and 'senderFuture' references || Jonas Bonér || +|| **ADD** || Sender actor is now passed along for all message send functions (!, !!, !!!, forward) || Jonas Bonér || +|| **ADD** || Subscription API for listening to RemoteClient failures || Jonas Bonér || +|| **ADD** || Implemented link/unlink for ActiveObjects || Jan Kronquist / Michael Kober || +|| **ADD** || Added alter method to TransactionalRef + added apply(initValue) to Transactional Map/Vector/Ref || Peter Vlugter || +|| **ADD** || Load dependency JARs in JAR deployed in kernel's ./deploy dir || Jonas Bonér || +|| **ADD** || Allow using Akka without specifying AKKA_HOME or path to akka.conf config file || Jonas Bonér || +|| **ADD** || Redisclient now supports PubSub || Debasish Ghosh || +|| **ADD** || Added a sample project under akka-samples for Redis PubSub using Akka actors || Debasish Ghosh || +|| **ADD** || 
Richer API for Actor.reply || Viktor Klang || +|| **ADD** || Added Listeners to Akka patterns || Viktor Klang || +|| **ADD** || #183 Deactivate endpoints of stopped consumer actors || Martin Krasser || +|| **ADD** || Camel `Message API improvements `_ || Martin Krasser || +|| **ADD** || #83 Send notification to parent supervisor if all actors supervised by supervisor has been permanently killed || Jonas Bonér || +|| **ADD** || #121 Make it possible to dynamically create supervisor hierarchies for Active Objects || Michael Kober || +|| **ADD** || #131 Subscription API for node joining & leaving cluster || Jonas Bonér || +|| **ADD** || #145 Register listener for errors in RemoteClient/RemoteServer || Jonas Bonér || +|| **ADD** || #146 Create an additional distribution with sources || Jonas Bonér || +|| **ADD** || #149 Support loading JARs from META-INF/lib in JARs put into the ./deploy directory || Jonas Bonér || +|| **ADD** || #166 Implement insertVectorStorageEntriesFor in CassandraStorageBackend || Jonas Bonér || +|| **ADD** || #168 Separate ID from Value in Actor; introduce ActorRef || Jonas Bonér || +|| **ADD** || #174 Create sample module for remote actors || Jonas Bonér || +|| **ADD** || #175 Add new sample module with Peter Vlugter's Ant demo || Jonas Bonér || +|| **ADD** || #177 Rewrite remote protocol to make use of new ActorRef || Jonas Bonér || +|| **ADD** || #180 Make use of ActorRef indirection for fault-tolerance management || Jonas Bonér || +|| **ADD** || #184 Upgrade to Netty 3.2.0.CR1 || Jonas Bonér || +|| **ADD** || #185 Rewrite Agent and Supervisor to work with new ActorRef || Jonas Bonér || +|| **ADD** || #188 Change the order of how the akka.conf is detected || Jonas Bonér || +|| **ADD** || #189 Reintroduce 'sender: Option[Actor]' ref in Actor || Jonas Bonér || +|| **ADD** || #203 Upgrade to Scala 2.8 RC2 || Jonas Bonér || +|| **ADD** || #222 Using Akka without AKKA_HOME or akka.conf || Jonas Bonér || +|| **ADD** || #234 Add support for 
injection and management of ActiveObjectContext with RTTI such as 'sender' and 'senderFuture' references etc. || Jonas Bonér || +|| **ADD** || #236 Upgrade SBinary to Scala 2.8 RC2 || Jonas Bonér || +|| **ADD** || #235 Problem with RedisStorage.getVector(..) data structure storage management || Jonas Bonér || +|| **ADD** || #239 Upgrade to Camel 2.3.0 || Martin Krasser || +|| **ADD** || #242 Upgraded to Scala 2.8 RC3 || Jonas Bonér || +|| **ADD** || #243 Upgraded to Protobuf 2.3.0 || Jonas Bonér || +|| **ADD** || Added option to specify class loader when de-serializing messages and RemoteActorRef in RemoteClient || Jonas Bonér || +|| **ADD** || #238 Upgrading to Cassandra 0.6.1 || Jonas Bonér || +|| **ADD** || Upgraded to Jersey 1.2 || Viktor Klang || +|| **ADD** || Upgraded Atmosphere to 0.6-SNAPSHOT, adding WebSocket support || Viktor Klang || +|| **FIX** || Simplified ActiveObject configuration || Michael Kober || +|| **FIX** || #237 Upgrade Mongo Java driver to 1.4 (the latest stable release) || Debasish Ghosh || +|| **FIX** || #165 Implemented updateVectorStorageEntryFor in Mongo persistence module || Debasish Ghosh || +|| **FIX** || #154: Allow ActiveObjects to use the default timeout in config file || Michael Kober || +|| **FIX** || Active Object methods with @inittransactionalstate should be invoked automatically || Michael Kober || +|| **FIX** || Nested supervisor hierarchy failure propagation bug fixed || Jonas Bonér || +|| **FIX** || Fixed bug on CommitBarrier transaction registration || Jonas Bonér || +|| **FIX** || Merged many modules to reduce total number of modules || Viktor Klang || +|| **FIX** || Future parameterized || Viktor Klang || +|| **FIX** || #191: Workstealing dispatcher didn't work with !! 
|| Viktor Klang || +|| **FIX** || #202: Allow applications to disable stream-caching || Martin Krasser || +|| **FIX** || #119 Problem with Cassandra-backed Vector || Jonas Bonér || +|| **FIX** || #147 Problem replying to remote sender when message sent with ! || Jonas Bonér || +|| **FIX** || #171 initial value of Ref can become null if first transaction rolled back || Jonas Bonér || +|| **FIX** || #172 Fix "broken" Protobuf serialization API || Jonas Bonér || +|| **FIX** || #173 Problem with Vector::slice in CassandraStorage || Jonas Bonér || +|| **FIX** || #190 RemoteClient shutdown ends up in endless loop || Jonas Bonér || +|| **FIX** || #211 Problem with getting CommitBarrierOpenException when using Transaction.Global || Jonas Bonér || +|| **FIX** || #240 Supervised actors not started when starting supervisor || Jonas Bonér || +|| **FIX** || Fixed problem with Transaction.Local not committing to persistent storage || Jonas Bonér || +|| **FIX** || #215: Re-engineered the JAX-RS support || Viktor Klang || +|| **FIX** || Many many bug fixes || Team || +|| **REM** || Shoal cluster module || Viktor Klang || + +Release 0.8.1 - April 6th 2010 +============================== + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +|| || || || +|| **ADD** || Redis cluster support || Debasish Ghosh || +|| **ADD** || Reply to remote sender from message set with ! 
|| Jonas Bonér || +|| **ADD** || Load-balancer which prefers actors with few messages in mailbox || Jan Van Besien || +|| **ADD** || Added developer mailing list: [akka-dev AT googlegroups DOT com] || Jonas Bonér || +|| **FIX** || Separated thread-local from thread-global transaction API || Jonas Bonér || +|| **FIX** || Fixed bug in using STM outside Actors || Jonas Bonér || +|| **FIX** || Fixed bug in anonymous actors || Jonas Bonér || +|| **FIX** || Moved web initializer to new akka-servlet module || Viktor Klang || + +Release 0.8 - March 31st 2010 +============================= + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +|| || || || +|| **ADD** || Scala 2.8 based || Viktor Klang || +|| **ADD** || Monadic API for Agents || Jonas Bonér || +|| **ADD** || Agents are transactional || Jonas Bonér || +|| **ADD** || Work-stealing dispatcher || Jan Van Besien || +|| **ADD** || Improved Spring integration || Michael Kober || +|| **FIX** || Various bugfixes || Team || +|| **FIX** || Improved distribution packaging || Jonas Bonér || +|| **REMOVE** || Actor.send function || Jonas Bonér || + +Release 0.7 - March 21st 2010 +============================= + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +|| || || || +|| **ADD** || Rewritten STM now works generically with fire-forget message flows || Jonas Bonér || +|| **ADD** || Apache Camel integration || Martin Krasser || +|| **ADD** || Spring integration || Michael Kober || +|| **ADD** || Server-managed Remote Actors || Jonas Bonér || +|| **ADD** || Clojure-style Agents || Viktor Klang || +|| **ADD** || Shoal cluster backend || Viktor Klang || +|| **ADD** || Redis-based transactional queue storage backend || Debasish Ghosh || +|| **ADD** || Redis-based transactional sorted set storage backend || Debasish Ghosh || +|| **ADD** || Redis-based atomic INC (index) operation || Debasish Ghosh || +|| **ADD** || Distributed Comet || Viktor Klang || +|| **ADD** || Project moved to SBT 
(simple-build-tool) || Peter Hausel || +|| **ADD** || Futures object with utility methods for Future's || Jonas Bonér || +|| **ADD** || !!! function that returns a Future || Jonas Bonér || +|| **ADD** || Richer ActorRegistry API || Jonas Bonér || +|| **FIX** || Improved event-based dispatcher performance with 40% || Jan Van Besien || +|| **FIX** || Improved remote client pipeline performance || Viktor Klang || +|| **FIX** || Support several Clusters on the same network || Viktor Klang || +|| **FIX** || Structural package refactoring || Jonas Bonér || +|| **FIX** || Various bugs fixed || Team || + +Release 0.6 - January 5th 2010 +============================== + +||~ =**Type** + ||~ +===== + +**Changes** + ||~ +===== + +**By**= || +|| || || || +|| **ADD** || Clustered Comet using Akka remote actors and clustered membership API || Viktor Klang || +|| **ADD** || Cluster membership API and implementation based on JGroups || Viktor Klang || +|| **ADD** || Security module for HTTP-based authentication and authorization || Viktor Klang || +|| **ADD** || Support for using Scala XML tags in RESTful Actors (scala-jersey) || Viktor Klang || +|| **ADD** || Support for Comet Actors using Atmosphere || Viktor Klang || +|| **ADD** || MongoDB as Akka storage backend || Debasish Ghosh || +|| **ADD** || Redis as Akka storage backend || Debasish Ghosh || +|| **ADD** || Transparent JSON serialization of Scala objects based on SJSON || Debasish Ghosh || +|| **ADD** || Kerberos/SPNEGO support for Security module || Eckhart Hertzler || +|| **ADD** || Implicit sender for remote actors: Remote actors are able to use reply to answer a request || Mikael Högqvist || +|| **ADD** || Support for using the Lift Web framework with Actors || Tim Perrett || +|| **ADD** || Added CassandraSession API (with socket pooling) wrapping Cassandra's Thrift API in Scala and Java APIs || Jonas Bonér || +|| **ADD** || Rewritten STM, now integrated with Multiverse STM || Jonas Bonér || +|| **ADD** || Added STM 
API for atomic {..} and run {..} orElse {..} || Jonas Bonér || +|| **ADD** || Added STM retry || Jonas Bonér || +|| **ADD** || AMQP integration; abstracted as actors in a supervisor hierarchy. Impl AMQP 0.9.1 || Jonas Bonér || +|| **ADD** || Complete rewrite of the persistence transaction management, now based on Unit of Work and Multiverse STM || Jonas Bonér || +|| **ADD** || Monadic API to TransactionalRef (use it in for-comprehension) || Jonas Bonér || +|| **ADD** || Lightweight actor syntax using one of the Actor.actor(..) methods. F.e: 'val a = actor { case _ => .. }' || Jonas Bonér || +|| **ADD** || Rewritten event-based dispatcher which improved performance by 10x, now substantially faster than event-driven Scala Actors || Jonas Bonér || +|| **ADD** || New Scala JSON parser based on sjson || Jonas Bonér || +|| **ADD** || Added zlib compression to remote actors || Jonas Bonér || +|| **ADD** || Added implicit sender reference for fire-forget ('!') message sends || Jonas Bonér || +|| **ADD** || Monadic API to TransactionalRef (use it in for-comprehension) || Jonas Bonér || +|| **ADD** || Smoother web app integration; just add akka.conf to the classpath (WEB-INF/classes), no need for AKKA_HOME or -Dakka.conf=.. 
|| Jonas Bonér || +|| **ADD** || Modularization of distribution into a thin core (actors, remoting and STM) and the rest in submodules || Jonas Bonér || +|| **ADD** || Added 'forward' to Actor, forwards message but keeps original sender address || Jonas Bonér || +|| **ADD** || JSON serialization for Java objects (using Jackson) || Jonas Bonér || +|| **ADD** || JSON serialization for Scala objects (using SJSON) || Jonas Bonér || +|| **ADD** || Added implementation for remote actor reconnect upon failure || Jonas Bonér || +|| **ADD** || Protobuf serialization for Java and Scala objects || Jonas Bonér || +|| **ADD** || SBinary serialization for Scala objects || Jonas Bonér || +|| **ADD** || Protobuf as remote protocol || Jonas Bonér || +|| **ADD** || Updated Cassandra integration and CassandraSession API to v0.4 || Jonas Bonér || +|| **ADD** || CassandraStorage now works with external Cassandra cluster || Jonas Bonér || +|| **ADD** || ActorRegistry for retrieving Actor instances by class name and by id || Jonas Bonér || +|| **ADD** || SchedulerActor for scheduling periodic tasks || Jonas Bonér || +|| **ADD** || Now start up kernel with 'java -jar dist/akka-0.6.jar' || Jonas Bonér || +|| **ADD** || Added Akka user mailing list: [akka-user AT googlegroups DOT com] || Jonas Bonér || +|| **ADD** || Improved and restructured documentation || Jonas Bonér || +|| **ADD** || New URL: http://akkasource.org || Jonas Bonér || +|| **ADD** || New and much improved docs || Jonas Bonér || +|| **ADD** || Enhanced trapping of failures: 'trapExit = List(classOf[..], classOf[..])' || Jonas Bonér || +|| **ADD** || Upgraded to Netty 3.2, Protobuf 2.2, ScalaTest 1.0, Jersey 1.1.3, Atmosphere 0.4.1, Cassandra 0.4.1, Configgy 1.4 || Jonas Bonér || +|| **FIX** || Lowered actor memory footprint; now an actor consumes ~600 bytes, which means that you can create 6.5 million on 4 G RAM || Jonas Bonér || +|| **FIX** || Remote actors are now defined by their UUID (not class name) || Jonas Bonér || 
+|| **FIX** || Fixed dispatcher bugs || Jonas Bonér || +|| **FIX** || Cleaned up Maven scripts and distribution in general || Jonas Bonér || +|| **FIX** || Fixed many many bugs and minor issues || Jonas Bonér || +|| **FIX** || Fixed inconsistencies and ugliness in Actors API || Jonas Bonér || +|| **REMOVE** || Removed concurrent mode || Jonas Bonér || +|| **REMOVE** || Removed embedded Cassandra mode || Jonas Bonér || +|| **REMOVE** || Removed the !? method in Actor (synchronous message send), since it's evil. Use !! with time-out instead. || Jonas Bonér || +|| **REMOVE** || Removed startup scripts and lib dir || Jonas Bonér || +|| **REMOVE** || Removed the 'Transient' life-cycle scope since too close to 'Temporary' in semantics. || Jonas Bonér || +|| **REMOVE** || Removed 'Transient' Actors and restart timeout || Jonas Bonér || diff --git a/akka-docs/pending/remote-actors-java.rst b/akka-docs/pending/remote-actors-java.rst new file mode 100644 index 0000000000..0e654fc698 --- /dev/null +++ b/akka-docs/pending/remote-actors-java.rst @@ -0,0 +1,617 @@ +Remote Actors (Java) +==================== + +Module stability: **SOLID** + +Akka supports starting UntypedActors and TypedActors on remote nodes using a very efficient and scalable NIO implementation built upon `JBoss Netty `_ and `Google Protocol Buffers `_ . + +The usage is completely transparent both in regards to sending messages and error handling and propagation as well as supervision, linking and restarts. You can send references to other Actors as part of the message. + +**WARNING**: For security reasons, do not run an Akka node with a Remote Actor port reachable by untrusted connections unless you have supplied a classloader that restricts access to the JVM. 
+ +Managing the Remote Service +--------------------------- + +Starting remote service in user code as a library +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Here is how to start up the server and specify the hostname and port programatically: + +.. code-block:: java + + import static akka.actor.Actors.*; + + remote().start("localhost", 2552); + + // Specify the classloader to use to load the remote class (actor) + remote().start("localhost", 2552, classLoader); + +Here is how to start up the server and specify the hostname and port in the ‘akka.conf’ configuration file (see the section below for details): + +.. code-block:: java + + import static akka.actor.Actors.*; + + remote().start(); + + // Specify the classloader to use to load the remote class (actor) + remote().start(classLoader); + +Starting remote service as part of the stand-alone Kernel +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You simply need to make sure that the service is turned on in the external ‘akka.conf’ configuration file. + +.. code-block:: ruby + + akka { + remote { + server { + service = on + hostname = "localhost" + port = 2552 + connection-timeout = 1000 # in millis + } + } + } + +Stopping the server +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: java + + import static akka.actor.Actors.*; + + remote().shutdown(); + +Connecting and shutting down a client connection explicitly +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Normally you should not have to start and stop the client connection explicitly since that is handled by Akka on a demand basis. But if you for some reason want to do that then you can do it like this: + +.. 
code-block:: java + + import static akka.actor.Actors.*; + import java.net.InetSocketAddress; + + remote().shutdownClientConnection(new InetSocketAddress("localhost", 6666)); //Returns true if successful, else false + remote().restartClientConnection(new InetSocketAddress("localhost", 6666)); //Returns true if successful, else false + +Client message frame size configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can define the max message frame size for the remote messages: + +.. code-block:: ruby + + akka { + remote { + client { + message-frame-size = 1048576 + } + } + } + +Client reconnect configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Client automatically performs reconnection upon connection failure. + +You can configure it like this: + +.. code-block:: ruby + + akka { + remote { + client { + reconnect-delay = 5 # in seconds (5 sec default) + read-timeout = 10 # in seconds (10 sec default) + reconnection-time-window = 600 # the maximum time window that a client should try to reconnect for + } + } + } + +The client will automatically trying to reconnect to the server if the connection is broken. By default it has a reconnection window of 10 minutes (600 seconds). + +If it has not been able to reconnect during this period of time then it is shut down and further attempts to use it will yield a 'RemoteClientException'. The 'RemoteClientException' contains the message as well as a reference to the address that is not yet connect in order for you to retrieve it an do an explicit connect if needed. + +You can also register a listener that will listen for example the 'RemoteClientStopped' event, retrieve the address that got disconnected and reconnect explicitly. + +See the section on client listener and events below for details. + +Remote Client message buffering and send retry on failure +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Remote Client implements message buffering on network failure. 
This feature has zero overhead (even turned on) in the successful scenario and a queue append operation in case of unsuccessful send. So it is really really fast. + +The default behavior is that the remote client will maintain a transaction log of all messages that it has failed to send due to network problems (not other problems like serialization errors etc.). The client will try to resend these messages upon first successful reconnect and the message ordering is maintained. This means that the remote client will swallow all exceptions due to network failure and instead queue remote messages in the transaction log. The failures will however be reported through the remote client life-cycle events as well as the regular Akka event handler. You can turn this behavior on and off in the configuration file. It gives 'at-least-once' semantics, use a message id/counter for discarding potential duplicates (or use idempotent messages). + +.. code-block:: ruby + + akka { + remote { + client { + buffering { + retry-message-send-on-failure = on + capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) + # If positive then a bounded mailbox is used and the capacity is set using the property + } + } + } + } + +If you choose a capacity higher than 0, then a bounded queue will be used and if the limit of the queue is reached then a 'RemoteClientMessageBufferException' will be thrown. + +You can also get an Array with all the messages that the remote client has failed to send. Since the remote client events passes you an instance of the RemoteClient you have an easy way to act upon failure and do something with these messages (while waiting for them to be retried). + +.. code-block:: java + + Object[] pending = Actors.remote().pendingMessages(); + +Running Remote Server in untrusted mode +--------------------------------------- + +You can run the remote server in untrusted mode. 
This means that the server will not allow any client-managed remote actors or any life-cycle messages and methods. This is useful if you want to let untrusted clients use server-managed actors in a safe way. This can optionally be combined with the secure cookie authentication mechanism described below as well as the SSL support for remote actor communication. + +If the client is trying to perform one of these unsafe actions then a 'java.lang.SecurityException' is thrown on the server as well as transferred to the client and thrown there as well. + +Here is how you turn it on: + +.. code-block:: ruby + + akka { + remote { + server { + untrusted-mode = on # the default is 'off' + } + } + } + +The messages that it prevents are all that extends 'LifeCycleMessage': +* case class HotSwap(..) +* case object RevertHotSwap +* case class Restart(..) +* case class Exit(..) +* case class Link(..) +* case class Unlink(..) +* case class UnlinkAndStop(..) +* case object ReceiveTimeout + +It also prevents the client from invoking any life-cycle and side-effecting methods, such as: +* start +* stop +* link +* unlink +* spawnLink +* etc. + +Using secure cookie for remote client authentication +---------------------------------------------------- + +Akka is using a similar scheme for remote client node authentication as Erlang; using secure cookies. In order to use this authentication mechanism you have to do two things: + +* Enable secure cookie authentication in the remote server +* Use the same secure cookie on all the trusted peer nodes + +Enabling secure cookie authentication +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The first one is done by enabling the secure cookie authentication in the remote server section in the configuration file: + +.. 
code-block:: ruby + + akka { + remote { + server { + require-cookie = on + } + } + +Now if you have try to connect to a server from a client then it will first try to authenticate the client by comparing the secure cookie for the two nodes. If they are the same then it allows the client to connect and use the server freely but if they are not the same then it will throw a 'java.lang.SecurityException' and not allow the client to connect. + +Generating and using the secure cookie +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The secure cookie can be any string value but in order to ensure that it is secure it is best to randomly generate it. This can be done by invoking the 'generate_config_with_secure_cookie.sh' script which resides in the '$AKKA_HOME/scripts' folder. This script will generate and print out a complete 'akka.conf' configuration file with the generated secure cookie defined that you can either use as-is or cut and paste the 'secure-cookie' snippet. Here is an example of its generated output: + +.. code-block:: ruby + + # This config imports the Akka reference configuration. + include "akka-reference.conf" + + # In this file you can override any option defined in the 'akka-reference.conf' file. + # Copy in all or parts of the 'akka-reference.conf' file and modify as you please. + + akka { + remote { + secure-cookie = "000E02050F0300040C050C0D060A040306090B0C" + } + } + +The simplest way to use it is to have it create your 'akka.conf' file like this: + +.. code-block:: ruby + + cd $AKKA_HOME + ./scripts/generate_config_with_secure_cookie.sh > ./config/akka.conf + +Now it is good to make sure that the configuration file is only accessible by the owner of the file. On Unix-style file system this can be done like this: + +.. code-block:: ruby + + chmod 400 ./config/akka.conf + +Running this script requires having 'scala' on the path (and will take a couple of seconds to run since it is using Scala and has to boot up the JVM to run). 
+ +You can also generate the secure cookie by using the 'Crypt' object and its 'generateSecureCookie' method. + +.. code-block:: scala + + import akka.util.Crypt; + + String secureCookie = Crypt.generateSecureCookie(); + +The secure cookie is a cryptographically secure randomly generated byte array turned into a SHA-1 hash. + +Remote Actors +------------- + +Akka has two types of remote actors: + +* Client-initiated and managed. Here it is the client that creates the remote actor and "moves it" to the server. +* Server-initiated and managed. Here it is the server that creates the remote actor and the client can ask for a handle to this actor. + +They are good for different use-cases. The client-initiated are great when you want to monitor an actor on another node since it allows you to link to it and supervise it using the regular supervision semantics. They also make RPC completely transparent. The server-initiated, on the other hand, are great when you have a service running on the server that you want clients to connect to, and you want full control over the actor on the server side for security reasons etc. + +Client-managed Remote UntypedActor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +DEPRECATED AS OF 1.1 + +When you define an actors as being remote it is instantiated as on the remote host and your local actor becomes a proxy, it works as a handle to the remote actor. The real execution is always happening on the remote node. + +Here is an example: + +.. code-block:: java + + import akka.actor.UntypedActor; + import static akka.actor.Actors.*; + + class MyActor extends UntypedActor { + public void onReceive(Object message) throws Exception { + ... + } + } + + //How to make it client-managed: + remote().actorOf(MyActor.class,"192.68.23.769", 2552); + +An UntypedActor can also start remote child Actors through one of the “spawn/link” methods. These will start, link and make the UntypedActor remote atomically. + +.. code-block:: java + + ... 
+ getContext().spawnRemote(MyActor.class, hostname, port); + getContext().spawnLinkRemote(MyActor.class, hostname, port, timeoutInMsForFutures); + ... + +Server-managed Remote UntypedActor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Server side setup +***************** + +The API for server managed remote actors is really simple. 2 methods only: + +.. code-block:: java + + class MyActor extends UntypedActor { + public void onReceive(Object message) throws Exception { + ... + } + } + Actors.remote().start("localhost", 2552).register("hello-service", Actors.actorOf(HelloWorldActor.class)); + +Actors created like this are automatically started. + +You can also register an actor by its UUID rather than ID or handle. This is done by prefixing the handle with the "uuid:" protocol. + +.. code-block:: scala + + server.register("uuid:" + actor.uuid, actor); + + server.unregister("uuid:" + actor.uuid); + +Client side usage +***************** + +.. code-block:: java + + ActorRef actor = Actors.remote().actorFor("hello-service", "localhost", 2552); + actor.sendOneWay("Hello"); + +There are many variations on the 'remote()#actorFor' method. Here are some of them: + +.. code-block:: java + + ... = actorFor(className, hostname, port); + ... = actorFor(className, timeout, hostname, port); + ... = actorFor(uuid, className, hostname, port); + ... = actorFor(uuid, className, timeout, hostname, port); + ... // etc + +All of these also have variations where you can pass in an explicit 'ClassLoader' which can be used when deserializing messages sent from the remote actor. + +Client-managed Remote TypedActor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +DEPRECATED AS OF 1.1 + +Remote Typed Actors are created through the 'TypedActor.newRemoteInstance' factory method. + +.. code-block:: java + + MyPOJO remoteActor = (MyPOJO)TypedActor.newRemoteInstance(MyPOJO.class, MyPOJOImpl.class, "localhost", 2552); + +And if you want to specify the timeout: + +.. 
code-block:: java + + MyPOJO remoteActor = (MyPOJO)TypedActor.newRemoteInstance(MyPOJO.class, MyPOJOImpl.class, timeout, "localhost", 2552); + +You can also define the Typed Actor to be a client-managed-remote service by adding the ‘RemoteAddress’ configuration element in the declarative supervisor configuration: + +.. code-block:: java + + new Component( + Foo.class, + FooImpl.class, + new LifeCycle(new Permanent(), 1000), + 1000, + new RemoteAddress("localhost", 2552)) + +Server-managed Remote TypedActor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +WARNING: Remote TypedActors do not work with overloaded methods on your TypedActor, refrain from using overloading. + +Server side setup +***************** + +The API for server managed remote typed actors is nearly the same as for untyped actor: + +.. code-block:: java + + import static akka.actor.Actors.*; + remote().start("localhost", 2552); + + RegistrationService typedActor = TypedActor.newInstance(RegistrationService.class, RegistrationServiceImpl.class, 2000); + remote().registerTypedActor("user-service", typedActor); + +Client side usage + +.. code-block:: java + + import static akka.actor.Actors.*; + RegistrationService actor = remote().typedActorFor(RegistrationService.class, "user-service", 5000L, "localhost", 2552); + actor.registerUser(...); + +There are variations on the 'remote()#typedActorFor' method. Here are some of them: + +.. code-block:: java + + ... = typedActorFor(interfaceClazz, serviceIdOrClassName, hostname, port); + ... = typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port); + ... = typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port, classLoader); + +Session bound server side setup +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Session bound server managed remote actors work by creating and starting a new actor for every client that connects. Actors are stopped automatically when the client disconnects. 
The client side is the same as regular server managed remote actors. Use the function registerPerSession instead of register. + +Session bound actors are useful if you need to keep state per session, e.g. username. They are also useful if you need to perform some cleanup when a client disconnects by overriding the postStop method as described `here `_ + +.. code-block:: java + + import static akka.actor.Actors.*; + class HelloWorldActor extends Actor { + ... + } + + remote().start("localhost", 2552); + + remote().registerPerSession("hello-service", new Creator[ActorRef]() { + public ActorRef create() { + return actorOf(HelloWorldActor.class); + } + }) + +Note that the second argument in registerPerSession is a Creator, it means that the create method will create a new ActorRef each invocation. +It will be called to create an actor every time a session is established. + +Client side usage +^^^^^^^^^^^^^^^^^ + +.. code-block:: java + + import static akka.actor.Actors.*; + ActorRef actor = remote().actorFor("hello-service", "localhost", 2552); + + Object result = actor.sendRequestReply("Hello"); + +There are many variations on the 'remote()#actorFor' method. Here are some of them: + +.. code-block:: java + + ... = actorFor(className, hostname, port); + ... = actorFor(className, timeout, hostname, port); + ... = actorFor(uuid, className, hostname, port); + ... = actorFor(uuid, className, timeout, hostname, port); + ... // etc + +Automatic remote 'sender' reference management +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Akka is automatically remote-enabling the sender Actor reference for you in order to allow the receiver to respond to the message using 'getContext().getSender().sendOneWay(msg);' or 'getContext().reply(msg);'. By default it is registering the sender reference in the remote server with the 'hostname' and 'port' from the akka.conf configuration file. 
The default is "localhost" and 2552 and if there is no remote server with this hostname and port then it creates and starts it. + +Identifying remote actors +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The 'id' field in the 'Actor' class is of importance since it is used as identifier for the remote actor. If you want to create a brand new actor every time you instantiate a remote actor then you have to set the 'id' field to a unique 'String' for each instance. If you want to reuse the same remote actor instance for each new remote actor (of the same class) you create then you don't have to do anything since the 'id' field by default is equal to the name of the actor class. + +Here is an example of overriding the 'id' field: + +.. code-block:: java + + import akka.util.UUID; + + class MyActor extends UntypedActor { + public MyActor() { + getContext().setId(UUID.newUuid().toString()); + } + + public void onReceive(Object message) throws Exception { + ... + } + } + +Data Compression Configuration +------------------------------ + +Akka uses compression to minimize the size of the data sent over the wire. Currently it only supports 'zlib' compression but more will come later. + +You can configure it like this: + +.. code-block:: ruby + + akka { + remote { + compression-scheme = "zlib" # Options: "zlib" (lzf to come), leave out for no compression + zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6 + + ... + } + } + +Subscribe to Remote Client events +--------------------------------- + +Akka has a subscription API for remote client events. 
You can register an Actor as a listener and this actor will have to be able to process these events: + +RemoteClientError { Throwable cause; RemoteClientModule client; InetSocketAddress remoteAddress; } +RemoteClientDisconnected { RemoteClientModule client; InetSocketAddress remoteAddress; } +RemoteClientConnected { RemoteClientModule client; InetSocketAddress remoteAddress; } +RemoteClientStarted { RemoteClientModule client; InetSocketAddress remoteAddress; } +RemoteClientShutdown { RemoteClientModule client; InetSocketAddress remoteAddress; } +RemoteClientWriteFailed { Object message; Throwable cause; RemoteClientModule client; InetSocketAddress remoteAddress; } + +So a simple listener actor can look like this: + +.. code-block:: java + + class Listener extends UntypedActor { + + public void onReceive(Object message) throws Exception { + if (message instanceof RemoteClientError) { + RemoteClientError event = (RemoteClientError)message; + Exception cause = event.getCause(); + ... + } else if (message instanceof RemoteClientConnected) { + RemoteClientConnected event = (RemoteClientConnected)message; + ... + } else if (message instanceof RemoteClientDisconnected) { + RemoteClientDisconnected event = (RemoteClientDisconnected)message; + ... + } else if (message instanceof RemoteClientStarted) { + RemoteClientStarted event = (RemoteClientStarted)message; + ... + } else if (message instanceof RemoteClientShutdown) { + RemoteClientShutdown event = (RemoteClientShutdown)message; + ... + } else if (message instanceof RemoteClientWriteFailed) { + RemoteClientWriteFailed event = (RemoteClientWriteFailed)message; + ... + } + } + } + +Registration and de-registration can be done like this: + +.. code-block:: java + + ActorRef listener = Actors.actorOf(Listener.class); + ... + Actors.remote().addListener(listener); + ... 
+ Actors.remote().removeListener(listener); + +Subscribe to Remote Server events +--------------------------------- + +Akka has a subscription API for the server events. You can register an Actor as a listener and this actor will have to be able to process these events: + +RemoteServerStarted { RemoteServerModule server; } +RemoteServerShutdown { RemoteServerModule server; } +RemoteServerError { Throwable cause; RemoteServerModule server; } +RemoteServerClientConnected { RemoteServerModule server; Option clientAddress; } +RemoteServerClientDisconnected { RemoteServerModule server; Option clientAddress; } +RemoteServerClientClosed { RemoteServerModule server; Option clientAddress; } +RemoteServerWriteFailed { Object request; Throwable cause; RemoteServerModule server; Option clientAddress; } + +So a simple listener actor can look like this: + +.. code-block:: java + + class Listener extends UntypedActor { + + public void onReceive(Object message) throws Exception { + if (message instanceof RemoteServerError) { + RemoteServerError event = (RemoteServerError)message; + Exception cause = event.getCause(); + ... + } else if (message instanceof RemoteServerStarted) { + RemoteServerStarted event = (RemoteServerStarted)message; + ... + } else if (message instanceof RemoteServerShutdown) { + RemoteServerShutdown event = (RemoteServerShutdown)message; + ... + } else if (message instanceof RemoteServerClientConnected) { + RemoteServerClientConnected event = (RemoteServerClientConnected)message; + ... + } else if (message instanceof RemoteServerClientDisconnected) { + RemoteServerClientDisconnected event = (RemoteServerClientDisconnected)message; + ... + } else if (message instanceof RemoteServerClientClosed) { + RemoteServerClientClosed event = (RemoteServerClientClosed)message; + ... + } else if (message instanceof RemoteServerWriteFailed) { + RemoteServerWriteFailed event = (RemoteServerWriteFailed)message; + ... 
+ } + } + } + +Registration and de-registration can be done like this: + +.. code-block:: java + + import static akka.actor.Actors.*; + + ActorRef listener = actorOf(Listener.class); + ... + remote().addListener(listener); + ... + remote().removeListener(listener); + +Message Serialization +--------------------- + +All messages that are sent to remote actors needs to be serialized to binary format to be able to travel over the wire to the remote node. This is done by letting your messages extend one of the traits in the 'akka.serialization.Serializable' object. If the messages don't implement any specific serialization trait then the runtime will try to use standard Java serialization. + +Read more about that in the `Serialization section `_. + +Code provisioning +----------------- + +Akka does currently not support automatic code provisioning but requires you to have the remote actor class files available on both the "client" the "server" nodes. +This is something that will be addressed soon. Until then, sorry for the inconvenience. diff --git a/akka-docs/pending/remote-actors-scala.rst b/akka-docs/pending/remote-actors-scala.rst new file mode 100644 index 0000000000..fef71e69e5 --- /dev/null +++ b/akka-docs/pending/remote-actors-scala.rst @@ -0,0 +1,722 @@ +Remote Actors (Scala) +===================== + +Module stability: **SOLID** + +Akka supports starting Actors and Typed Actors on remote nodes using a very efficient and scalable NIO implementation built upon `JBoss Netty `_ and `Google Protocol Buffers `_ . + +The usage is completely transparent both in regards to sending messages and error handling and propagation as well as supervision, linking and restarts. You can send references to other Actors as part of the message. + +You can find a runnable sample `here `_. 
+ +Starting up the remote service +------------------------------ + +Starting remote service in user code as a library +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Here is how to start up the RemoteNode and specify the hostname and port programatically: + +.. code-block:: scala + + import akka.actor.Actor._ + + remote.start("localhost", 2552) + + // Specify the classloader to use to load the remote class (actor) + remote.start("localhost", 2552, classLoader) + +Here is how to start up the RemoteNode and specify the hostname and port in the 'akka.conf' configuration file (see the section below for details): + +.. code-block:: scala + + import akka.actor.Actor._ + + remote.start() + + // Specify the classloader to use to load the remote class (actor) + remote.start(classLoader) + +Starting remote service as part of the stand-alone Kernel +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You simply need to make sure that the service is turned on in the external 'akka.conf' configuration file. + +.. code-block:: ruby + + akka { + remote { + server { + service = on + hostname = "localhost" + port = 2552 + connection-timeout = 1000 # in millis + } + } + } + +Stopping a RemoteNode or RemoteServer +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you invoke 'shutdown' on the server then the connection will be closed. + +.. code-block:: scala + + import akka.actor.Actor._ + + remote.shutdown + +Connecting and shutting down a client explicitly +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Normally you should not have to start and stop the client connection explicitly since that is handled by Akka on a demand basis. But if you for some reason want to do that then you can do it like this: + +.. 
code-block:: scala + + import akka.actor.Actor._ + + remote.shutdownClientConnection(new InetSocketAddress("localhost", 6666)) //Returns true if successful, false otherwise + remote.restartClientConnection(new InetSocketAddress("localhost", 6666)) //Returns true if successful, false otherwise + +Remote Client message frame size configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can define the max message frame size for the remote messages: + +.. code-block:: ruby + + akka { + remote { + client { + message-frame-size = 1048576 + } + } + } + +Remote Client reconnect configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Remote Client automatically performs reconnection upon connection failure. + +You can configure it like this: + +.. code-block:: ruby + + akka { + remote { + client { + reconnect-delay = 5 # in seconds (5 sec default) + read-timeout = 10 # in seconds (10 sec default) + reconnection-time-window = 600 # the maximum time window that a client should try to reconnect for + } + } + } + +The RemoteClient is automatically trying to reconnect to the server if the connection is broken. By default it has a reconnection window of 10 minutes (600 seconds). + +If it has not been able to reconnect during this period of time then it is shut down and further attempts to use it will yield a 'RemoteClientException'. The 'RemoteClientException' contains the message as well as a reference to the RemoteClient that is not yet connect in order for you to retrieve it an do an explicit connect if needed. + +You can also register a listener that will listen for example the 'RemoteClientStopped' event, retrieve the 'RemoteClient' from it and reconnect explicitly. + +See the section on RemoteClient listener and events below for details. + +Remote Client message buffering and send retry on failure +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Remote Client implements message buffering on network failure. 
This feature has zero overhead (even turned on) in the successful scenario and a queue append operation in case of unsuccessful send. So it is really really fast. + +The default behavior is that the remote client will maintain a transaction log of all messages that it has failed to send due to network problems (not other problems like serialization errors etc.). The client will try to resend these messages upon first successful reconnect and the message ordering is maintained. This means that the remote client will swallow all exceptions due to network failure and instead queue remote messages in the transaction log. The failures will however be reported through the remote client life-cycle events as well as the regular Akka event handler. You can turn this behavior on and off in the configuration file. It gives 'at-least-once' semantics, use a message id/counter for discarding potential duplicates (or use idempotent messages). + +.. code-block:: ruby + + akka { + remote { + client { + buffering { + retry-message-send-on-failure = on + capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) + # If positive then a bounded mailbox is used and the capacity is set using the property + } + } + } + } + +If you choose a capacity higher than 0, then a bounded queue will be used and if the limit of the queue is reached then a 'RemoteClientMessageBufferException' will be thrown. + +You can also get an Array with all the messages that the remote client has failed to send. Since the remote client events passes you an instance of the RemoteClient you have an easy way to act upon failure and do something with these messages (while waiting for them to be retried). + +.. code-block:: scala + + val pending: Array[Any] = Actor.remote.pendingMessages + +Running Remote Server in untrusted mode +--------------------------------------- + +You can run the remote server in untrusted mode. 
This means that the server will not allow any client-managed remote actors or any life-cycle messages and methods. This is useful if you want to let untrusted clients use server-managed actors in a safe way. This can optionally be combined with the secure cookie authentication mechanism described below as well as the SSL support for remote actor communication. + +If the client is trying to perform one of these unsafe actions then a 'java.lang.SecurityException' is thrown on the server as well as transferred to the client and thrown there as well. + +Here is how you turn it on: + +.. code-block:: ruby + + akka { + remote { + server { + untrusted-mode = on # the default is 'off' + } + } + } + +The messages that it prevents are all that extend 'LifeCycleMessage': +* class HotSwap(..) +* class RevertHotSwap(..) +* class Restart(..) +* class Exit(..) +* class Link(..) +* class Unlink(..) +* class UnlinkAndStop(..) +* class ReceiveTimeout(..) + +It also prevents the client from invoking any life-cycle and side-effecting methods, such as: +* start +* stop +* link +* unlink +* spawnLink +* etc. + +Using secure cookie for remote client authentication +---------------------------------------------------- + +Akka is using a similar scheme for remote client node authentication as Erlang; using secure cookies. In order to use this authentication mechanism you have to do two things: + +* Enable secure cookie authentication in the remote server +* Use the same secure cookie on all the trusted peer nodes + +Enabling secure cookie authentication +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The first one is done by enabling the secure cookie authentication in the remote server section in the configuration file: + +.. code-block:: ruby + + akka { + remote { + server { + require-cookie = on + } + } + } + +Now if you try to connect to a server with a client then it will first try to authenticate the client by comparing the secure cookie for the two nodes.
If they are the same then it allows the client to connect and use the server freely but if they are not the same then it will throw a 'java.lang.SecurityException' and not allow the client to connect. + +Generating and using the secure cookie +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The secure cookie can be any string value but in order to ensure that it is secure it is best to randomly generate it. This can be done by invoking the 'generate_config_with_secure_cookie.sh' script which resides in the '$AKKA_HOME/scripts' folder. This script will generate and print out a complete 'akka.conf' configuration file with the generated secure cookie defined that you can either use as-is or cut and paste the 'secure-cookie' snippet. Here is an example of its generated output: + +.. code-block:: ruby + + # This config imports the Akka reference configuration. + include "akka-reference.conf" + + # In this file you can override any option defined in the 'akka-reference.conf' file. + # Copy in all or parts of the 'akka-reference.conf' file and modify as you please. + + akka { + remote { + secure-cookie = "000E02050F0300040C050C0D060A040306090B0C" + } + } + +The simplest way to use it is to have it create your 'akka.conf' file like this: + +.. code-block:: ruby + + cd $AKKA_HOME + ./scripts/generate_config_with_secure_cookie.sh > ./config/akka.conf + +Now it is good to make sure that the configuration file is only accessible by the owner of the file. On Unix-style file system this can be done like this: + +.. code-block:: ruby + + chmod 400 ./config/akka.conf + +Running this script requires having 'scala' on the path (and will take a couple of seconds to run since it is using Scala and has to boot up the JVM to run). + +You can also generate the secure cookie by using the 'Crypt' object and its 'generateSecureCookie' method. + +.. 
code-block:: scala + + import akka.util.Crypt + + val secureCookie = Crypt.generateSecureCookie + +The secure cookie is a cryptographically secure randomly generated byte array turned into a SHA-1 hash. + +Remote Actors +------------- + +Akka has two types of remote actors: + +* Client-initiated and managed. Here it is the client that creates the remote actor and "moves it" to the server. +* Server-initiated and managed. Here it is the server that creates the remote actor and the client can ask for a handle to this actor. + +They are good for different use-cases. The client-initiated are great when you want to monitor an actor on another node since it allows you to link to it and supervise it using the regular supervision semantics. They also make RPC completely transparent. The server-initiated, on the other hand, are great when you have a service running on the server that you want clients to connect to, and you want full control over the actor on the server side for security reasons etc. + +Client-managed Remote Actors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +DEPRECATED AS OF 1.1 + +When you define an actor as being remote it is instantiated on the remote host and your local actor becomes a proxy; it works as a handle to the remote actor. The real execution is always happening on the remote node. + +Actors can be made remote by calling remote().actorOf[MyActor](host, port) + +Here is an example: + +.. code-block:: scala + + import akka.actor.Actor + + class MyActor extends RemoteActor() { + def receive = { + case "hello" => self.reply("world") + } + } + + val remote = Actor.remote().actorOf[MyActor]("192.68.23.769", 2552) + +An Actor can also start remote child Actors through one of the 'spawn/link' methods. These will start, link and make the Actor remote atomically. + +.. code-block:: scala + + ... + spawnRemote[MyActor](hostname, port) + spawnLinkRemote[MyActor](hostname, port) + ...
+ +Server-managed Remote Actors +---------------------------- + +Server side setup +^^^^^^^^^^^^^^^^^ + +The API for server managed remote actors is really simple. 2 methods only: + +.. code-block:: scala + + class HelloWorldActor extends Actor { + def receive = { + case "Hello" => self.reply("World") + } + } + + remote.start("localhost", 2552) //Start the server + remote.register("hello-service", actorOf[HelloWorldActor]) //Register the actor with the specified service id + +Actors created like this are automatically started. + +You can also register an actor by its UUID rather than ID or handle. This is done by prefixing the handle with the "uuid:" protocol. + +.. code-block:: scala + + remote.register("uuid:" + actor.uuid, actor) + + remote.unregister("uuid:" + actor.uuid) + +Session bound server side setup +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Session bound server managed remote actors work by creating and starting a new actor for every client that connects. Actors are stopped automatically when the client disconnects. The client side is the same as regular server managed remote actors. Use the function registerPerSession instead of register. + +Session bound actors are useful if you need to keep state per session, e.g. username. +They are also useful if you need to perform some cleanup when a client disconnects by overriding the postStop method as described `here `_ + +.. code-block:: scala + + class HelloWorldActor extends Actor { + def receive = { + case "Hello" => self.reply("World") + } + } + remote.start("localhost", 2552) + remote.registerPerSession("hello-service", actorOf[HelloWorldActor]) + +Note that the second argument in registerPerSession is an implicit function. It will be called to create an actor every time a session is established. + +Client side usage +^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + val actor = remote.actorFor("hello-service", "localhost", 2552) + val result = actor !!
"Hello" + +There are many variations on the 'remote#actorFor' method. Here are some of them: + +.. code-block:: scala + + ... = actorFor(className, hostname, port) + ... = actorFor(className, timeout, hostname, port) + ... = actorFor(uuid, className, hostname, port) + ... = actorFor(uuid, className, timeout, hostname, port) + ... // etc + +All of these also have variations where you can pass in an explicit 'ClassLoader' which can be used when deserializing messages sent from the remote actor. + +Running sample +^^^^^^^^^^^^^^ + +Here is a complete running sample (also available `here `_): + +.. code-block:: scala + + import akka.actor.Actor + import akka.util.Logging + import Actor._ + + class HelloWorldActor extends Actor { + def receive = { + case "Hello" => self.reply("World") + } + } + + object ServerInitiatedRemoteActorServer { + + def run = { + remote.start("localhost", 2552) + remote.register("hello-service", actorOf[HelloWorldActor]) + } + + def main(args: Array[String]) = run + } + + object ServerInitiatedRemoteActorClient extends Logging { + + def run = { + val actor = remote.actorFor("hello-service", "localhost", 2552) + val result = actor !! "Hello" + log.info("Result from Remote Actor: %s", result) + } + + def main(args: Array[String]) = run + } + +Automatic remote 'sender' reference management +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The sender of a remote message will be reachable with a reply through the remote server on the node that the actor is residing, automatically. +Please note that firewalled clients won't work right now. [2011-01-05] + +Identifying remote actors +------------------------- + +The 'id' field in the 'Actor' class is of importance since it is used as identifier for the remote actor. If you want to create a brand new actor every time you instantiate a remote actor then you have to set the 'id' field to a unique 'String' for each instance. 
If you want to reuse the same remote actor instance for each new remote actor (of the same class) you create then you don't have to do anything since the 'id' field by default is equal to the name of the actor class. + +Here is an example of overriding the 'id' field: + +.. code-block:: scala + + import akka.util.UUID + + class MyActor extends Actor { + self.id = UUID.newUuid.toString + def receive = { + case "hello" => self.reply("world") + } + } + + val actor = remote.actorOf[MyActor]("192.68.23.769", 2552) + +Remote Typed Actors +------------------- + +Client-managed Remote Actors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +DEPRECATED AS OF 1.1 + +You can define the Typed Actor to be a remote service by adding the 'RemoteAddress' configuration element in the declarative supervisor configuration: + +.. code-block:: java + + new Component( + Foo.class, + new LifeCycle(new Permanent(), 1000), + 1000, + new RemoteAddress("localhost", 2552)) + +You can also define an Typed Actor to be remote programmatically when creating it explicitly: + +.. code-block:: java + + TypedActorFactory factory = new TypedActorFactory(); + + POJO pojo = (POJO) factory.newRemoteInstance(POJO.class, 1000, "localhost", 2552) + + ... // use pojo as usual + +Server-managed Remote Actors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +WARNING: Remote TypedActors do not work with overloaded methods on your TypedActor, refrain from using overloading. + +Server side setup +***************** + +The API for server managed remote typed actors is nearly the same as for untyped actor + +.. code-block:: scala + + class RegistrationServiceImpl extends TypedActor with RegistrationService { + def registerUser(user: User): Unit = { + ... // register user + } + } + + remote.start("localhost", 2552) + + val typedActor = TypedActor.newInstance(classOf[RegistrationService], classOf[RegistrationServiceImpl], 2000) + remote.registerTypedActor("user-service", typedActor) + +Actors created like this are automatically started. 
+ +Session bound server side setup +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Session bound server managed remote actors work by creating and starting a new actor for every client that connects. Actors are stopped automatically when the client disconnects. The client side is the same as regular server managed remote actors. Use the function registerTypedPerSessionActor instead of registerTypedActor. + +Session bound actors are useful if you need to keep state per session, e.g. username. +They are also useful if you need to perform some cleanup when a client disconnects. + +.. code-block:: scala + + class RegistrationServiceImpl extends TypedActor with RegistrationService { + def registerUser(user: User): Unit = { + ... // register user + } + } + remote.start("localhost", 2552) + + remote.registerTypedPerSessionActor("user-service", + TypedActor.newInstance(classOf[RegistrationService], + classOf[RegistrationServiceImpl], 2000)) + +Note that the second argument in registerTypedPerSessionActor is an implicit function. It will be called to create an actor every time a session is established. + +Client side usage +***************** + +.. code-block:: scala + + val actor = remote.typedActorFor(classOf[RegistrationService], "user-service", 5000L, "localhost", 2552) + actor.registerUser(…) + +There are variations on the 'RemoteClient#typedActorFor' method. Here are some of them: + +.. code-block:: scala + + ... = typedActorFor(interfaceClazz, serviceIdOrClassName, hostname, port) + ... = typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port) + ... = typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port, classLoader) + +Data Compression Configuration +------------------------------ + +Akka uses compression to minimize the size of the data sent over the wire. Currently it only supports 'zlib' compression but more will come later. + +You can configure it like this: + +.. 
code-block:: ruby + + akka { + remote { + compression-scheme = "zlib" # Options: "zlib" (lzf to come), leave out for no compression + zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6 + } + } + +Code provisioning +----------------- + +Akka does currently not support automatic code provisioning but requires you to have the remote actor class files available on both the "client" the "server" nodes. +This is something that will be addressed soon. Until then, sorry for the inconvenience. + +Subscribe to Remote Client events +--------------------------------- + +Akka has a subscription API for the client event. You can register an Actor as a listener and this actor will have to be able to process these events: + +.. code-block:: scala + + sealed trait RemoteClientLifeCycleEvent + case class RemoteClientError( + @BeanProperty cause: Throwable, + @BeanProperty client: RemoteClientModule, + @BeanProperty remoteAddress: InetSocketAddress) extends RemoteClientLifeCycleEvent + + case class RemoteClientDisconnected( + @BeanProperty client: RemoteClientModule, + @BeanProperty remoteAddress: InetSocketAddress) extends RemoteClientLifeCycleEvent + + case class RemoteClientConnected( + @BeanProperty client: RemoteClientModule, + @BeanProperty remoteAddress: InetSocketAddress) extends RemoteClientLifeCycleEvent + + case class RemoteClientStarted( + @BeanProperty client: RemoteClientModule, + @BeanProperty remoteAddress: InetSocketAddress) extends RemoteClientLifeCycleEvent + + case class RemoteClientShutdown( + @BeanProperty client: RemoteClientModule, + @BeanProperty remoteAddress: InetSocketAddress) extends RemoteClientLifeCycleEvent + + case class RemoteClientWriteFailed( + @BeanProperty request: AnyRef, + @BeanProperty cause: Throwable, + @BeanProperty client: RemoteClientModule, + @BeanProperty remoteAddress: InetSocketAddress) extends RemoteClientLifeCycleEvent + +So a simple listener actor can look like this: + +.. 
code-block:: scala + + val listener = actorOf(new Actor { + def receive = { + case RemoteClientError(cause, client, address) => ... // act upon error + case RemoteClientDisconnected(client, address) => ... // act upon disconnection + case RemoteClientConnected(client, address) => ... // act upon connection + case RemoteClientStarted(client, address) => ... // act upon client shutdown + case RemoteClientShutdown(client, address) => ... // act upon client shutdown + case RemoteClientWriteFailed(request, cause, client, address) => ... // act upon write failure + case _ => //ignore other + } + }).start() + +Registration and de-registration can be done like this: + +.. code-block:: scala + + remote.addListener(listener) + ... + remote.removeListener(listener) + +Subscribe to Remote Server events +--------------------------------- + +Akka has a subscription API for the 'RemoteServer'. You can register an Actor as a listener and this actor will have to be able to process these events: + +.. code-block:: scala + + sealed trait RemoteServerLifeCycleEvent + case class RemoteServerStarted( + @BeanProperty val server: RemoteServerModule) extends RemoteServerLifeCycleEvent + case class RemoteServerShutdown( + @BeanProperty val server: RemoteServerModule) extends RemoteServerLifeCycleEvent + case class RemoteServerError( + @BeanProperty val cause: Throwable, + @BeanProperty val server: RemoteServerModule) extends RemoteServerLifeCycleEvent + case class RemoteServerClientConnected( + @BeanProperty val server: RemoteServerModule, + @BeanProperty val clientAddress: Option[InetSocketAddress]) extends RemoteServerLifeCycleEvent + case class RemoteServerClientDisconnected( + @BeanProperty val server: RemoteServerModule, + @BeanProperty val clientAddress: Option[InetSocketAddress]) extends RemoteServerLifeCycleEvent + case class RemoteServerClientClosed( + @BeanProperty val server: RemoteServerModule, + @BeanProperty val clientAddress: Option[InetSocketAddress]) extends 
RemoteServerLifeCycleEvent + case class RemoteServerWriteFailed( + @BeanProperty request: AnyRef, + @BeanProperty cause: Throwable, + @BeanProperty server: RemoteServerModule, + @BeanProperty clientAddress: Option[InetSocketAddress]) extends RemoteServerLifeCycleEvent + +So a simple listener actor can look like this: + +.. code-block:: scala + + val listener = actorOf(new Actor { + def receive = { + case RemoteServerStarted(server) => ... // act upon server start + case RemoteServerShutdown(server) => ... // act upon server shutdown + case RemoteServerError(cause, server) => ... // act upon server error + case RemoteServerClientConnected(server, clientAddress) => ... // act upon client connection + case RemoteServerClientDisconnected(server, clientAddress) => ... // act upon client disconnection + case RemoteServerClientClosed(server, clientAddress) => ... // act upon client connection close + case RemoteServerWriteFailed(request, cause, server, clientAddress) => ... // act upon server write failure + } + }).start() + +Registration and de-registration can be done like this: + +.. code-block:: scala + + remote.addListener(listener) + ... + remote.removeListener(listener) + +Message Serialization +--------------------- + +All messages that are sent to remote actors need to be serialized to binary format to be able to travel over the wire to the remote node. This is done by letting your messages extend one of the traits in the 'akka.serialization.Serializable' object. If the messages don't implement any specific serialization trait then the runtime will try to use standard Java serialization. + +Here are some examples, but full documentation can be found in the `Serialization section `_. + +Scala JSON +^^^^^^^^^^ + +.. code-block:: scala + + case class MyMessage(id: String, value: Tuple2[String, Int]) extends Serializable.ScalaJSON[MyMessage] + +Protobuf +^^^^^^^^ + +Protobuf message specification needs to be compiled with 'protoc' compiler. + +..
code-block:: scala + + message ProtobufPOJO { + required uint64 id = 1; + required string name = 2; + required bool status = 3; + } + +Using the generated message builder to send the message to a remote actor: + +.. code-block:: scala + + val result = actor !! ProtobufPOJO.newBuilder + .setId(11) + .setStatus(true) + .setName("Coltrane") + .build + +SBinary +^^^^^^^ + +``_ +case class User(firstNameLastName: Tuple2[String, String], email: String, age: Int) extends Serializable.SBinary[User] { + import sbinary.DefaultProtocol._ + + def this() = this(null, null, 0) + + implicit object UserFormat extends Format[User] { + def reads(in : Input) = User( + read[Tuple2[String, String]](in), + read[String](in), + read[Int](in)) + def writes(out: Output, value: User) = { + write[Tuple2[String, String]](out, value. firstNameLastName) + write[String](out, value.email) + write[Int](out, value.age) + } + } + + def fromBytes(bytes: Array[Byte]) = fromByteArray[User](bytes) + + def toBytes: Array[Byte] = toByteArray(this) +} +``_ diff --git a/akka-docs/pending/routing-java.rst b/akka-docs/pending/routing-java.rst new file mode 100644 index 0000000000..2c818af896 --- /dev/null +++ b/akka-docs/pending/routing-java.rst @@ -0,0 +1,93 @@ +Routing (Java) +============== + +**UntypedDispatcher** +--------------------- + +An UntypedDispatcher is an actor that routes incoming messages to outbound actors. + +.. 
code-block:: java + + import static akka.actor.Actors.*; + import akka.actor.*; + import akka.routing.*; + + //A Pinger is an UntypedActor that prints "Pinger: " + class Pinger extends UntypedActor { + public void onReceive(Object message) throws Exception { + System.out.println("Pinger: " + message); + } + } + + //A Ponger is an UntypedActor that prints "Ponger: " + class Ponger extends UntypedActor { + public void onReceive(Object message) throws Exception { + System.out.println("Ponger: " + message); + } + } + + public class MyDispatcher extends UntypedDispatcher { + private ActorRef pinger = actorOf(Pinger.class).start(); + private ActorRef ponger = actorOf(Ponger.class).start(); + + //Route Ping-messages to the pinger, and Pong-messages to the ponger + public ActorRef route(Object message) { + if("Ping".equals(message)) return pinger; + else if("Pong".equals(message)) return ponger; + else throw new IllegalArgumentException("I do not understand " + message); + } + } + + ActorRef dispatcher = actorOf(MyDispatcher.class).start(); + dispatcher.sendOneWay("Ping"); //Prints "Pinger: Ping" + dispatcher.sendOneWay("Pong"); //Prints "Ponger: Pong" + +**UntypedLoadBalancer** +----------------------- + +An UntypedLoadBalancer is an actor that forwards messages it receives to a boundless sequence of destination actors. + +.. code-block:: java + + import static akka.actor.Actors.*; + import akka.actor.*; + import akka.routing.*; + import static java.util.Arrays.asList; + + //A Pinger is an UntypedActor that prints "Pinger: " + class Pinger extends UntypedActor { + public void onReceive(Object message) throws Exception { + System.out.println("Pinger: " + message); + } + } + + //A Ponger is an UntypedActor that prints "Ponger: " + class Ponger extends UntypedActor { + public void onReceive(Object message) throws Exception { + System.out.println("Ponger: " + message); + } + } + + //Our load balancer, sends messages to a pinger, then a ponger, rinse and repeat. 
+ public class MyLoadBalancer extends UntypedLoadBalancer { + private InfiniteIterator actors = new CyclicIterator(asList( + actorOf(Pinger.class).start(), + actorOf(Ponger.class).start() + )); + + public InfiniteIterator seq() { + return actors; + } + } + + ActorRef dispatcher = actorOf(MyLoadBalancer.class).start(); + dispatcher.sendOneWay("Pong"); //Prints "Pinger: Pong" + dispatcher.sendOneWay("Ping"); //Prints "Ponger: Ping" + dispatcher.sendOneWay("Ping"); //Prints "Pinger: Ping" + dispatcher.sendOneWay("Pong"); //Prints "Ponger: Pong + +You can also send a 'new Routing.Broadcast(msg)' message to the router to have it be broadcasted out to all the actors it represents. + +``_ +router.sendOneWay(new Routing.Broadcast(new PoisonPill())); +``_ diff --git a/akka-docs/pending/routing-scala.rst b/akka-docs/pending/routing-scala.rst new file mode 100644 index 0000000000..4cb825219e --- /dev/null +++ b/akka-docs/pending/routing-scala.rst @@ -0,0 +1,263 @@ +**Routing / Patterns (Scala)** + +Akka-core includes some building blocks to build more complex message flow handlers, they are listed and explained below: + +Dispatcher +---------- + +A Dispatcher is an actor that routes incoming messages to outbound actors. + +To use it you can either create a Dispatcher through the **dispatcherActor()** factory method + +.. 
code-block:: scala + + import akka.actor.Actor._ + import akka.actor.Actor + import akka.routing.Routing._ + + //Our message types + case object Ping + case object Pong + + //Two actors, one named Pinger and one named Ponger + //The actor(pf) method creates an anonymous actor and starts it + val pinger = actorOf(new Actor { def receive = { case x => println("Pinger: " + x) } }).start() + val ponger = actorOf(new Actor { def receive = { case x => println("Ponger: " + x) } }).start() + + //A dispatcher that dispatches Ping messages to the pinger + //and Pong messages to the ponger + val d = dispatcherActor { + case Ping => pinger + case Pong => ponger + } + + d ! Ping //Prints "Pinger: Ping" + d ! Pong //Prints "Ponger: Pong" + +Or by mixing in akka.patterns.Dispatcher: + +.. code-block:: scala + + import akka.actor.Actor + import akka.actor.Actor._ + import akka.routing.Dispatcher + + //Our message types + case object Ping + case object Pong + + class MyDispatcher extends Actor with Dispatcher { + //Our pinger and ponger actors + val pinger = actorOf(new Actor { def receive = { case x => println("Pinger: " + x) } }).start() + val ponger = actorOf(new Actor { def receive = { case x => println("Ponger: " + x) } }).start() + //When we get a ping, we dispatch to the pinger + //When we get a pong, we dispatch to the ponger + def routes = { + case Ping => pinger + case Pong => ponger + } + } + + //Create an instance of our dispatcher, and start it + val d = actorOf[MyDispatcher].start() + + d ! Ping //Prints "Pinger: Ping" + d ! Pong //Prints "Ponger: Pong" + +LoadBalancer +------------ + +A LoadBalancer is an actor that forwards messages it receives to a boundless sequence of destination actors. + +Example using the **loadBalancerActor()** factory method: + +.. 
code-block:: scala + + import akka.actor.Actor._ + import akka.actor.Actor + import akka.routing.Routing._ + import akka.routing.CyclicIterator + + //Our message types + case object Ping + case object Pong + + //Two actors, one named Pinger and one named Ponger + //The actor(pf) method creates an anonymous actor and starts it + + val pinger = actorOf(new Actor { def receive = { case x => println("Pinger: " + x) } }).start() + val ponger = actorOf(new Actor { def receive = { case x => println("Ponger: " + x) } }).start() + + //A load balancer that given a sequence of actors dispatches them accordingly + //a CyclicIterator works in a round-robin-fashion + + val d = loadBalancerActor( new CyclicIterator( List(pinger,ponger) ) ) + + d ! Pong //Prints "Pinger: Pong" + d ! Pong //Prints "Ponger: Pong" + d ! Ping //Prints "Pinger: Ping" + d ! Ping //Prints "Ponger: Ping" + +Or by mixing in akka.routing.LoadBalancer + +.. code-block:: scala + + import akka.actor._ + import akka.actor.Actor._ + import akka.routing.{ LoadBalancer, CyclicIterator } + + //Our message types + case object Ping + case object Pong + + //A load balancer that balances between a pinger and a ponger + class MyLoadBalancer extends Actor with LoadBalancer { + val pinger = actorOf(new Actor { def receive = { case x => println("Pinger: " + x) } }).start() + val ponger = actorOf(new Actor { def receive = { case x => println("Ponger: " + x) } }).start() + + val seq = new CyclicIterator[ActorRef](List(pinger,ponger)) + } + + //Create an instance of our loadbalancer, and start it + val d = actorOf[MyLoadBalancer].start() + + d ! Pong //Prints "Pinger: Pong" + d ! Pong //Prints "Ponger: Pong" + d ! Ping //Prints "Pinger: Ping" + d ! 
Ping //Prints "Ponger: Ping" + +Also, instead of using the CyclicIterator, you can create your own message distribution algorithms, there’s already `one <http://github.com/jboner/akka/blob/master/akka-core/src/main/scala/routing/Iterators.scala#L31>`_ that dispatches depending on target mailbox size, effectively dispatching to the one that’s got fewest messages to process right now. + +Example ``_ + +You can also send a 'Routing.Broadcast(msg)' message to the router to have it be broadcasted out to all the actors it represents. + +.. code-block:: scala + + router ! Routing.Broadcast(PoisonPill) + +Actor Pool +---------- + +An actor pool is similar to the load balancer in that it routes incoming messages to other actors. It has different semantics however when it comes to how those actors are managed and selected for dispatch. Therein lies the difference. The pool manages, from start to shutdown, the lifecycle of all delegated actors. The number of actors in a pool can be fixed or grow and shrink over time. Also, messages can be routed to more than one actor in the pool if so desired. This is a useful little feature for accounting for expected failure - especially with remoting - where you can invoke the same request of multiple actors and just take the first, best response. + +The actor pool is built around three concepts: capacity, filtering and selection. + +Selection +^^^^^^^^^ + +All pools require a *Selector* to be mixed-in. This trait controls how and how many actors in the pool will receive the incoming message. Define *selectionCount* to some positive number greater than one to route to multiple actors. Currently two are provided: +* `SmallestMailboxSelector `_ - Using the exact same logic as the iterator of the same name, the pooled actor with the fewest number of pending messages will be chosen. +* `RoundRobinSelector `_ - Performs a very simple index-based selection, wrapping around the end of the list, very much like the CyclicIterator does.
+ +* + +Partial Fills +************* + +When selecting more than one pooled actor, it's possible that in order to fulfill the requested amount, the selection set must contain duplicates. By setting *partialFill* to **true**, you instruct the selector to return only unique actors from the pool. + +Capacity +^^^^^^^^ + +As you'd expect, capacity traits determine how the pool is funded with actors. There are two types of strategies that can be employed: +* `FixedCapacityStrategy `_ - When you mix this into your actor pool, you define a pool size and when the pool is started, it will have that number of actors within to which messages will be delegated. +* `BoundedCapacityStrategy `_ - When you mix this into your actor pool, you define upper and lower bounds, and when the pool is started, it will have the minimum number of actors in place to handle messages. You must also mix-in a Capacitor and a Filter when using this strategy (see below). + +The *BoundedCapacityStrategy* requires additional logic to function. Specifically it requires a *Capacitor* and a *Filter*. Capacitors are used to determine the pressure that the pool is under and provide a (usually) raw reading of this information. Currently we provide for the use of either mailbox backlog or active futures count as a means of evaluating pool pressure. Each expresses itself as a simple number - a reading of the number of actors either with mailbox sizes over a certain threshold or blocking a thread waiting on a future to complete or expire. + +Filtering +^^^^^^^^^ + +A *Filter* is a trait that modifies the raw pressure reading returned from a Capacitor such that it drives the adjustment of the pool capacity to a desired end. More simply, if we just used the pressure reading alone, we might only ever increase the size of the pool (to respond to overload) or we might only have a single mechanism for reducing the pool size when/if it became necessary.
This behavior is fully under your control through the use of *Filters*. Let's take a look at some code to see how this works: + +.. code-block:: scala + + trait BoundedCapacitor + { + def lowerBound:Int + def upperBound:Int + + def capacity(delegates:Seq[ActorRef]):Int = + { + val current = delegates length + var delta = _eval(delegates) + val proposed = current + delta + + if (proposed < lowerBound) delta += (lowerBound - proposed) + else if (proposed > upperBound) delta -= (proposed - upperBound) + + delta + } + + protected def _eval(delegates:Seq[ActorRef]):Int + } + + trait CapacityStrategy + { + import ActorPool._ + + def pressure(delegates:Seq[ActorRef]):Int + def filter(pressure:Int, capacity:Int):Int + + protected def _eval(delegates:Seq[ActorRef]):Int = filter(pressure(delegates), delegates.size) + } + +Here we see how the filter function will have the chance to modify the pressure reading to influence the capacity change. You are free to implement filter() however you like. We provide a `Filter `_ trait that evaluates both a rampup and a backoff subfilter to determine how to use the pressure reading to alter the pool capacity. There are several subfilters available to use, though again you may create whatever makes the most sense for your pool: +* `BasicRampup `_ - When pressure exceeds current capacity, increase the number of actors in the pool by some factor (*rampupRate*) of the current pool size. +* `BasicBackoff `_ - When the pressure ratio falls under some predefined amount (*backoffThreshold*), decrease the number of actors in the pool by some factor of the current pool size. +* `RunningMeanBackoff `_ - This filter tracks the average pressure-to-capacity over the lifetime of the pool (or since the last time the filter was reset) and will begin to reduce capacity once this mean falls below some predefined amount. The number of actors that will be stopped is determined by some factor of the difference between the current capacity and pressure.
The idea behind this filter is to reduce the likelihood of "thrashing" (removing then immediately creating...) pool actors by delaying the backoff until some quiescent stage of the pool. Put another way, use this subfilter to allow quick rampup to handle load and more subtle backoff as that decreases over time. + +Examples +^^^^^^^^ + +.. code-block:: scala + + class TestPool extends Actor with DefaultActorPool + with BoundedCapacityStrategy + with ActiveFuturesPressureCapacitor + with SmallestMailboxSelector + with BasicNoBackoffFilter + { + def factory = actorOf(new Actor {def receive = {case n:Int => + Thread.sleep(n) + counter.incrementAndGet + latch.countDown()}}) + + def lowerBound = 2 + def upperBound = 4 + def rampupRate = 0.1 + def partialFill = true + def selectionCount = 1 + def instance = factory + def receive = _route + } + +.. code-block:: scala + + class TestPool extends Actor with DefaultActorPool + with BoundedCapacityStrategy + with MailboxPressureCapacitor + with SmallestMailboxSelector + with Filter + with RunningMeanBackoff + with BasicRampup + { + + def factory = actorOf(new Actor {def receive = {case n:Int => + Thread.sleep(n) + latch.countDown()}}) + + def lowerBound = 1 + def upperBound = 5 + def pressureThreshold = 1 + def partialFill = true + def selectionCount = 1 + def rampupRate = 0.1 + def backoffRate = 0.50 + def backoffThreshold = 0.50 + def instance = factory + def receive = _route + } + +Taken from the unit test `spec `_. diff --git a/akka-docs/pending/scheduler.rst b/akka-docs/pending/scheduler.rst new file mode 100644 index 0000000000..ac0c7a3a50 --- /dev/null +++ b/akka-docs/pending/scheduler.rst @@ -0,0 +1,16 @@ +Scheduler +========= + +Module stability: **SOLID** + +Akka has a little scheduler written using actors. Can be convenient if you want to schedule some periodic task for maintenance or similar. + +It allows you to register a message that you want to be sent to a specific actor at a periodic interval. 
Here is an example: + +``_ +//Sends messageToBeSent to receiverActor after initialDelayBeforeSending and then after each delayBetweenMessages +Scheduler.schedule(receiverActor, messageToBeSent, initialDelayBeforeSending, delayBetweenMessages, timeUnit) + +//Sends messageToBeSent to receiverActor after delayUntilSend +Scheduler.scheduleOnce(receiverActor, messageToBeSent, delayUntilSend, timeUnit) +``_ diff --git a/akka-docs/pending/security.rst b/akka-docs/pending/security.rst new file mode 100644 index 0000000000..3600c21285 --- /dev/null +++ b/akka-docs/pending/security.rst @@ -0,0 +1,261 @@ +Security +======== + +Module stability: **IN PROGRESS** + +Akka supports security for access to RESTful Actors through `HTTP Authentication `_. The security is implemented as a jersey ResourceFilter which delegates the actual authentication to an authentication actor. + +Akka provides authentication via the following authentication schemes: +* `Basic Authentication `_ +* `Digest Authentication `_ +* `Kerberos SPNEGO Authentication `_ + +The authentication is performed by implementations of akka.security.AuthenticationActor. + +Akka provides a trait for each authentication scheme: +* BasicAuthenticationActor +* DigestAuthenticationActor +* SpnegoAuthenticationActor + +With Akka’s excellent support for distributed databases, it’s a one-liner to do a distributed authentication scheme. + +^ + +Setup +===== + +To secure your RESTful actors you need to perform the following steps: + +1. configure the resource filter factory 'akka.security.AkkaSecurityFilterFactory' in the 'akka.conf' like this: + +.. code-block:: ruby + + akka { + ... + rest { + filters="akka.security.AkkaSecurityFilterFactory" + } + ... + } + +2. Configure an implementation of an authentication actor in 'akka.conf': + +.. code-block:: ruby + + akka { + ... + rest { + filters= ... + authenticator = "akka.security.samples.BasicAuthenticationService" + } + ... + } + +3. 
Start your authentication actor in your 'Boot' class. The security package consists of the following parts: + +4. Secure your RESTful actors using class or resource level annotations: +* @DenyAll +* @RolesAllowed(listOfRoles) +* @PermitAll + +Security Samples +---------------- + +The akka-samples-security module contains a small sample application with sample implementations for each authentication scheme. +You can start the sample app using the jetty plugin: mvn jetty:run. + +The RESTful actor can then be accessed using your browser of choice under: +* permit access only to users having the “chef” role: ``_ +* public access: ``_ + +You can access the secured resource using any user for basic authentication (which is the default authenticator in the sample app). + +Digest authentication can be directly enabled in the sample app. Kerberos/SPNEGO authentication is a bit more involved and is described below. + +^ + +Kerberos/SPNEGO Authentication +------------------------------ + +Kerberos is a network authentication protocol, (see ``_). It provides strong authentication for client/server applications. +In a kerberos enabled environment a user will need to sign on only once. Subsequent authentication to applications is handled transparently by kerberos. + +Most prominently the kerberos protocol is used to authenticate users in a windows network. When deploying web applications to a corporate intranet an important feature will be to support the single sign on (SSO), which comes to make the application kerberos aware. + +How does it work (at least for REST actors)? +# When accessing a secured resource the server will check the request for the *Authorization* header as with basic or digest authentication. +# If it is not set, the server will respond with a challenge to “Negotiate”. The negotiation is in fact the NEGO part of the `SPNEGO `_ specification) +# The browser will then try to acquire a so called *service ticket* from a ticket granting service, i.e.
the kerberos server +# The browser will send the *service ticket* to the web application encoded in the header value of the *Authorization* header +# The web application must validate the ticket based on a shared secret between the web application and the kerberos server. As a result the web application will know the name of the user + +To activate the kerberos/SPNEGO authentication for your REST actor you need to enable the kerberos/SPNEGO authentication actor in the akka.conf like this: + +.. code-block:: ruby + + akka { + ... + rest { + filters= ... + authenticator = "akka.security.samples.SpnegoAuthenticationService" + } + ... + } + +Furthermore you must provide the SpnegoAuthenticator with the following information. +# Service principal name: the name of your web application in the kerberos server's user database. This name always has the form “HTTP/{server}@{realm}” +# Path to the keytab file: this is a kind of certificate for your web application to acquire tickets from the kerberos server + +.. code-block:: ruby + + akka { + ... + rest { + filters= ... + authenticator = "akka.security.samples.SpnegoAuthenticationService" + kerberos { + servicePrincipal = "HTTP/{server}@{realm}" + keyTabLocation = "URL to keytab" + # kerberosDebug = "true" + } + } + ... + } + +^ + +How to setup kerberos on localhost for Ubuntu +--------------------------------------------- + +This is a short step by step description of how to set up a kerberos server on an ubuntu system. + +1. Install the Heimdal Kerberos Server and Client + +:: + + sudo apt-get install heimdal-clients heimdal-clients-x heimdal-kdc krb5-config + ... + +2. Set up your kerberos realm. In this example the realm is of course … EXAMPLE.COM + +:: + + eckart@dilbert:~$ sudo kadmin -l + kadmin> init EXAMPLE.COM + Realm max ticket life [unlimited]: + Realm max renewable ticket life [unlimited]: + kadmin> quit + +3.
Tell your kerberos clients what your realm is and where to find the kerberos server (aka the Key Distribution Centre or KDC) + +Edit the kerberos config file: /etc/krb5.conf and configure … +…the default realm: + +:: + + [libdefaults] + default_realm = EXAMPLE.COM + +… where to find the KDC for your realm + +:: + + [realms] + EXAMPLE.COM = { + kdc = localhost + } + +…which hostnames or domains map to which realm (a kerberos realm is **not** a DNS domain): + +:: + + [domain_realm] + localhost = EXAMPLE.COM + +4. Add the principals +The user principal: + +:: + + eckart@dilbert:~$ sudo kadmin -l + kadmin> add zaphod + Max ticket life [1 day]: + Max renewable life [1 week]: + Principal expiration time [never]: + Password expiration time [never]: + Attributes []: + zaphod@EXAMPLE.COM's Password: + Verifying - zaphod@EXAMPLE.COM's Password: + kadmin> quit + +The service principal: + +:: + + eckart@dilbert:~$ sudo kadmin -l + kadmin> add HTTP/localhost@EXAMPLE.COM + Max ticket life [1 day]: + Max renewable life [1 week]: + Principal expiration time [never]: + Password expiration time [never]: + Attributes []: + HTTP/localhost@EXAMPLE.COM's Password: + Verifying - HTTP/localhost@EXAMPLE.COM's Password: + kadmin> quit + +We can now try to acquire initial tickets for the principals to see if everything worked. + +:: + + eckart@dilbert:~$ kinit zaphod + zaphod@EXAMPLE.COM's Password: + +If this method returns without error we have a success. +We can additionally list the acquired tickets: + +:: + + eckart@dilbert:~$ klist + Credentials cache: FILE:/tmp/krb5cc_1000 + Principal: zaphod@EXAMPLE.COM + + Issued Expires Principal + Oct 24 21:51:59 Oct 25 06:51:59 krbtgt/EXAMPLE.COM@EXAMPLE.COM + +This seems correct. To remove the ticket cache simply type kdestroy. + +5.
Create a keytab for your service principal + +:: + + eckart@dilbert:~$ ktutil -k http.keytab add -p HTTP/localhost@EXAMPLE.COM -V 1 -e aes256-cts-hmac-sha1-96 + Password: + Verifying - Password: + eckart@dilbert:~$ + +This command will create a keytab file for the service principal named “http.keytab” in the current directory. You can specify other encryption methods than ‘aes256-cts-hmac-sha1-96’, but this is the default encryption method for the heimdal client, so there is no additional configuration needed. You can specify other encryption types in the krb5.conf. + +Note that you might need to install the unlimited strength policy files for java from here:``_ to use the aes256 encryption from your application. + +Again we can test if the keytab generation worked with the kinit command: + +:: + + eckart@dilbert:~$ kinit -t http.keytab HTTP/localhost@EXAMPLE.COM + eckart@dilbert:~$ klist + Credentials cache: FILE:/tmp/krb5cc_1000 + Principal: HTTP/localhost@EXAMPLE.COM + + Issued Expires Principal + Oct 24 21:59:20 Oct 25 06:59:20 krbtgt/EXAMPLE.COM@EXAMPLE.COM + +Now point the configuration of the key in 'akka.conf' to the correct location and set the correct service principal name. The web application should now start up and produce at least a 401 response with a header “WWW-Authenticate” = “Negotiate”. The last step is to configure the browser. + +6. Set up Firefox to use Kerberos/SPNEGO +This is done by typing 'about:config'. Filter the config entries for “network.neg” and set the config entries “network.negotiate-auth.delegation-uris” and “network.negotiate-auth.trusted-uris” to “localhost”. +and now … + +7. Access the RESTful Actor. + +8.
Have fun +… but acquire an initial ticket for the user principal first: kinit zaphod diff --git a/akka-docs/pending/serialization-java.rst b/akka-docs/pending/serialization-java.rst new file mode 100644 index 0000000000..1206211b8d --- /dev/null +++ b/akka-docs/pending/serialization-java.rst @@ -0,0 +1,178 @@ +Serialization (Java) +==================== + +Akka serialization module has been documented extensively under the Scala API section. In this section we will point out the different APIs that are available in Akka for Java based serialization of ActorRefs. The Scala APIs of ActorSerialization has implicit Format objects that set up the type class based serialization. In the Java API, the Format objects need to be specified explicitly. + +Serialization of ActorRef +========================= + +The following are the Java APIs for serialization of local ActorRefs: + +.. code-block:: scala + + /** + * Module for local actor serialization. + */ + object ActorSerialization { + // wrapper for implicits to be used by Java + def fromBinaryJ[T <: Actor](bytes: Array[Byte], format: Format[T]): ActorRef = + fromBinary(bytes)(format) + + // wrapper for implicits to be used by Java + def toBinaryJ[T <: Actor](a: ActorRef, format: Format[T], srlMailBox: Boolean = true): Array[Byte] = + toBinary(a, srlMailBox)(format) + } + +The following steps describe the procedure for serializing an Actor and ActorRef. + +Serialization of a Stateless Actor +================================== + +Step 1: Define the Actor +------------------------ + +.. code-block:: scala + + public class SerializationTestActor extends UntypedActor { + public void onReceive(Object msg) { + getContext().replySafe("got it!"); + } + } + +Step 2: Define the typeclass instance for the actor +--------------------------------------------------- + +Note how the generated Java classes are accessed using the $class based naming convention of the Scala compiler. + +.. 
code-block:: scala + + class SerializationTestActorFormat implements StatelessActorFormat { + @Override + public SerializationTestActor fromBinary(byte[] bytes, SerializationTestActor act) { + return (SerializationTestActor) StatelessActorFormat$class.fromBinary(this, bytes, act); + } + + @Override + public byte[] toBinary(SerializationTestActor ac) { + return StatelessActorFormat$class.toBinary(this, ac); + } + } + +**Step 3: Serialize and de-serialize** + +The following JUnit snippet first creates an actor using the default constructor. The actor is, as we saw above a stateless one. Then it is serialized and de-serialized to get back the original actor. Being stateless, the de-serialized version behaves in the same way on a message as the original actor. + +.. code-block:: java + + @Test public void mustBeAbleToSerializeAfterCreateActorRefFromClass() { + ActorRef ref = Actors.actorOf(SerializationTestActor.class); + assertNotNull(ref); + ref.start(); + try { + Object result = ref.sendRequestReply("Hello"); + assertEquals("got it!", result); + } catch (ActorTimeoutException ex) { + fail("actor should not time out"); + } + + Format f = new SerializationTestActorFormat(); + byte[] bytes = toBinaryJ(ref, f, false); + ActorRef r = fromBinaryJ(bytes, f); + assertNotNull(r); + r.start(); + try { + Object result = r.sendRequestReply("Hello"); + assertEquals("got it!", result); + } catch (ActorTimeoutException ex) { + fail("actor should not time out"); + } + ref.stop(); + r.stop(); + } + +Serialization of a Stateful Actor +================================= + +Let's now have a look at how to serialize an actor that carries a state with it. Here the expectation is that the serialization of the actor will also persist the state information. And after de-serialization we will get back the state with which it was serialized. + +Step 1: Define the Actor +------------------------ + +Here we consider an actor defined in Scala. We will however serialize using the Java APIs. + +.. 
code-block:: scala + + class MyUntypedActor extends UntypedActor { + var count = 0 + def onReceive(message: Any): Unit = message match { + case m: String if m == "hello" => + count = count + 1 + getContext.replyUnsafe("world " + count) + case m: String => + count = count + 1 + getContext.replyUnsafe("hello " + m + " " + count) + case _ => + throw new Exception("invalid message type") + } + } + +Note the actor has a state in the form of an Integer. And every message that the actor receives, it replies with an addition to the integer member. + +Step 2: Define the instance of the typeclass +-------------------------------------------- + +.. code-block:: java + + class MyUntypedActorFormat implements Format { + @Override + public MyUntypedActor fromBinary(byte[] bytes, MyUntypedActor act) { + ProtobufProtocol.Counter p = + (ProtobufProtocol.Counter) new SerializerFactory().getProtobuf().fromBinary(bytes, ProtobufProtocol.Counter.class); + act.count_$eq(p.getCount()); + return act; + } + + @Override + public byte[] toBinary(MyUntypedActor ac) { + return ProtobufProtocol.Counter.newBuilder().setCount(ac.count()).build().toByteArray(); + } + } + +Note the usage of Protocol Buffers to serialize the state of the actor. + +Step 3: Serialize and de-serialize +---------------------------------- + +.. 
code-block:: java + + @Test public void mustBeAbleToSerializeAStatefulActor() { + ActorRef ref = Actors.actorOf(MyUntypedActor.class); + assertNotNull(ref); + ref.start(); + try { + Object result = ref.sendRequestReply("hello"); + assertEquals("world 1", result); + result = ref.sendRequestReply("hello"); + assertEquals("world 2", result); + } catch (ActorTimeoutException ex) { + fail("actor should not time out"); + } + + Format f = new MyUntypedActorFormat(); + byte[] bytes = toBinaryJ(ref, f, false); + ActorRef r = fromBinaryJ(bytes, f); + assertNotNull(r); + r.start(); + try { + Object result = r.sendRequestReply("hello"); + assertEquals("world 3", result); + result = r.sendRequestReply("hello"); + assertEquals("world 4", result); + } catch (ActorTimeoutException ex) { + fail("actor should not time out"); + } + ref.stop(); + r.stop(); + } + +Note how the de-serialized version starts with the state value with which it was earlier serialized. diff --git a/akka-docs/pending/serialization-scala.rst b/akka-docs/pending/serialization-scala.rst new file mode 100644 index 0000000000..93b738a176 --- /dev/null +++ b/akka-docs/pending/serialization-scala.rst @@ -0,0 +1,978 @@ +Serialization (Scala) +===================== + +Module stability: **SOLID** + +Serialization of ActorRef +========================= + +An Actor can be serialized in two different ways: + +* Serializable RemoteActorRef - Serialized to an immutable, network-aware Actor reference that can be freely shared across the network. They "remember" and stay mapped to their original Actor instance and host node, and will always work as expected. +* Serializable LocalActorRef - Serialized by doing a deep copy of both the ActorRef and the Actor instance itself. Can be used to physically move an Actor from one node to another and continue the execution there. + +Both of these can be sent as messages over the network and/or store them to disk, in a persistent storage backend etc. 
+ +Actor serialization in Akka is implemented through a type class 'Format[T <: Actor]' which publishes the 'fromBinary' and 'toBinary' methods for serialization. Here's the complete definition of the type class: + +.. code-block:: scala + + /** + * Type class definition for Actor Serialization + */ + trait FromBinary[T <: Actor] { + def fromBinary(bytes: Array[Byte], act: T): T + } + + trait ToBinary[T <: Actor] { + def toBinary(t: T): Array[Byte] + } + + // client needs to implement Format[] for the respective actor + trait Format[T <: Actor] extends FromBinary[T] with ToBinary[T] + +**Deep serialization of an Actor and ActorRef** +----------------------------------------------- + +You can serialize the whole actor deeply, e.g. both the 'ActorRef' and then instance of its 'Actor'. This can be useful if you want to move an actor from one node to another, or if you want to store away an actor, with its state, into a database. + +Here is an example of how to serialize an Actor. + +Step 1: Define the actor +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + class MyActor extends Actor { + var count = 0 + + def receive = { + case "hello" => + count = count + 1 + self.reply("world " + count) + } + } + +Step 2: Implement the type class for the actor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + object BinaryFormatMyActor { + implicit object MyActorFormat extends Format[MyActor] { + def fromBinary(bytes: Array[Byte], act: MyActor) = { + val p = Serializer.Protobuf.fromBinary(bytes, Some(classOf[ProtobufProtocol.Counter])).asInstanceOf[ProtobufProtocol.Counter] + act.count = p.getCount + act + } + def toBinary(ac: MyActor) = + ProtobufProtocol.Counter.newBuilder.setCount(ac.count).build.toByteArray + } + } + } + +Step 3: Import the type class module definition and serialize / de-serialize +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: scala + + it("should be able to serialize and de-serialize a stateful actor") { + import akka.serialization.ActorSerialization._ + import BinaryFormatMyActor._ + + val actor1 = actorOf[MyActor].start() + (actor1 !! "hello").getOrElse("_") should equal("world 1") + (actor1 !! "hello").getOrElse("_") should equal("world 2") + + val bytes = toBinary(actor1) + val actor2 = fromBinary(bytes) + actor2.start() + (actor2 !! "hello").getOrElse("_") should equal("world 3") + } + +**Helper Type Class for Stateless Actors** + +If your actor is stateless, then you can use the helper trait that Akka provides to serialize / de-serialize. Here's the definition: + +.. code-block:: scala + + trait StatelessActorFormat[T <: Actor] extends Format[T] { + def fromBinary(bytes: Array[Byte], act: T) = act + def toBinary(ac: T) = Array.empty[Byte] + } + +Then you use it as follows: + +.. code-block:: scala + + class MyStatelessActor extends Actor { + def receive = { + case "hello" => + self.reply("world") + } + } + +Just create an object for the helper trait for your actor: + +.. code-block:: scala + + object BinaryFormatMyStatelessActor { + implicit object MyStatelessActorFormat extends StatelessActorFormat[MyStatelessActor] + } + +and use it for serialization: + +.. code-block:: scala + + it("should be able to serialize and de-serialize a stateless actor") { + import akka.serialization.ActorSerialization._ + import BinaryFormatMyStatelessActor._ + + val actor1 = actorOf[MyStatelessActor].start() + (actor1 !! "hello").getOrElse("_") should equal("world") + (actor1 !! "hello").getOrElse("_") should equal("world") + + val bytes = toBinary(actor1) + val actor2 = fromBinary(bytes) + actor2.start() + (actor2 !! "hello").getOrElse("_") should equal("world") + } + +**Helper Type Class for actors with external serializer** + +Use the trait 'SerializerBasedActorFormat' for specifying serializers. + +.. 
code-block:: scala + + trait SerializerBasedActorFormat[T <: Actor] extends Format[T] { + val serializer: Serializer + def fromBinary(bytes: Array[Byte], act: T) = serializer.fromBinary(bytes, Some(act.self.actorClass)).asInstanceOf[T] + def toBinary(ac: T) = serializer.toBinary(ac) + } + +For a Java serializable actor: + +.. code-block:: scala + + @serializable class MyJavaSerializableActor extends Actor { + var count = 0 + + def receive = { + case "hello" => + count = count + 1 + self.reply("world " + count) + } + } + +Create a module for the type class .. + +.. code-block:: scala + + object BinaryFormatMyJavaSerializableActor { + implicit object MyJavaSerializableActorFormat extends SerializerBasedActorFormat[MyJavaSerializableActor] { + val serializer = Serializer.Java + } + } + +and serialize / de-serialize .. + +.. code-block:: scala + + it("should be able to serialize and de-serialize a stateful actor with a given serializer") { + import akka.serialization.ActorSerialization._ + import BinaryFormatMyJavaSerializableActor._ + + val actor1 = actorOf[MyJavaSerializableActor].start() + (actor1 !! "hello").getOrElse("_") should equal("world 1") + (actor1 !! "hello").getOrElse("_") should equal("world 2") + + val bytes = toBinary(actor1) + val actor2 = fromBinary(bytes) + actor2.start() + (actor2 !! "hello").getOrElse("_") should equal("world 3") + } + +**Serialization of a RemoteActorRef** +------------------------------------- + +You can serialize an 'ActorRef' to an immutable, network-aware Actor reference that can be freely shared across the network, a reference that "remembers" and stay mapped to its original Actor instance and host node, and will always work as expected. + +The 'RemoteActorRef' serialization is based upon Protobuf (Google Protocol Buffers) and you don't need to do anything to use it, it works on any 'ActorRef' (as long as the actor has **not** implemented one of the 'SerializableActor' traits, since then deep serialization will happen). 
+ +Currently Akka will **not** autodetect an 'ActorRef' as part of your message and serialize it for you automatically, so you have to do that manually or as part of your custom serialization mechanisms. + +Here is an example of how to serialize an Actor. + +.. code-block:: scala + + val actor1 = actorOf[MyActor] + + val bytes = toBinary(actor1) + +To deserialize the 'ActorRef' to a 'RemoteActorRef' you need to use the 'fromBinaryToRemoteActorRef(bytes: Array[Byte])' method on the 'ActorRef' companion object: + +.. code-block:: scala + + import RemoteActorSerialization._ + val actor2 = fromBinaryToRemoteActorRef(bytes) + +You can also pass in a class loader to load the 'ActorRef' class and dependencies from: + +.. code-block:: scala + + import RemoteActorSerialization._ + val actor2 = fromBinaryToRemoteActorRef(bytes, classLoader) + +Deep serialization of a TypedActor +---------------------------------- + +Serialization of typed actors works almost the same way as untyped actors. You can serialize the whole actor deeply, e.g. both the 'proxied ActorRef' and the instance of its 'TypedActor'. + +Here is the example from above implemented as a TypedActor. + +^ + +Step 1: Define the actor +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + trait MyTypedActor { + def requestReply(s: String) : String + def oneWay() : Unit + } + + class MyTypedActorImpl extends TypedActor with MyTypedActor { + var count = 0 + + override def requestReply(message: String) : String = { + count = count + 1 + "world " + count + } + } + +Step 2: Implement the type class for the actor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: scala + + class MyTypedActorFormat extends Format[MyTypedActorImpl] { + def fromBinary(bytes: Array[Byte], act: MyTypedActorImpl) = { + val p = Serializer.Protobuf.fromBinary(bytes, Some(classOf[ProtobufProtocol.Counter])).asInstanceOf[ProtobufProtocol.Counter] + act.count = p.getCount + act + } + def toBinary(ac: MyTypedActorImpl) = ProtobufProtocol.Counter.newBuilder.setCount(ac.count).build.toByteArray + } + +Step 3: Import the type class module definition and serialize / de-serialize +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + val typedActor1 = TypedActor.newInstance(classOf[MyTypedActor], classOf[MyTypedActorImpl], 1000) + + val f = new MyTypedActorFormat + val bytes = toBinaryJ(typedActor1, f) + + val typedActor2: MyTypedActor = fromBinaryJ(bytes, f) //type hint needed + typedActor2.requestReply("hello") + +- + +Serialization of a remote typed ActorRef +---------------------------------------- + +To deserialize the TypedActor to a 'RemoteTypedActorRef' (an aspectwerkz proxy to a RemoteActorRef) you need to use the 'fromBinaryToRemoteTypedActorRef(bytes: Array[Byte])' method on 'RemoteTypedActorSerialization' object: + +.. code-block:: scala + + import RemoteTypedActorSerialization._ + val typedActor = fromBinaryToRemoteTypedActorRef(bytes) + + // you can also pass in a class loader + val typedActor2 = fromBinaryToRemoteTypedActorRef(bytes, classLoader) + +Compression +=========== + +Akka has a helper class for doing compression of binary data. This can be useful for example when storing data in one of the backing storages. It currently supports LZF which is a very fast compression algorithm suited for runtime dynamic compression. + +Here is an example of how it can be used: + +.. code-block:: scala + + import akka.serialization.Compression + + val bytes: Array[Byte] = ... 
+ + val compressBytes = Compression.LZF.compress(bytes) + val uncompressBytes = Compression.LZF.uncompress(compressBytes) + +Using the Serializable trait and Serializer class for custom serialization +========================================================================== + +If you are sending messages to a remote Actor and these messages implement one of the predefined interfaces/traits in the 'akka.serialization.Serializable.*' object, then Akka will transparently detect which serialization format it should use as wire protocol and will automatically serialize and deserialize the message according to this protocol. + +Each serialization interface/trait in +* akka.serialization.Serializable.* +> has a matching serializer in +* akka.serialization.Serializer.* + +Note however that if you are using one of the Serializable interfaces then you don’t have to do anything else in regard to sending remote messages. + +The ones currently supported are (besides the default which is regular Java serialization): +* ScalaJSON (Scala only) +* JavaJSON (Java but some Scala structures) +* SBinary (Scala only) +* Protobuf (Scala and Java) + +Apart from the above, Akka also supports Scala object serialization through `SJSON `_ that implements APIs similar to 'akka.serialization.Serializer.*'. See the section on SJSON below for details. + +Protobuf +-------- + +Akka supports using `Google Protocol Buffers `_ to serialize your objects. Protobuf is a very efficient network serialization protocol which is also used internally by Akka. The remote actors understand Protobuf messages so if you just send them as they are they will be correctly serialized and unserialized. + +Here is an example. + +Let's say you have this Protobuf message specification that you want to use as a message between remote actors. First you need to compile it with the 'protoc' compiler. + +.. 
code-block:: scala + + message ProtobufPOJO { + required uint64 id = 1; + required string name = 2; + required bool status = 3; + } + +When you compile the spec you will among other things get a message builder. You then use this builder to create the messages to send over the wire: + +.. code-block:: scala + + val result = remoteActor !! ProtobufPOJO.newBuilder + .setId(11) + .setStatus(true) + .setName("Coltrane") + .build + +The remote Actor can then receive the Protobuf message typed as-is: + +.. code-block:: scala + + class MyRemoteActor extends Actor { + def receive = { + case pojo: ProtobufPOJO => + val id = pojo.getId + val status = pojo.getStatus + val name = pojo.getName + ... + } + } + +JSON: Scala +----------- + +Use the akka.serialization.Serialization.ScalaJSON base class with its toJSON method. Akka’s Scala JSON is based upon the SJSON library. + +For your POJOs to be able to serialize themselves you have to extend the ScalaJSON[] trait as follows. JSON serialization is based on a type class protocol which you need to define for your own abstraction. The instance of the type class is defined as an implicit object which is used for serialization and de-serialization. You also need to implement the methods in terms of the APIs which sjson publishes. + +.. 
code-block:: scala + + import akka.serialization.Serializer + import akka.serialization.Serializable.ScalaJSON + import scala.reflect.BeanInfo + + case class MyMessage(val id: String, val value: Tuple2[String, Int]) extends ScalaJSON[MyMessage] { + // type class instance + implicit val MyMessageFormat: sjson.json.Format[MyMessage] = + asProduct2("id", "value")(MyMessage)(MyMessage.unapply(_).get) + + def toJSON: String = JsValue.toJson(tojson(this)) + def toBytes: Array[Byte] = tobinary(this) + def fromBytes(bytes: Array[Byte]) = frombinary[MyMessage](bytes) + def fromJSON(js: String) = fromjson[MyMessage](Js(js)) + } + + // sample test case + it("should be able to serialize and de-serialize MyMessage") { + val s = MyMessage("Target", ("cooker", 120)) + s.fromBytes(s.toBytes) should equal(s) + s.fromJSON(s.toJSON) should equal(s) + } + +Use akka.serialization.Serializer.ScalaJSON to do generic JSON serialization, e.g. serialize object that does not extend ScalaJSON using the JSON serializer. Serialization using Serializer can be done in two ways :- + +1. Type class based serialization (recommended) +2. Reflection based serialization + +We will discuss both of these techniques in this section. For more details refer to the discussion in the next section SJSON: Scala. + +Serializer API using type classes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Here are the steps that you need to follow: + +1. Define your class + +.. code-block:: scala + + case class MyMessage(val id: String, val value: Tuple2[String, Int]) + +2. Define the type class instance + +.. code-block:: scala + + import DefaultProtocol._ + implicit val MyMessageFormat: sjson.json.Format[MyMessage] = + asProduct2("id", "value")(MyMessage)(MyMessage.unapply(_).get) + +3. Serialize + +.. 
code-block:: scala + + import akka.serialization.Serializer.ScalaJSON + + val o = MyMessage("dg", ("akka", 100)) + fromjson[MyMessage](tojson(o)) should equal(o) + frombinary[MyMessage](tobinary(o)) should equal(o) + +Serializer API using reflection +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can also use the Serializer abstraction to serialize using reflection based serialization API of sjson. But we recommend using the type class based one, because reflection based serialization has limitations due to type erasure. Here's an example of reflection based serialization: + +.. code-block:: scala + + import akka.serialization.Serializer + import scala.reflect.BeanInfo + + @BeanInfo case class Foo(name: String) { + def this() = this(null) // default constructor is necessary for deserialization + } + + val foo = new Foo("bar") + val json = Serializer.ScalaJSON.out(foo) + + val fooCopy = Serializer.ScalaJSON.in(json) // returns a JsObject as an AnyRef + + val fooCopy2 = Serializer.ScalaJSON.in(new String(json)) // can also take a string as input + + val fooCopy3 = Serializer.ScalaJSON.in[Foo](json).asInstanceOf[Foo] + +Classes without a @BeanInfo annotation cannot be serialized as JSON. +So if you see something like that: + +.. code-block:: scala + + scala> Serializer.ScalaJSON.out(bar) + Serializer.ScalaJSON.out(bar) + java.lang.UnsupportedOperationException: Class class Bar not supported for conversion + at sjson.json.JsBean$class.toJSON(JsBean.scala:210) + at sjson.json.Serializer$SJSON$.toJSON(Serializer.scala:107) + at sjson.json.Serializer$SJSON$class.out(Serializer.scala:37) + at sjson.json.Serializer$SJSON$.out(Serializer.scala:107) + at akka.serialization.Serializer$ScalaJSON... + +it means, that you haven't got a @BeanInfo annotation on your class. + +You may also see this exception when trying to serialize a case class with out an attribute like this: + +.. 
code-block:: scala + + @BeanInfo case class Empty() // cannot be serialized + + SJSON: Scala +------------- + +SJSON supports serialization of Scala objects into JSON. It implements support for built in Scala structures like List, Map or String as well as custom objects. SJSON is available as an Apache 2 licensed project on Github `here `_. + +Example: I have a Scala object as .. + +.. code-block:: scala + + val addr = Address("Market Street", "San Francisco", "956871") + +where Address is a custom class defined by the user. Using SJSON, I can store it as JSON and retrieve as plain old Scala object. Here’s the simple assertion that validates the invariant. Note that during de-serialziation, the class name is specified. Hence what it gives back is an instance of Address. + +.. code-block:: scala + + addr should equal( + serializer.in[Address](serializer.out(addr))) + +Note, that the class needs to have a default constructor. Otherwise the deserialization into the specified class will fail. + +There are situations, particularly when writing generic persistence libraries in Akka, when the exact class is not known during de-serialization. Using SJSON I can get it as AnyRef or Nothing .. + +.. code-block:: scala + + serializer.in[AnyRef](serializer.out(addr)) + +or just as .. + +.. code-block:: scala + + serializer.in(serializer.out(addr)) + +What you get back from is a JsValue, an abstraction of the JSON object model. For details of JsValueimplementation, refer to `dispatch-json `_ that SJSON uses as the underlying JSON parser implementation. Once I have the JsValue model, I can use use extractors to get back individual attributes .. + +.. code-block:: scala + + val a = serializer.in[AnyRef](serializer.out(addr)) + + // use extractors + val c = 'city ? str + val c(_city) = a + _city should equal("San Francisco") + + val s = 'street ? str + val s(_street) = a + _street should equal("Market Street") + + val z = 'zip ? 
str + val z(_zip) = a + _zip should equal("956871") + +Serialization of Embedded Objects +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + SJSON supports serialization of Scala objects that have other embedded objects. Suppose you have the following Scala classes .. Here Contact has an embedded Address Map .. + +.. code-block:: scala + + @BeanInfo + case class Contact(name: String, + @(JSONTypeHint @field)(value = classOf[Address]) + addresses: Map[String, Address]) { + + override def toString = "name = " + name + " addresses = " + + addresses.map(a => a._1 + ":" + a._2.toString).mkString(",") + } + + @BeanInfo + case class Address(street: String, city: String, zip: String) { + override def toString = "address = " + street + "/" + city + "/" + zip + } + +With SJSON, I can do the following: + +.. code-block:: scala + + val a1 = Address("Market Street", "San Francisco", "956871") + val a2 = Address("Monroe Street", "Denver", "80231") + val a3 = Address("North Street", "Atlanta", "987671") + + val c = Contact("Bob", Map("residence" -> a1, "office" -> a2, "club" -> a3)) + val co = serializer.out(c) + + // with class specified + c should equal(serializer.in[Contact](co)) + + // no class specified + val a = serializer.in[AnyRef](co) + + // extract name + val n = 'name ? str + val n(_name) = a + "Bob" should equal(_name) + + // extract addresses + val addrs = 'addresses ? obj + val addrs(_addresses) = a + + // extract residence from addresses + val res = 'residence ? obj + val res(_raddr) = _addresses + + // make an Address bean out of _raddr + val address = JsBean.fromJSON(_raddr, Some(classOf[Address])) + a1 should equal(address) + + object r { def ># [T](f: JsF[T]) = f(a.asInstanceOf[JsValue]) } + + // still better: chain 'em up + "Market Street" should equal( + (r ># { ('addresses ? obj) andThen ('residence ? obj) andThen ('street ? str) })) + +^ + +Changing property names during serialization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: scala + + @BeanInfo + case class Book(id: Number, + title: String, @(JSONProperty @getter)(value = "ISBN") isbn: String) { + + override def toString = "id = " + id + " title = " + title + " isbn = " + isbn + } + +When this will be serialized out, the property name will be changed. + +.. code-block:: scala + + val b = new Book(100, "A Beautiful Mind", "012-456372") + val jsBook = Js(JsBean.toJSON(b)) + val expected_book_map = Map( + JsString("id") -> JsNumber(100), + JsString("title") -> JsString("A Beautiful Mind"), + JsString("ISBN") -> JsString("012-456372") + ) + +^ + +Serialization with ignore properties +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When serializing objects, some of the properties can be ignored declaratively. Consider the following class declaration: + +.. code-block:: scala + + @BeanInfo + case class Journal(id: BigDecimal, + title: String, + author: String, + @(JSONProperty @getter)(ignore = true) issn: String) { + + override def toString = + "Journal: " + id + "/" + title + "/" + author + + (issn match { + case null => "" + case _ => "/" + issn + }) + } + +The annotation @JSONProperty can be used to selectively ignore fields. When I serialize a Journal object out and then back in, the content of issn field will be null. + +.. code-block:: scala + + it("should ignore issn field") { + val j = Journal(100, "IEEE Computer", "Alex Payne", "012-456372") + serializer.in[Journal](serializer.out(j)).asInstanceOf[Journal].issn should equal(null) + } + +Similarly, we can ignore properties of an object **only** if they are null and not ignore otherwise. Just specify the annotation @JSONProperty as @JSONProperty {val ignoreIfNull = true}. + +^ + +Serialization with Type Hints for Generic Data Members +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Consider the following Scala class: + +.. 
code-block:: scala + + @BeanInfo + case class Contact(name: String, + @(JSONTypeHint @field)(value = classOf[Address]) + addresses: Map[String, Address]) { + + override def toString = "name = " + name + " addresses = " + + addresses.map(a => a._1 + ":" + a._2.toString).mkString(",") + } + +Because of erasure, you need to add the type hint declaratively through the annotation @JSONTypeHint that +SJSON will pick up during serialization. No we can say: + +.. code-block:: scala + + val c = Contact("Bob", Map("residence" -> a1, "office" -> a2, "club" -> a3)) + val co = serializer.out(c) + + it("should give an instance of Contact") { + c should equal(serializer.in[Contact](co)) + } + +With optional generic data members, we need to provide the hint to SJSON through another annotation@OptionTypeHint. + +.. code-block:: scala + + @BeanInfo + case class ContactWithOptionalAddr(name: String, + @(JSONTypeHint @field)(value = classOf[Address]) + @(OptionTypeHint @field)(value = classOf[Map[_,_]]) + addresses: Option[Map[String, Address]]) { + + override def toString = "name = " + name + " " + + (addresses match { + case None => "" + case Some(ad) => " addresses = " + ad.map(a => a._1 + ":" + a._2.toString).mkString(",") + }) + } + +Serialization works ok with optional members annotated as above. + +.. code-block:: scala + + describe("Bean with optional bean member serialization") { + it("should serialize with Option defined") { + val c = new ContactWithOptionalAddr("Debasish Ghosh", + Some(Map("primary" -> new Address("10 Market Street", "San Francisco, CA", "94111"), + "secondary" -> new Address("3300 Tamarac Drive", "Denver, CO", "98301")))) + c should equal( + serializer.in[ContactWithOptionalAddr](serializer.out(c))) + } + } + +You can also specify a custom ClassLoader while using SJSON serializer: + +.. code-block:: scala + + object SJSON { + val classLoader = //.. specify a custom classloader + } + + import SJSON._ + serializer.out(..) + + //.. 
+ +Fighting Type Erasure +^^^^^^^^^^^^^^^^^^^^^ + +Because of type erasure, it's not always possible to infer the correct type during de-serialization of objects. Consider the following example: + +.. code-block:: scala + + abstract class A + @BeanInfo case class B(param1: String) extends A + @BeanInfo case class C(param1: String, param2: String) extends A + + @BeanInfo case class D(@(JSONTypeHint @field)(value = classOf[A])param1: List[A]) + +and the serialization code like the following: + +.. code-block:: scala + + object TestSerialize{ + def main(args: Array[String]) { + val test1 = new D(List(B("hello1"))) + val json = sjson.json.Serializer.SJSON.out(test1) + val res = sjson.json.Serializer.SJSON.in[D](json) + val res1: D = res.asInstanceOf[D] + println(res1) + } + } + +Note that the type hint on class D says A, but the actual instances that have been put into the object before serialization is one of the derived classes (B). During de-serialization, we have no idea of what can be inside D. The serializer.in API will fail since all hint it has is for A, which is abstract. In such cases, we need to handle the de-serialization by using extractors over the underlying data structure that we use for storing JSON objects, which is JsValue. Here's an example: + +.. code-block:: scala + + val test1 = new D(List(B("hello1"))) + val json = serializer.out(test1) + + // create a JsValue from the string + val js = Js(new String(json)) + + // extract the named list argument + val m = (Symbol("param1") ? list) + val m(_m) = js + + // extract the string within + val s = (Symbol("param1") ? str) + + // form a list of B's + val result = _m.map{ e => + val s(_s) = e + B(_s) + } + + // form a D + println("result = " + D(result)) + +The above snippet de-serializes correctly using extractors defined on JsValue. For more details on JsValue and the extractors, please refer to `dispatch-json `_ . + +**NOTE**: Serialization with SJSON is based on bean introspection. 
In the current version of Scala (2.8.0.Beta1 and 2.7.7) there is a bug where bean introspection does not work properly for classes enclosed within another class. Please ensure that the beans are the top level classes in your application. They can be within objects though. A ticket has been filed in the Scala Tracker and also fixed in the trunk. Here's the `ticket `_ . + +Type class based Serialization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If type erasure hits you, reflection based serialization may not be the right option. In fact the last section shows some of the scenarios which may not be possible to handle using reflection based serialization of sjson. sjson also supports type class based serialization where you can provide a custom protocol for serialization as part of the type class implementation. + +Here's a sample session at the REPL which shows the default serialization protocol of sjson: + +.. code-block:: scala + + scala> import sjson.json._ + import sjson.json._ + + scala> import DefaultProtocol._ + import DefaultProtocol._ + + scala> val str = "debasish" + str: java.lang.String = debasish + + scala> import JsonSerialization._ + import JsonSerialization._ + + scala> tojson(str) + res0: dispatch.json.JsValue = "debasish" + + scala> fromjson[String](res0) + res1: String = debasish + +You can use serialization of generic data types using the default protocol as well: + +.. code-block:: scala + + scala> val list = List(10, 12, 14, 18) + list: List[Int] = List(10, 12, 14, 18) + + scala> tojson(list) + res2: dispatch.json.JsValue = [10, 12, 14, 18] + + scala> fromjson[List[Int]](res2) + res3: List[Int] = List(10, 12, 14, 18) + +You can also define your own custom protocol, which as to be an implementation of the following type class: + +.. 
code-block:: scala + + trait Writes[T] { + def writes(o: T): JsValue + } + + trait Reads[T] { + def reads(json: JsValue): T + } + + trait Format[T] extends Writes[T] with Reads[T] + +Consider a case class and a custom protocol to serialize it into JSON. Here's the type class implementation: + +.. code-block:: scala + + object Protocols { + case class Person(lastName: String, firstName: String, age: Int) + object PersonProtocol extends DefaultProtocol { + import dispatch.json._ + import JsonSerialization._ + + implicit object PersonFormat extends Format[Person] { + def reads(json: JsValue): Person = json match { + case JsObject(m) => + Person(fromjson[String](m(JsString("lastName"))), + fromjson[String](m(JsString("firstName"))), fromjson[Int](m(JsString("age")))) + case _ => throw new RuntimeException("JsObject expected") + } + + def writes(p: Person): JsValue = + JsObject(List( + (tojson("lastName").asInstanceOf[JsString], tojson(p.lastName)), + (tojson("firstName").asInstanceOf[JsString], tojson(p.firstName)), + (tojson("age").asInstanceOf[JsString], tojson(p.age)) )) + } + } + } + +and the serialization in action in the REPL: + +.. code-block:: scala + + scala> import sjson.json._ + import sjson.json._ + + scala> import Protocols._ + import Protocols._ + + scala> import PersonProtocol._ + import PersonProtocol._ + + scala> val p = Person("ghosh", "debasish", 20) + p: sjson.json.Protocols.Person = Person(ghosh,debasish,20) + + scala> import JsonSerialization._ + import JsonSerialization._ + + scala> tojson[Person](p) + res1: dispatch.json.JsValue = {"lastName" : "ghosh", "firstName" : "debasish", "age" : 20} + + scala> fromjson[Person](res1) + res2: sjson.json.Protocols.Person = Person(ghosh,debasish,20) + +There are other nifty ways to implement case class serialization using sjson. For more details, have a look at the `wiki `_ for sjson. + +**JSON: Java** + +Use the akka.serialization.Serialization.JavaJSON base class with its toJSONmethod. 
Akka’s Java JSON is based upon the Jackson library. + +For your POJOs to be able to serialize themselves you have to extend the JavaJSON trait. + +.. code-block:: java + + class MyMessage extends JavaJSON { + private String name = null; + public MyMessage(String name) { + this.name = name; + } + public String getName() { + return name; + } + } + + MyMessage message = new MyMessage("json"); + String json = message.toJSON(); + SerializerFactory factory = new SerializerFactory(); + MyMessage messageCopy = factory.getJavaJSON().in(json); + +Use the akka.serialization.SerializerFactory.getJavaJSON to do generic JSON serialization, e.g. serialize an object that does not extend JavaJSON using the JSON serializer. + +.. code-block:: java + + Foo foo = new Foo(); + SerializerFactory factory = new SerializerFactory(); + String json = factory.getJavaJSON().out(foo); + Foo fooCopy = factory.getJavaJSON().in(json, Foo.class); + +- + +SBinary: Scala +-------------- + +To serialize Scala structures you can use the SBinary serializer. SBinary can serialize all primitives and most default Scala datastructures; such as List, Tuple, Map, Set, BigInt etc. + +Here is an example of using the akka.serialization.Serializer.SBinary serializer to serialize standard Scala library objects. + +.. code-block:: scala + + import akka.serialization.Serializer + import sbinary.DefaultProtocol._ // you always need to import these implicits + val users = List(("user1", "passwd1"), ("user2", "passwd2"), ("user3", "passwd3")) + val bytes = Serializer.SBinary.out(users) + val usersCopy = Serializer.SBinary.in(bytes, Some(classOf[List[Tuple2[String,String]]])) + +If you need to serialize your own user-defined objects then you have to do three things: +# Define an empty constructor +# Mix in the Serializable.SBinary[T] trait, and implement its methods: +## fromBytes(bytes: Array[Byte])[T] +## toBytes: Array[Byte] +# Create an implicit sbinary.Format[T] object for your class. 
Which means that you have to define its two methods: +## reads(in: Input): T; in which you read in all the fields in your object, using read[FieldType](in)and recreate it. +## writes(out: Output, value: T): Unit; in which you write out all the fields in your object, usingwrite[FieldType](out, value.field). + +Here is an example: +``_ +case class User(val usernamePassword: Tuple2[String, String], val email: String, val age: Int) + extends Serializable.SBinary[User] { + import sbinary.DefaultProtocol._ + import sbinary.Operations._ + + def this() = this(null, null, 0) + + implicit object UserFormat extends Format[User] { + def reads(in : Input) = User( + read[Tuple2[String, String]](in), + read[String](in), + read[Int](in)) + def writes(out: Output, value: User) = { + write[Tuple2[String, String]](out, value.usernamePassword) + write[String](out, value.email) + write[Int](out, value.age) + } + } + + def fromBytes(bytes: Array[Byte]) = fromByteArray[User](bytes) + + def toBytes: Array[Byte] = toByteArray(this) +} +``_ diff --git a/akka-docs/pending/servlet.rst b/akka-docs/pending/servlet.rst new file mode 100644 index 0000000000..6859657a72 --- /dev/null +++ b/akka-docs/pending/servlet.rst @@ -0,0 +1,41 @@ +Akka Servlet +============ + += + +Module stability: **STABLE** + +Akka has a servlet; ‘se.scalablesolutions.akka.comet.AkkaServlet’ that can use to deploy your Akka-based application in an external Servlet container. All you need to do is to add the servlet to the ‘web.xml’, set ‘$AKKA_HOME’ to the root of the distribution (needs the ‘$AKKA_HOME/config/*’ files) and add the JARs in the ‘$AKKA_HOME/lib’ to your classpath (or put them in the ‘WEB-INF/lib’ directory in the WAR file). + +Also, you need to add the Akka initialize/cleanup listener in web.xml + +.. code-block:: xml + + + ... + + se.scalablesolutions.akka.servlet.Initializer + + ... + + +And to support REST actors and/or comet actors, you need to add the following servlet declaration: + +``_ + +... 
+ + Akka + + se.scalablesolutions.akka.comet.AkkaServlet + + se.scalablesolutions.akka.rest.AkkaServlet + + + * + Akka + +... + + +``_ diff --git a/akka-docs/pending/slf4j.rst b/akka-docs/pending/slf4j.rst new file mode 100644 index 0000000000..780030a543 --- /dev/null +++ b/akka-docs/pending/slf4j.rst @@ -0,0 +1,19 @@ +SLF4J +===== + +This module is available in the 'akka-slf4j.jar'. It has one single dependency; the slf4j-api jar. + +Event Handler +------------- + +This module includes a SLF4J Event Handler that works with Akka's standard Event Handler. You enabled it in the 'event-handlers' element in akka.conf. Here you can also define the log level. + +.. code-block:: ruby + + akka { + event-handlers = ["akka.event.slf4j.Slf4jEventHandler"] + event-handler-level = "DEBUG" + } + +Read more about how to use the event handler `here `_. + diff --git a/akka-docs/pending/sponsors.rst b/akka-docs/pending/sponsors.rst new file mode 100644 index 0000000000..65faac6794 --- /dev/null +++ b/akka-docs/pending/sponsors.rst @@ -0,0 +1,15 @@ +****Sponsors **** +======================================================= + +Scalable Solutions +================== + +Scalable Solutions AB is commercial entity behind Akka, providing support, consulting and training around Akka. +``_ + +YourKit +======= + +YourKit is kindly supporting open source projects with its full-featured Java Profiler. +YourKit, LLC is the creator of innovative and intelligent tools for profiling Java and .NET applications. 
+Take a look at YourKit’s leading software products: `YourKit Java Profiler `_ and `YourKit .NET Profiler `_ diff --git a/akka-docs/pending/stm-java.rst b/akka-docs/pending/stm-java.rst new file mode 100644 index 0000000000..7873e38a7a --- /dev/null +++ b/akka-docs/pending/stm-java.rst @@ -0,0 +1,522 @@ +Software Transactional Memory (Java) +==================================== + +Module stability: **SOLID** + +Overview of STM +=============== + +An `STM `_ turns the Java heap into a transactional data set with begin/commit/rollback semantics. Very much like a regular database. It implements the first three letters in ACID; ACI: +* (failure) Atomicity: all changes during the execution of a transaction make it, or none make it. This only counts for transactional datastructures. +* Consistency: a transaction gets a consistent view of reality (in Akka you get the Oracle version of the SERIALIZED isolation level). +* Isolated: changes made by concurrently executing transactions are not visible to each other. + +Generally, the STM is not needed that often when working with Akka. Some use-cases (that we can think of) are: +# When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. This might not be needed often, but when you do need this then you are screwed without it. +# When you want to share a datastructure across actors. +# When you need to use the persistence modules. + +Akka’s STM implements the concept in `Clojure’s `_ STM view on state in general. Please take the time to read `this excellent document `_ and view `this presentation `_ by Rich Hickey (the genius behind Clojure), since it forms the basis of Akka’s view on STM and state in general. + +The STM is based on Transactional References (referred to as Refs). 
Refs are memory cells, holding an (arbitrary) immutable value, that implement CAS (Compare-And-Swap) semantics and are managed and enforced by the STM for coordinated changes across many Refs. They are implemented using the excellent `Multiverse STM `_. + +Working with immutable collections can sometimes give bad performance due to extensive copying. Scala provides so-called persistent datastructures which makes working with immutable collections fast. They are immutable but with constant time access and modification. The use of structural sharing and an insert or update does not ruin the old structure, hence “persistent”. Makes working with immutable composite types fast. The persistent datastructures currently consist of a Map and Vector. + +Simple example +============== + +Here is a simple example of an incremental counter using STM. This shows creating a ``Ref``, a transactional reference, and then modifying it within a transaction, which is delimited by an ``Atomic`` anonymous inner class. + +.. code-block:: java + + import akka.stm.*; + + final Ref ref = new Ref(0); + + public int counter() { + return new Atomic() { + public Integer atomically() { + int inc = ref.get() + 1; + ref.set(inc); + return inc; + } + }.execute(); + } + + counter(); + // -> 1 + + counter(); + // -> 2 + +---- + +Ref +=== + +Refs (transactional references) are mutable references to values and through the STM allow the safe sharing of mutable data. To ensure safety the value stored in a Ref should be immutable. The value referenced by a Ref can only be accessed or swapped within a transaction. Refs separate identity from value. + +Creating a Ref +-------------- + +You can create a Ref with or without an initial value. + +.. 
code-block:: java + + import akka.stm.*; + + // giving an initial value + final Ref ref = new Ref(0); + + // specifying a type but no initial value + final Ref ref = new Ref(); + +Accessing the value of a Ref +---------------------------- + +Use ``get`` to access the value of a Ref. Note that if no initial value has been given then the value is initially ``null``. + +.. code-block:: java + + import akka.stm.*; + + final Ref ref = new Ref(0); + + Integer value = new Atomic() { + public Integer atomically() { + return ref.get(); + } + }.execute(); + // -> value = 0 + +Changing the value of a Ref +--------------------------- + +To set a new value for a Ref you can use ``set`` (or equivalently ``swap``), which sets the new value and returns the old value. + +.. code-block:: java + + import akka.stm.*; + + final Ref ref = new Ref(0); + + new Atomic() { + public Object atomically() { + return ref.set(5); + } + }.execute(); + +---- + +Transactions +============ + +A transaction is delimited using an ``Atomic`` anonymous inner class. + +.. code-block:: java + + new Atomic() { + public Object atomically() { + // ... + } + }.execute(); + +All changes made to transactional objects are isolated from other changes, all make it or non make it (so failure atomicity) and are consistent. With the AkkaSTM you automatically have the Oracle version of the SERIALIZED isolation level, lower isolation is not possible. To make it fully serialized, set the writeskew property that checks if a writeskew problem is allowed to happen. + +Retries +------- + +A transaction is automatically retried when it runs into some read or write conflict, until the operation completes, an exception (throwable) is thrown or when there are too many retries. When a read or writeconflict is encountered, the transaction uses a bounded exponential backoff to prevent cause more contention and give other transactions some room to complete. 
+ +If you are using non transactional resources in an atomic block, there could be problems because a transaction can be retried. If you are using print statements or logging, it could be that they are called more than once. So you need to be prepared to deal with this. One of the possible solutions is to work with a deferred or compensating task that is executed after the transaction aborts or commits. + +Unexpected retries +------------------ + +It can happen for the first few executions that you get a few failures of execution that lead to unexpected retries, even though there is not any read or writeconflict. The cause of this is that speculative transaction configuration/selection is used. There are transactions optimized for a single transactional object, for 1..n and for n to unlimited. So based on the execution of the transaction, the system learns; it begins with a cheap one and upgrades to more expensive ones. Once it has learned, it will reuse this knowledge. It can be activated/deactivated using the speculative property on the TransactionFactoryBuilder. In most cases it is best to use the default value (enabled) so you get more out of performance. + +Coordinated transactions and Transactors +---------------------------------------- + +If you need coordinated transactions across actors or threads then see `Transactors `_. + +Configuring transactions +------------------------ + +It's possible to configure transactions. The ``Atomic`` class can take a ``TransactionFactory``, which can determine properties of the transaction. A default transaction factory is used if none is specified. You can create a ``TransactionFactory`` with a ``TransactionFactoryBuilder``. + +Configuring transactions with a ``TransactionFactory``: + +.. 
code-block:: java + + import akka.stm.*; + + TransactionFactory txFactory = new TransactionFactoryBuilder() + .setReadonly(true) + .build(); + + new Atomic(txFactory) { + public Object atomically() { + // read only transaction + return ...; + } + }.execute(); + +The following settings are possible on a TransactionFactory: +* familyName - Family name for transactions. Useful for debugging because the familyName is shown in exceptions, logging and in the future also will be used for profiling. +* readonly - Sets transaction as readonly. Readonly transactions are cheaper and can be used to prevent modification to transactional objects. +* maxRetries - The maximum number of times a transaction will retry. +* timeout - The maximum time a transaction will block for. +* trackReads - Whether all reads should be tracked. Needed for blocking operations. Readtracking makes a transaction more expensive, but makes subsequent reads cheaper and also lowers the chance of a readconflict. +* writeSkew - Whether writeskew is allowed. Disable with care. +* blockingAllowed - Whether explicit retries are allowed. +* interruptible - Whether a blocking transaction can be interrupted if it is blocked. +* speculative - Whether speculative configuration should be enabled. +* quickRelease - Whether locks should be released as quickly as possible (before whole commit). +* propagation - For controlling how nested transactions behave. +* traceLevel - Transaction trace level. + +You can also specify the default values for some of these options in akka.conf. Here they are with their default values: + +:: + + stm { + max-retries = 1000 + timeout = 10 + write-skew = true + blocking-allowed = false + interruptible = false + speculative = true + quick-release = true + propagation = requires + trace-level = none + } + +Transaction lifecycle listeners +------------------------------- + +It's possible to have code that will only run on the successful commit of a transaction, or when a transaction aborts. 
You can do this by adding ``deferred`` or ``compensating`` blocks to a transaction. + +.. code-block:: java + + import akka.stm.*; + import static akka.stm.StmUtils.deferred; + import static akka.stm.StmUtils.compensating; + + new Atomic() { + public Object atomically() { + deferred(new Runnable() { + public void run() { + // executes when transaction commits + } + }); + compensating(new Runnable() { + public void run() { + // executes when transaction aborts + } + }); + // ... + return something; + } + }.execute(); + +Blocking transactions +--------------------- + +You can block in a transaction until a condition is met by using an explicit ``retry``. To use ``retry`` you also need to configure the transaction to allow explicit retries. + +Here is an example of using ``retry`` to block until an account has enough money for a withdrawal. This is also an example of using actors and STM together. + +.. code-block:: java + + import akka.stm.*; + + public class Transfer { + public Ref from; + public Ref to; + public double amount; + + public Transfer(Ref from, Ref to, double amount) { + this.from = from; + this.to = to; + this.amount = amount; + } + } + +.. 
code-block:: java + + import akka.stm.*; + import static akka.stm.StmUtils.retry; + import akka.actor.*; + import akka.util.FiniteDuration; + import java.util.concurrent.TimeUnit; + + public class Transferer extends UntypedActor { + TransactionFactory txFactory = new TransactionFactoryBuilder() + .setBlockingAllowed(true) + .setTrackReads(true) + .setTimeout(new FiniteDuration(60, TimeUnit.SECONDS)) + .build(); + + public void onReceive(Object message) throws Exception { + if (message instanceof Transfer) { + Transfer transfer = (Transfer) message; + final Ref from = transfer.from; + final Ref to = transfer.to; + final double amount = transfer.amount; + new Atomic(txFactory) { + public Object atomically() { + if (from.get() < amount) { + System.out.println("Transferer: not enough money - retrying"); + retry(); + } + System.out.println("Transferer: transferring"); + from.set(from.get() - amount); + to.set(to.get() + amount); + return null; + } + }.execute(); + } + } + } + +.. code-block:: java + + import akka.stm.*; + import akka.actor.*; + + final Ref account1 = new Ref(100.0); + final Ref account2 = new Ref(100.0); + + ActorRef transferer = Actors.actorOf(Transferer.class).start(); + + transferer.sendOneWay(new Transfer(account1, account2, 500.0)); + // Transferer: not enough money - retrying + + new Atomic() { + public Object atomically() { + return account1.set(account1.get() + 2000); + } + }.execute(); + // Transferer: transferring + + Double acc1 = new Atomic() { + public Double atomically() { + return account1.get(); + } + }.execute(); + + Double acc2 = new Atomic() { + public Double atomically() { + return account2.get(); + } + }.execute(); + + System.out.println("Account 1: " + acc1); + // Account 1: 1600.0 + + System.out.println("Account 2: " + acc2); + // Account 2: 600.0 + + transferer.stop(); + +Alternative blocking transactions +--------------------------------- + +You can also have two alternative blocking transactions, one of which can succeed first, 
with ``EitherOrElse``. + +.. code-block:: java + + import akka.stm.*; + + public class Branch { + public Ref left; + public Ref right; + public int amount; + + public Branch(Ref left, Ref right, int amount) { + this.left = left; + this.right = right; + this.amount = amount; + } + } + +.. code-block:: java + + import akka.stm.*; + import static akka.stm.StmUtils.retry; + import akka.actor.*; + import akka.util.FiniteDuration; + import java.util.concurrent.TimeUnit; + + public class Brancher extends UntypedActor { + TransactionFactory txFactory = new TransactionFactoryBuilder() + .setBlockingAllowed(true) + .setTrackReads(true) + .setTimeout(new FiniteDuration(60, TimeUnit.SECONDS)) + .build(); + + public void onReceive(Object message) throws Exception { + if (message instanceof Branch) { + Branch branch = (Branch) message; + final Ref left = branch.left; + final Ref right = branch.right; + final double amount = branch.amount; + new Atomic(txFactory) { + public Integer atomically() { + return new EitherOrElse() { + public Integer either() { + if (left.get() < amount) { + System.out.println("not enough on left - retrying"); + retry(); + } + System.out.println("going left"); + return left.get(); + } + public Integer orElse() { + if (right.get() < amount) { + System.out.println("not enough on right - retrying"); + retry(); + } + System.out.println("going right"); + return right.get(); + } + }.execute(); + } + }.execute(); + } + } + } + +.. 
code-block:: java + + import akka.stm.*; + import akka.actor.*; + + final Ref left = new Ref(100); + final Ref right = new Ref(100); + + ActorRef brancher = Actors.actorOf(Brancher.class).start(); + + brancher.sendOneWay(new Branch(left, right, 500)); + // not enough on left - retrying + // not enough on right - retrying + + new Atomic() { + public Object atomically() { + return right.set(right.get() + 1000); + } + }.execute(); + // going right + + brancher.stop(); + +---- + +Transactional datastructures +============================ + +Akka provides two datastructures that are managed by the STM. +* TransactionalMap +* TransactionalVector + +TransactionalMap and TransactionalVector look like regular mutable datastructures, they even implement the standard Scala 'Map' and 'RandomAccessSeq' interfaces, but they are implemented using persistent datastructures and managed references under the hood. Therefore they are safe to use in a concurrent environment. Underlying TransactionalMap is HashMap, an immutable Map but with near constant time access and modification operations. Similarly TransactionalVector uses a persistent Vector. See the Persistent Datastructures section below for more details. + +Like managed references, TransactionalMap and TransactionalVector can only be modified inside the scope of an STM transaction. + +Here is an example of creating and accessing a TransactionalMap: + +.. 
code-block:: java + + import akka.stm.*; + + // assuming a User class + + final TransactionalMap users = new TransactionalMap(); + + // fill users map (in a transaction) + new Atomic() { + public Object atomically() { + users.put("bill", new User("bill")); + users.put("mary", new User("mary")); + users.put("john", new User("john")); + return null; + } + }.execute(); + + // access users map (in a transaction) + User user = new Atomic() { + public User atomically() { + return users.get("bill").get(); + } + }.execute(); + +Here is an example of creating and accessing a TransactionalVector: + +.. code-block:: java + + import akka.stm.*; + + // assuming an Address class + + final TransactionalVector
addresses = new TransactionalVector
(); + + // fill addresses vector (in a transaction) + new Atomic() { + public Object atomically() { + addresses.add(new Address("somewhere")); + addresses.add(new Address("somewhere else")); + return null; + } + }.execute(); + + // access addresses vector (in a transaction) + Address address = new Atomic
() { + public Address atomically() { + return addresses.get(0); + } + }.execute(); + +---- + +Persistent datastructures +========================= + +Akka's STM should only be used with immutable data. This can be costly if you have large datastructures and are using a naive copy-on-write. In order to make working with immutable datastructures fast enough Scala provides what are called Persistent Datastructures. There are currently two different ones: +* HashMap (`scaladoc `_) +* Vector (`scaladoc `_) + +They are immutable and each update creates a completely new version but they are using clever structural sharing in order to make them almost as fast, for both read and update, as regular mutable datastructures. + +This illustration is taken from Rich Hickey's presentation. Copyright Rich Hickey 2009. + +``_ + +---- + +JTA integration +=============== + +The STM has JTA (Java Transaction API) integration. This means that it will, if enabled, hook in to JTA and start a JTA transaction when the STM transaction is started. It will also rollback the STM transaction if the JTA transaction has failed and vice versa. This does not mean that the STM is made durable, if you need that you should use one of the `persistence modules `_. It simply means that the STM will participate and interact with and external JTA provider, for example send a message using JMS atomically within an STM transaction, or use Hibernate to persist STM managed data etc. + +Akka also has an API for using JTA explicitly. Read the `section on JTA `_ for details. + +You can enable JTA support in the 'stm' section in the config: + +:: + + stm { + jta-aware = off # 'on' means that if there JTA Transaction Manager available then the STM will + # begin (or join), commit or rollback the JTA transaction. Default is 'off'. 
+ } + +You also have to configure which JTA provider to use etc in the 'jta' config section: + +``_ + jta { + provider = "from-jndi" # Options: "from-jndi" (means that Akka will try to detect a TransactionManager in the JNDI) + # "atomikos" (means that Akka will use the Atomikos based JTA impl in 'akka-jta', + # e.g. you need the akka-jta JARs on classpath). + timeout = 60 + } +``_ diff --git a/akka-docs/pending/stm-scala.rst b/akka-docs/pending/stm-scala.rst new file mode 100644 index 0000000000..0e1249fc48 --- /dev/null +++ b/akka-docs/pending/stm-scala.rst @@ -0,0 +1,544 @@ +Software Transactional Memory (Scala) +===================================== + +Module stability: **SOLID** + +Overview of STM +=============== + +An `STM `_ turns the Java heap into a transactional data set with begin/commit/rollback semantics. Very much like a regular database. It implements the first three letters in ACID; ACI: +* Atomic +* Consistent +* Isolated + +Generally, the STM is not needed very often when working with Akka. Some use-cases (that we can think of) are: +# When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. +# When you want to share a datastructure across actors. +# When you need to use the persistence modules. + +Akka’s STM implements the concept in `Clojure’s `_ STM view on state in general. Please take the time to read `this excellent document `_ and view `this presentation `_ by Rich Hickey (the genius behind Clojure), since it forms the basis of Akka’s view on STM and state in general. + +The STM is based on Transactional References (referred to as Refs). Refs are memory cells, holding an (arbitrary) immutable value, that implement CAS (Compare-And-Swap) semantics and are managed and enforced by the STM for coordinated changes across many Refs. 
They are implemented using the excellent `Multiverse STM `_. + +Working with immutable collections can sometimes give bad performance due to extensive copying. Scala provides so-called persistent datastructures which makes working with immutable collections fast. They are immutable but with constant time access and modification. The use of structural sharing and an insert or update does not ruin the old structure, hence “persistent”. Makes working with immutable composite types fast. The persistent datastructures currently consist of a Map and Vector. + +Simple example +============== + +Here is a simple example of an incremental counter using STM. This shows creating a ``Ref``, a transactional reference, and then modifying it within a transaction, which is delimited by ``atomic``. + +.. code-block:: scala + + import akka.stm._ + + val ref = Ref(0) + + def counter = atomic { + ref alter (_ + 1) + } + + counter + // -> 1 + + counter + // -> 2 + +---- + +Ref +=== + +Refs (transactional references) are mutable references to values and through the STM allow the safe sharing of mutable data. Refs separate identity from value. To ensure safety the value stored in a Ref should be immutable (they can of course contain refs themselves). The value referenced by a Ref can only be accessed or swapped within a transaction. If a transaction is not available, the call will be executed in its own transaction (the call will be atomic). This is a different approach than the Clojure Refs, where a missing transaction results in an error. + +Creating a Ref +-------------- + +You can create a Ref with or without an initial value. + +.. code-block:: scala + + import akka.stm._ + + // giving an initial value + val ref = Ref(0) + + // specifying a type but no initial value + val ref = Ref[Int] + +Accessing the value of a Ref +---------------------------- + +Use ``get`` to access the value of a Ref. Note that if no initial value has been given then the value is initially ``null``. + +.. 
code-block:: scala + + import akka.stm._ + + val ref = Ref(0) + + atomic { + ref.get + } + // -> 0 + +If there is a chance that the value of a Ref is null then you can use ``opt``, which will create an Option, either Some(value) or None, or you can provide a default value with ``getOrElse``. You can also check for null using ``isNull``. + +.. code-block:: scala + + import akka.stm._ + + val ref = Ref[Int] + + atomic { + ref.opt // -> None + ref.getOrElse(0) // -> 0 + ref.isNull // -> true + } + +Changing the value of a Ref +--------------------------- + +To set a new value for a Ref you can use ``set`` (or equivalently ``swap``), which sets the new value and returns the old value. + +.. code-block:: scala + + import akka.stm._ + + val ref = Ref(0) + + atomic { + ref.set(5) + } + // -> 0 + + atomic { + ref.get + } + // -> 5 + +You can also use ``alter`` which accepts a function that takes the old value and creates a new value of the same type. + +.. code-block:: scala + + import akka.stm._ + + val ref = Ref(0) + + atomic { + ref alter (_ + 5) + } + // -> 5 + + val inc = (i: Int) => i + 1 + + atomic { + ref alter inc + } + // -> 6 + +Refs in for-comprehensions +-------------------------- + +Ref is monadic and can be used in for-comprehensions. + +.. code-block:: scala + + import akka.stm._ + + val ref = Ref(1) + + atomic { + for (value <- ref) { + // do something with value + } + } + + val anotherRef = Ref(3) + + atomic { + for { + value1 <- ref + value2 <- anotherRef + } yield (value1 + value2) + } + // -> Ref(4) + + val emptyRef = Ref[Int] + + atomic { + for { + value1 <- ref + value2 <- emptyRef + } yield (value1 + value2) + } + // -> Ref[Int] + +---- + +Transactions +============ + +A transaction is delimited using ``atomic``. + +.. code-block:: scala + + atomic { + // ... + } + +Coordinated transactions and Transactors +---------------------------------------- + +If you need coordinated transactions across actors or threads then see `Transactors `_. 
+ +Configuring transactions +------------------------ + +It's possible to configure transactions. The ``atomic`` method can take an implicit or explicit ``TransactionFactory``, which can determine properties of the transaction. A default transaction factory is used if none is specified explicitly or there is no implicit ``TransactionFactory`` in scope. + +Configuring transactions with an **implicit** ``TransactionFactory``: + +.. code-block:: scala + + import akka.stm._ + + implicit val txFactory = TransactionFactory(readonly = true) + + atomic { + // read only transaction + } + +Configuring transactions with an **explicit** ``TransactionFactory``: + +.. code-block:: scala + + import akka.stm._ + + val txFactory = TransactionFactory(readonly = true) + + atomic(txFactory) { + // read only transaction + } + +The following settings are possible on a TransactionFactory: +* familyName - Family name for transactions. Useful for debugging. +* readonly - Sets transaction as readonly. Readonly transactions are cheaper. +* maxRetries - The maximum number of times a transaction will retry. +* timeout - The maximum time a transaction will block for. +* trackReads - Whether all reads should be tracked. Needed for blocking operations. +* writeSkew - Whether writeskew is allowed. Disable with care. +* blockingAllowed - Whether explicit retries are allowed. +* interruptible - Whether a blocking transaction can be interrupted. +* speculative - Whether speculative configuration should be enabled. +* quickRelease - Whether locks should be released as quickly as possible (before whole commit). +* propagation - For controlling how nested transactions behave. +* traceLevel - Transaction trace level. + +You can also specify the default values for some of these options in akka.conf. 
Here they are with their default values: + +:: + + stm { + max-retries = 1000 + timeout = 10 + write-skew = true + blocking-allowed = false + interruptible = false + speculative = true + quick-release = true + propagation = requires + trace-level = none + } + +You can also determine at which level a transaction factory is shared or not shared, which affects the way in which the STM can optimise transactions. + +Here is a shared transaction factory for all instances of an actor. + +.. code-block:: scala + + import akka.actor._ + import akka.stm._ + + object MyActor { + implicit val txFactory = TransactionFactory(readonly = true) + } + + class MyActor extends Actor { + import MyActor.txFactory + + def receive = { + case message: String => + atomic { + // read only transaction + } + } + } + +Here's a similar example with an individual transaction factory for each instance of an actor. + +.. code-block:: scala + + import akka.actor._ + import akka.stm._ + + class MyActor extends Actor { + implicit val txFactory = TransactionFactory(readonly = true) + + def receive = { + case message: String => + atomic { + // read only transaction + } + } + } + +Transaction lifecycle listeners +------------------------------- + +It's possible to have code that will only run on the successful commit of a transaction, or when a transaction aborts. You can do this by adding ``deferred`` or ``compensating`` blocks to a transaction. + +.. code-block:: scala + + import akka.stm._ + + atomic { + deferred { + // executes when transaction commits + } + compensating { + // executes when transaction aborts + } + } + +Blocking transactions +--------------------- + +You can block in a transaction until a condition is met by using an explicit ``retry``. To use ``retry`` you also need to configure the transaction to allow explicit retries. + +Here is an example of using ``retry`` to block until an account has enough money for a withdrawal. This is also an example of using actors and STM together. 
+ +.. code-block:: scala + + import akka.stm._ + import akka.actor._ + import akka.util.duration._ + import akka.util.Logging + + type Account = Ref[Double] + + case class Transfer(from: Account, to: Account, amount: Double) + + class Transferer extends Actor with Logging { + implicit val txFactory = TransactionFactory(blockingAllowed = true, trackReads = true, timeout = 60 seconds) + + def receive = { + case Transfer(from, to, amount) => + atomic { + if (from.get < amount) { + log.info("not enough money - retrying") + retry + } + log.info("transferring") + from alter (_ - amount) + to alter (_ + amount) + } + } + } + + val account1 = Ref(100.0) + val account2 = Ref(100.0) + + val transferer = Actor.actorOf(new Transferer).start() + + transferer ! Transfer(account1, account2, 500.0) + // INFO Transferer: not enough money - retrying + + atomic { account1 alter (_ + 2000) } + // INFO Transferer: transferring + + atomic { account1.get } + // -> 1600.0 + + atomic { account2.get } + // -> 600.0 + + transferer.stop() + +Alternative blocking transactions +--------------------------------- + +You can also have two alternative blocking transactions, one of which can succeed first, with ``either-orElse``. + +.. 
code-block:: scala + + import akka.stm._ + import akka.actor._ + import akka.util.duration._ + import akka.util.Logging + + case class Branch(left: Ref[Int], right: Ref[Int], amount: Int) + + class Brancher extends Actor with Logging { + implicit val txFactory = TransactionFactory(blockingAllowed = true, trackReads = true, timeout = 60 seconds) + + def receive = { + case Branch(left, right, amount) => + atomic { + either { + if (left.get < amount) { + log.info("not enough on left - retrying") + retry + } + log.info("going left") + } orElse { + if (right.get < amount) { + log.info("not enough on right - retrying") + retry + } + log.info("going right") + } + } + } + } + + val ref1 = Ref(0) + val ref2 = Ref(0) + + val brancher = Actor.actorOf(new Brancher).start() + + brancher ! Branch(ref1, ref2, 1) + // INFO Brancher: not enough on left - retrying + // INFO Brancher: not enough on right - retrying + + atomic { ref2 alter (_ + 1) } + // INFO Brancher: not enough on left - retrying + // INFO Brancher: going right + + brancher.stop() + +---- + +Transactional datastructures +============================ + +Akka provides two datastructures that are managed by the STM. +* TransactionalMap +* TransactionalVector + +TransactionalMap and TransactionalVector look like regular mutable datastructures, they even implement the standard Scala 'Map' and 'RandomAccessSeq' interfaces, but they are implemented using persistent datastructures and managed references under the hood. Therefore they are safe to use in a concurrent environment. Underlying TransactionalMap is HashMap, an immutable Map but with near constant time access and modification operations. Similarly TransactionalVector uses a persistent Vector. See the Persistent Datastructures section below for more details. + +Like managed references, TransactionalMap and TransactionalVector can only be modified inside the scope of an STM transaction. 
+ +*IMPORTANT*: There have been some problems reported when using transactional datastructures with 'lazy' initialization. Avoid that. + +Here is how you create these transactional datastructures: + +.. code-block:: scala + + import akka.stm._ + + // assuming something like + case class User(name: String) + case class Address(location: String) + + // using initial values + val map = TransactionalMap("bill" -> User("bill")) + val vector = TransactionalVector(Address("somewhere")) + + // specifying types + val map = TransactionalMap[String, User] + val vector = TransactionalVector[Address] + +TransactionalMap and TransactionalVector wrap persistent datastructures with transactional references and provide a standard Scala interface. This makes them convenient to use. + +Here is an example of using a Ref and a HashMap directly: + +.. code-block:: scala + + import akka.stm._ + import scala.collection.immutable.HashMap + + case class User(name: String) + + val ref = Ref(HashMap[String, User]()) + + atomic { + val users = ref.get + val newUsers = users + ("bill" -> User("bill")) // creates a new HashMap + ref.swap(newUsers) + } + + atomic { + ref.get.apply("bill") + } + // -> User("bill") + +Here is the same example using TransactionalMap: + +.. code-block:: scala + + import akka.stm._ + + case class User(name: String) + + val users = TransactionalMap[String, User] + + atomic { + users += "bill" -> User("bill") + } + + atomic { + users("bill") + } + // -> User("bill") + +---- + +Persistent datastructures +========================= + +Akka's STM should only be used with immutable data. This can be costly if you have large datastructures and are using a naive copy-on-write. In order to make working with immutable datastructures fast enough Scala provides what are called Persistent Datastructures. 
There are currently two different ones: +* HashMap (`scaladoc `_) +* Vector (`scaladoc `_) + +They are immutable and each update creates a completely new version but they are using clever structural sharing in order to make them almost as fast, for both read and update, as regular mutable datastructures. + +This illustration is taken from Rich Hickey's presentation. Copyright Rich Hickey 2009. + +``_ + +---- + +JTA integration +=============== + +The STM has JTA (Java Transaction API) integration. This means that it will, if enabled, hook in to JTA and start a JTA transaction when the STM transaction is started. It will also rollback the STM transaction if the JTA transaction has failed and vice versa. This does not mean that the STM is made durable, if you need that you should use one of the `persistence modules `_. It simply means that the STM will participate and interact with and external JTA provider, for example send a message using JMS atomically within an STM transaction, or use Hibernate to persist STM managed data etc. + +Akka also has an API for using JTA explicitly. Read the `section on JTA `_ for details. + +You can enable JTA support in the 'stm' section in the config: + +:: + + stm { + jta-aware = off # 'on' means that if there JTA Transaction Manager available then the STM will + # begin (or join), commit or rollback the JTA transaction. Default is 'off'. + } + +You also have to configure which JTA provider to use etc in the 'jta' config section: + +:: + + jta { + provider = "from-jndi" # Options: "from-jndi" (means that Akka will try to detect a TransactionManager in the JNDI) + # "atomikos" (means that Akka will use the Atomikos based JTA impl in 'akka-jta', + # e.g. you need the akka-jta JARs on classpath). + timeout = 60 + } + +---- + +Ants simulation sample +====================== + +One fun and very enlightening visual demo of STM, actors and transactional references is the `Ant simulation sample `_. 
I encourage you to run it and read through the code since it's a good example of using actors with STM. diff --git a/akka-docs/pending/stm.rst b/akka-docs/pending/stm.rst new file mode 100644 index 0000000000..c84ca4e6bb --- /dev/null +++ b/akka-docs/pending/stm.rst @@ -0,0 +1,60 @@ +Akka STM + +The Akka Software Transactional Memory implementation + +**Read consistency** +^^^^^^^^^^^^^^^^^^^^ + +Read consistency means that all values read within a transaction form a consistent view of the data. + +**Read consistency and MVCC** +***************************** + +A lot of STM implementations (like the Clojure STM) are Multi Version Concurrency Control (MVCC) based (TL2 of David Dice could be seen as MVCC). + +To provide read consistency, every ref is augmented with a version field (a long). There also is a logical clock (an AtomicLong for instance) that is incremented every time a transaction does a commit (there are some optimizations) and on all refs written, the version of the ref is updated to this new clock value. + +When a transaction begins, it reads the current version of the clock and makes sure that the versions of the refs it reads are equal to or lower than the version of the transaction. If the transaction encounters a ref with a higher value, the transaction is aborted and retried. + +MVCC STMs are relatively simple to write and have some very nice properties: +# readers don’t block writers +# writers don’t block readers +# persistent data-structures are very easy to write since a log can be added to each ref containing older versions of the data + +The problem with MVCC however is that the central clock forms a contention point that makes independent transactional data-structures not linearly scalable. todo: give example of scalability with MVCC. + +So even if you have 2 Threads having their private transactional Ref (so there is no visible contention), under the hood the transactions are still going to contend for the clock. 
+ +**Read consistency and the Akka STM** +************************************* + +The Akka STM (which is built on top of the Multiverse 0.7 STM), as of Akka 1.1, doesn’t use an MVCC based implementation because of the scalability limiting central clock. + +It uses 2 different mechanisms: +1) For very short transactions it does a full conflict scan every time a new ref is read. Doing a full conflict scan sounds expensive, but it only involves volatile reads. +2) For longer transactions it uses semi visible reads. Every time a read is done, the surplus of readers is incremented and stored in the ref. Once the transaction aborts or commits, the surplus is lowered again. If a transaction does an update, and sees that there is a surplus of readers, it increments a conflict counter. This conflict counter is checked every time a transaction reads a new ref. If it hasn’t changed, no full conflict scan is needed. If it has changed, a full conflict scan is required. If a conflict is detected, the transaction is aborted and retried. This technique is called a semi visible read (we don’t know which transactions are possibly going to encounter a conflict, but we do know if there is at least one possible conflict). + +There are 2 important optimizations to this design: +# Eager full conflict scan +# Read biased refs + +**Eager full conflict scan** +**************************** + +The reason why short transactions always do a full conflict scan is that doing semi visible reads relies on doing more expensive synchronization operations (e.g. doing a CAS to increase the surplus of readers, or doing a CAS to decrease it). + +**Read biased vs update biased.** +********************************* + +The problem with semi visible reads is that certain structures (e.g. the root of a tree) can form a contention point (because of the arrives/departs) even though it is mostly read. 
To reduce contention, a ref can become read biased after a certain number of reads by transactions that use semi visible reads is done. Once it has become read biased, no arrives and departs are required any more, but once it the Ref is updated it will always increment the conflict counter because it doesn’t know if there are any conflicting readers. + +Visible reads, semi visible reads +Read tracking + +strict isolation +eager conflict detection +deferred write, no dirty read possible + +isolation level +optimistic +various levels of pessimistic behavior diff --git a/akka-docs/pending/team.rst b/akka-docs/pending/team.rst new file mode 100644 index 0000000000..cdc97244bd --- /dev/null +++ b/akka-docs/pending/team.rst @@ -0,0 +1,22 @@ +Team +===== + +|| **Name** || **Role** || **Email** || +|| Jonas Bonér || Founder, Despot, Committer || jonas AT jonasboner DOT com || +|| Viktor Klang || Bad cop, Committer || viktor DOT klang AT gmail DOT com || +|| Debasish Ghosh || Committer || dghosh AT acm DOT org || +|| Ross McDonald || Alumni || rossajmcd AT gmail DOT com || +|| Eckhart Hertzler || Alumni || || +|| Mikael Högqvist || Alumni || || +|| Tim Perrett || Alumni || || +|| Jeanfrancois Arcand || Alumni || jfarcand AT apache DOT org || +|| Martin Krasser || Committer || krasserm AT googlemail DOT com || +|| Jan Van Besien || Alumni || || +|| Michael Kober || Committer || || +|| Peter Vlugter || Committer || || +|| Peter Veentjer || Committer || || +|| Irmo Manie || Committer || || +|| Heiko Seeberger || Committer || || +|| Hiram Chirino || Committer || || +|| Scott Clasen || Committer || || +|| Roland Kuhn || Committer || || diff --git a/akka-docs/pending/test.rst b/akka-docs/pending/test.rst new file mode 100644 index 0000000000..c845cb36d2 --- /dev/null +++ b/akka-docs/pending/test.rst @@ -0,0 +1,55 @@ +Testing of Akka +=============== + +Introduction +============ + +Testing concurrent code using time-outs (like Thread.sleep(..)) is usually a bad idea since it is 
both slow and error-prone. There are some frameworks that can help, some are listed below. + +Testing Actor Interaction +========================= + +For Actor interaction, making sure certain message arrives in time etc. we recommend you use Akka's built-in `TestKit `_. If you want to roll your own, you will find helpful abstractions in the `java.util.concurrent` package, most notably `BlockingQueue` and `CountDownLatch`. + +Unit testing of Actors +====================== + +If you need to unit test your actors then the best way to do that would be to decouple it from the Actor by putting it in a regular class/trait, test that, and then mix in the Actor trait when you want to create actors. This is necessary since you can't instantiate an Actor class directly with 'new'. But note that you can't test Actor interaction with this, but only local Actor implementation. Here is an example: + +.. code-block:: scala + + // test this + class MyLogic { + def blabla: Unit = { + ... + } + } + + // run this + actorOf(new MyLogic with Actor { + def receive = { + case Bla => blabla + } + }) + +...or define a non-anonymous MyLogicActor class. + +Akka Expect +=========== + +Expect mimic for testing Akka actors. + +``_ + +Awaitility +========== + +Not a Akka specific testing framework but a nice DSL for testing asynchronous code. +Scala and Java API. + +``_ + +ScalaTest Conductor +=================== + +``_ diff --git a/akka-docs/pending/testkit-example.rst b/akka-docs/pending/testkit-example.rst new file mode 100644 index 0000000000..611ba4dea6 --- /dev/null +++ b/akka-docs/pending/testkit-example.rst @@ -0,0 +1,138 @@ +Ray Roestenburg's example code from `his blog `_. 
+``_ +package unit.akka + +import org.scalatest.matchers.ShouldMatchers +import org.scalatest.{WordSpec, BeforeAndAfterAll} +import akka.actor.Actor._ +import akka.util.duration._ +import akka.util.TestKit +import java.util.concurrent.TimeUnit +import akka.actor.{ActorRef, Actor} +import util.Random + +/** + * a Test to show some TestKit examples + */ + +class TestKitUsageSpec extends WordSpec with BeforeAndAfterAll with ShouldMatchers with TestKit { + val echoRef = actorOf(new EchoActor).start() + val forwardRef = actorOf(new ForwardingActor(testActor)).start() + val filterRef = actorOf(new FilteringActor(testActor)).start() + val randomHead = Random.nextInt(6) + val randomTail = Random.nextInt(10) + val headList = List().padTo(randomHead, "0") + val tailList = List().padTo(randomTail, "1") + val seqRef = actorOf(new SequencingActor(testActor, headList, tailList)).start() + + override protected def afterAll(): scala.Unit = { + stopTestActor + echoRef.stop() + forwardRef.stop() + filterRef.stop() + seqRef.stop() + } + + "An EchoActor" should { + "Respond with the same message it receives" in { + within(100 millis) { + echoRef ! "test" + expectMsg("test") + } + } + } + "A ForwardingActor" should { + "Forward a message it receives" in { + within(100 millis) { + forwardRef ! "test" + expectMsg("test") + } + } + } + "A FilteringActor" should { + "Filter all messages, except expected messagetypes it receives" in { + var messages = List[String]() + within(100 millis) { + filterRef ! "test" + expectMsg("test") + filterRef ! 1 + expectNoMsg + filterRef ! "some" + filterRef ! "more" + filterRef ! 1 + filterRef ! "text" + filterRef ! 1 + + receiveWhile(500 millis) { + case msg: String => messages = msg :: messages + } + } + messages.length should be(3) + messages.reverse should be(List("some", "more", "text")) + } + } + "A SequencingActor" should { + "receive an interesting message at some point " in { + within(100 millis) { + seqRef ! 
"something" + ignoreMsg { + case msg: String => msg != "something" + } + expectMsg("something") + ignoreMsg { + case msg: String => msg == "1" + } + expectNoMsg + } + } + } +} + +/** + * An Actor that echoes everything you send to it + */ +class EchoActor extends Actor { + def receive = { + case msg => { + self.reply(msg) + } + } +} + +/** + * An Actor that forwards every message to a next Actor + */ +class ForwardingActor(next: ActorRef) extends Actor { + def receive = { + case msg => { + next ! msg + } + } +} + +/** + * An Actor that only forwards certain messages to a next Actor + */ +class FilteringActor(next: ActorRef) extends Actor { + def receive = { + case msg: String => { + next ! msg + } + case _ => None + } +} + +/** + * An actor that sends a sequence of messages with a random head list, an interesting value and a random tail list + * The idea is that you would like to test that the interesting value is received and that you cant be bothered with the rest + */ +class SequencingActor(next: ActorRef, head: List[String], tail: List[String]) extends Actor { + def receive = { + case msg => { + head map (next ! _) + next ! msg + tail map (next ! _) + } + } +} +``_ diff --git a/akka-docs/pending/testkit.rst b/akka-docs/pending/testkit.rst new file mode 100644 index 0000000000..d2d177948f --- /dev/null +++ b/akka-docs/pending/testkit.rst @@ -0,0 +1,49 @@ +Actor TestKit +============= + +Module Stability: **In Progress** + +Overview +-------- + +Testing actors comprises several aspects, which can have different weight according to the concrete project at hand: +* If you have a collection of actors which performs a certain function, you may want to apply defined stimuli and observe the delivery of the desired result messages to a test actor; in this case the ***TestKit*** trait will likely interest you. 
+* If you encounter undesired behaviour (exceptions, dead-locks) and want to nail down the cause, it might help to run the actors in question using the ***CallingThreadDispatcher***; this dispatcher is strictly less powerful than the general purpose ones, but its deterministic behaviour and complete message stack can help debugging, unless your setup depends on concurrent execution for correctness. +* For real unit tests of one actor body at a time, there soon will be a special ***TestActorRef*** which allows access to the innards and enables running without a dispatcher. + +TestKit +------- + +The TestKit is a trait which you can mix into your test class to setup a test harness consisting of an test actor, which is implicitly available as sender reference, methods for querying and asserting features of messages received by said actor, and finally methods which provide a DSL for timing assertions. + +Ray Roestenburg has written a great article on using the TestKit: ``_. Here is a short teaser: + +.. code-block:: scala + + class SomeSpec extends WordSpec with MustMatchers with TestKit { + + val worker = actorOf(...) + + "A Worker" must { + "send timely replies" in { + within (50 millis) { + worker ! "some work" + expectMsg("some result") + expectNoMsg + } + } + } + } + +His full example is also available `here `_. + +CallingThreadDispatcher +----------------------- + +This special purpose dispatcher was conceived to enable collection of the full stack trace accumulated during processing of a complete message chain. The idea is to run invocations always on the calling thread, except when the target actor is already running on the current thread; in that case it is necessary to queue the invocation and run it after the current invocation on that actor has finished processing. This design implies that any invocation which blocks waiting on some future action to be done by the current thread will dead-lock. 
Hence, the CallingThreadDispatcher offers strictly more possibilities to dead-lock than a standard dispatcher. + +One nice property is that this feature can help verify that your design is dead-lock free: if you run only on this dispatcher and utilize only one thread, then a successful run implies that for the given set of inputs there cannot be a dead-lock. (This is unfortunately not a hard guarantee, as long as your actor behavior depends on the dispatcher used, e.g. you could sabotage it by explicitly dead-locking only if self.dispatcher != CallingThreadDispatcher.) + +TestActorRef (coming soon ...) +------------------------------ + diff --git a/akka-docs/pending/third-party-integrations.rst b/akka-docs/pending/third-party-integrations.rst new file mode 100644 index 0000000000..579c3123d0 --- /dev/null +++ b/akka-docs/pending/third-party-integrations.rst @@ -0,0 +1,21 @@ +Third-party Integrations +======================== + +The Play! Framework +=================== + +Dustin Whitney has done an Akka integration module for the `Play! framework `_. + +Detailed instructions here: ``_. + +There are three screencasts: +# Using Play! with Akka STM: ``_ +# Using Play! with Akka Actors: ``_ +# Using Play! with Akka Remote Actors: ``_ + +The Pinky REST/MVC Framework +============================ + +Peter Hausel has done an Akka integration module for the `Pinky framework `_. + +Read more here: ``_ diff --git a/akka-docs/pending/transactors-java.rst b/akka-docs/pending/transactors-java.rst new file mode 100644 index 0000000000..6547063703 --- /dev/null +++ b/akka-docs/pending/transactors-java.rst @@ -0,0 +1,265 @@ +**Transactors (Java)** +============================================================ + +Module stability: **SOLID** + +Why Transactors? +================ + +Actors are excellent for solving problems where you have many independent processes that can work in isolation and only interact with other Actors through message passing. This model fits many problems. 
But the actor model is unfortunately a terrible model for implementing truly shared state. E.g. when you need to have consensus and a stable view of state across many components. The classic example is the bank account where clients can deposit and withdraw, in which each operation needs to be atomic. For detailed discussion on the topic see `this JavaOne presentation `_. + +**STM** on the other hand is excellent for problems where you need consensus and a stable view of the state by providing compositional transactional shared state. Some of the really nice traits of STM are that transactions compose, and it raises the abstraction level from lock-based concurrency. + +Akka's Transactors combine Actors and STM to provide the best of the Actor model (concurrency and asynchronous event-based programming) and STM (compositional transactional shared state) by providing transactional, compositional, asynchronous, event-based message flows. + +If you need Durability then you should not use one of the in-memory data structures but one of the persistent ones. + +Generally, the STM is not needed very often when working with Akka. Some use-cases (that we can think of) are: +# When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. This might not be needed often, but when you do need it then you are screwed without it. +# When you want to share a datastructure across actors. +# When you need to use the persistence modules. + +Actors and STM +-------------- + +You can combine Actors and STM in several ways. An Actor may use STM internally so that particular changes are guaranteed to be atomic. Actors may also share transactional datastructures as the STM provides safe shared state across threads. + +It's also possible to coordinate transactions across Actors or threads so that either the transactions in a set all commit successfully or they all fail. 
This is the focus of Transactors and the explicit support for coordinated transactions in this section. + +---- + +Coordinated transactions +======================== + +Akka provides an explicit mechanism for coordinating transactions across actors. Under the hood it uses a ``CountDownCommitBarrier``, similar to a CountDownLatch. + +Here is an example of coordinating two simple counter UntypedActors so that they both increment together in coordinated transactions. If one of them was to fail to increment, the other would also fail. + +.. code-block:: java + + import akka.actor.ActorRef; + + public class Increment { + private ActorRef friend = null; + + public Increment() {} + + public Increment(ActorRef friend) { + this.friend = friend; + } + + public boolean hasFriend() { + return friend != null; + } + + public ActorRef getFriend() { + return friend; + } + } + +.. code-block:: java + + import akka.actor.ActorRef; + import akka.actor.UntypedActor; + import static akka.actor.Actors.*; + import akka.stm.Ref; + import akka.transactor.Atomically; + import akka.transactor.Coordinated; + + public class Counter extends UntypedActor { + private Ref count = new Ref(0); + + private void increment() { + count.set(count.get() + 1); + } + + public void onReceive(Object incoming) throws Exception { + if (incoming instanceof Coordinated) { + Coordinated coordinated = (Coordinated) incoming; + Object message = coordinated.getMessage(); + if (message instanceof Increment) { + Increment increment = (Increment) message; + if (increment.hasFriend()) { + increment.getFriend().sendOneWay(coordinated.coordinate(new Increment())); + } + coordinated.atomic(new Atomically() { + public void atomically() { + increment(); + } + }); + } + } else if (incoming instanceof String) { + String message = (String) incoming; + if (message.equals("GetCount")) { + getContext().replyUnsafe(count.get()); + } + } + } + } + +.. 
code-block:: java + + ActorRef counter1 = actorOf(Counter.class).start(); + ActorRef counter2 = actorOf(Counter.class).start(); + + counter1.sendOneWay(new Coordinated(new Increment(counter2))); + +To start a new coordinated transaction set that you will also participate in, just create a ``Coordinated`` object: + +.. code-block:: java + + Coordinated coordinated = new Coordinated(); + +To start a coordinated transaction that you won't participate in yourself you can create a ``Coordinated`` object with a message and send it directly to an actor. The recipient of the message will be the first member of the coordination set: + +.. code-block:: java + + actor.sendOneWay(new Coordinated(new Message())); + +To include another actor in the same coordinated transaction set that you've created or received, use the ``coordinate`` method on that object. This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. + +.. code-block:: java + + actor.sendOneWay(coordinated.coordinate(new Message())); + +To enter the coordinated transaction use the atomic method of the coordinated object. This accepts either an ``akka.transactor.Atomically`` object, or an ``Atomic`` object the same as used normally in the STM (just don't execute it - the coordination will do that). + +.. code-block:: java + + coordinated.atomic(new Atomically() { + public void atomically() { + // do something in a transaction + } + }); + +The coordinated transaction will wait for the other transactions before committing. If any of the coordinated transactions fail then they all fail. + +---- + +UntypedTransactor +================= + +UntypedTransactors are untyped actors that provide a general pattern for coordinating transactions, using the explicit coordination described above. + +Here's an example of a simple untyped transactor that will join a coordinated transaction: + +.. 
code-block:: java + + import akka.transactor.UntypedTransactor; + + public class Counter extends UntypedTransactor { + Ref count = new Ref(0); + + public void atomically(Object message) { + if (message instanceof Increment) { + count.set(count.get() + 1); + } + } + } + +You could send this Counter transactor a ``Coordinated(Increment)`` message. If you were to send it just an ``Increment`` message it will create its own ``Coordinated`` (but in this particular case wouldn't be coordinating transactions with any other transactors). + +To coordinate with other transactors override the ``coordinate`` method. The ``coordinate`` method maps a message to a set of ``SendTo`` objects, pairs of ``ActorRef`` and a message. You can use the ``include`` and ``sendTo`` methods to easily coordinate with other transactors. + +Example of coordinating an increment, similar to the explicitly coordinated example: + +.. code-block:: java + + import akka.transactor.UntypedTransactor; + import akka.transactor.SendTo; + import akka.stm.Ref; + + import java.util.Set; + + public class Counter extends UntypedTransactor { + Ref count = new Ref(0); + + @Override public Set coordinate(Object message) { + if (message instanceof Increment) { + Increment increment = (Increment) message; + if (increment.hasFriend()) + return include(increment.getFriend(), new Increment()); + } + return nobody(); + } + + public void atomically(Object message) { + if (message instanceof Increment) { + count.set(count.get() + 1); + } + } + } + +To exeucte directly before or after the coordinated transaction, override the ``before`` and ``after`` methods. These methods also expect partial functions like the receive method. They do not execute within the transaction. + +To completely bypass coordinated transactions override the ``normally`` method. Any message matched by ``normally`` will not be matched by the other methods, and will not be involved in coordinated transactions. 
In this method you can implement normal actor behavior, or use the normal STM atomic for local transactions. + +---- + +Coordinating Typed Actors +========================= + +It's also possible to use coordinated transactions with typed actors. You can explicitly pass around ``Coordinated`` objects, or use built-in support with the ``@Coordinated`` annotation and the ``Coordination.coordinate`` method. + +To specify a method should use coordinated transactions add the ``@Coordinated`` annotation. **Note**: the ``@Coordinated`` annotation will only work with void (one-way) methods. + +.. code-block:: java + + public interface Counter { + @Coordinated public void increment(); + public Integer get(); + } + +To coordinate transactions use a ``coordinate`` block. This accepts either an ``akka.transactor.Atomically`` object, or an ``Atomic`` object liked used in the STM (but don't execute it). The first boolean parameter specifies whether or not to wait for the transactions to complete. + +.. code-block:: java + + Coordination.coordinate(true, new Atomically() { + public void atomically() { + counter1.increment(); + counter2.increment(); + } + }); + +Here's an example of using ``@Coordinated`` with a TypedActor to coordinate increments: + +.. code-block:: java + + import akka.transactor.annotation.Coordinated; + + public interface Counter { + @Coordinated public void increment(); + public Integer get(); + } + +.. 
code-block:: java + + import akka.actor.TypedActor; + import akka.stm.Ref; + + public class CounterImpl extends TypedActor implements Counter { + private Ref count = new Ref(0); + + public void increment() { + count.set(count.get() + 1); + } + + public Integer get() { + return count.get(); + } + } + +``_ +Counter counter1 = (Counter) TypedActor.newInstance(Counter.class, CounterImpl.class); +Counter counter2 = (Counter) TypedActor.newInstance(Counter.class, CounterImpl.class); + +Coordination.coordinate(true, new Atomically() { + public void atomically() { + counter1.increment(); + counter2.increment(); + } +}); + +TypedActor.stop(counter1); +TypedActor.stop(counter2); +``_ diff --git a/akka-docs/pending/transactors-scala.rst b/akka-docs/pending/transactors-scala.rst new file mode 100644 index 0000000000..454fffb6a6 --- /dev/null +++ b/akka-docs/pending/transactors-scala.rst @@ -0,0 +1,244 @@ +**Transactors (Scala)** +============================================================= + +Module stability: **SOLID** + +Why Transactors? +================ + +Actors are excellent for solving problems where you have many independent processes that can work in isolation and only interact with other Actors through message passing. This model fits many problems. But the actor model is unfortunately a terrible model for implementing truly shared state. E.g. when you need to have consensus and a stable view of state across many components. The classic example is the bank account where clients can deposit and withdraw, in which each operation needs to be atomic. For detailed discussion on the topic see `this JavaOne presentation `_. + +**STM** on the other hand is excellent for problems where you need consensus and a stable view of the state by providing compositional transactional shared state. Some of the really nice traits of STM are that transactions compose, and it raises the abstraction level from lock-based concurrency. 
+ +Akka's Transactors combine Actors and STM to provide the best of the Actor model (concurrency and asynchronous event-based programming) and STM (compositional transactional shared state) by providing transactional, compositional, asynchronous, event-based message flows. + +If you need Durability then you should not use one of the in-memory data structures but one of the persistent ones. + +Generally, the STM is not needed very often when working with Akka. Some use-cases (that we can think of) are: +# When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. +# When you want to share a datastructure across actors. +# When you need to use the persistence modules. + +Actors and STM +-------------- + +You can combine Actors and STM in several ways. An Actor may use STM internally so that particular changes are guaranteed to be atomic. Actors may also share transactional datastructures as the STM provides safe shared state across threads. + +It's also possible to coordinate transactions across Actors or threads so that either the transactions in a set all commit successfully or they all fail. This is the focus of Transactors and the explicit support for coordinated transactions in this section. + +---- + +Coordinated transactions +======================== + +Akka provides an explicit mechanism for coordinating transactions across Actors. Under the hood it uses a ``CountDownCommitBarrier``, similar to a CountDownLatch. + +Here is an example of coordinating two simple counter Actors so that they both increment together in coordinated transactions. If one of them was to fail to increment, the other would also fail. + +.. 
code-block:: scala + + import akka.transactor.Coordinated + import akka.stm.Ref + import akka.actor.{Actor, ActorRef} + + case class Increment(friend: Option[ActorRef] = None) + case object GetCount + + class Counter extends Actor { + val count = Ref(0) + + def receive = { + case coordinated @ Coordinated(Increment(friend)) => { + friend foreach (_ ! coordinated(Increment())) + coordinated atomic { + count alter (_ + 1) + } + } + case GetCount => self.reply(count.get) + } + } + + val counter1 = Actor.actorOf[Counter].start() + val counter2 = Actor.actorOf[Counter].start() + + counter1 ! Coordinated(Increment(Some(counter2))) + + ... + + counter1 !! GetCount // Some(1) + + counter1.stop() + counter2.stop() + +To start a new coordinated transaction set that you will also participate in, just create a ``Coordinated`` object: + +.. code-block:: scala + + val coordinated = Coordinated() + +To start a coordinated transaction that you won't participate in yourself you can create a ``Coordinated`` object with a message and send it directly to an actor. The recipient of the message will be the first member of the coordination set: + +.. code-block:: scala + + actor ! Coordinated(Message) + +To receive a coordinated message in an actor simply match it in a case statement: + +.. code-block:: scala + + def receive = { + case coordinated @ Coordinated(Message) => ... + } + +To include another actor in the same coordinated transaction set that you've created or received, use the apply method on that object. This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. + +.. code-block:: scala + + actor ! coordinated(Message) + +To enter the coordinated transaction use the atomic method of the coordinated object: + +.. code-block:: scala + + coordinated atomic { + // do something in transaction ... + } + +The coordinated transaction will wait for the other transactions before committing. 
If any of the coordinated transactions fail then they all fail. + +---- + +Transactor +========== + +Transactors are actors that provide a general pattern for coordinating transactions, using the explicit coordination described above. + +Here's an example of a simple transactor that will join a coordinated transaction: + +.. code-block:: scala + + import akka.transactor.Transactor + import akka.stm.Ref + + case object Increment + + class Counter extends Transactor { + val count = Ref(0) + + def atomically = { + case Increment => count alter (_ + 1) + } + } + +You could send this Counter transactor a ``Coordinated(Increment)`` message. If you were to send it just an ``Increment`` message it will create its own ``Coordinated`` (but in this particular case wouldn't be coordinating transactions with any other transactors). + +To coordinate with other transactors override the ``coordinate`` method. The ``coordinate`` method maps a message to a set of ``SendTo`` objects, pairs of ``ActorRef`` and a message. You can use the ``include`` and ``sendTo`` methods to easily coordinate with other transactors. The ``include`` method will send on the same message that was received to other transactors. The ``sendTo`` method allows you to specify both the actor to send to, and the message to send. + +Example of coordinating an increment: + +.. code-block:: scala + + import akka.transactor.Transactor + import akka.stm.Ref + + case object Increment + + class FriendlyCounter(friend: ActorRef) extends Transactor { + val count = Ref(0) + + override def coordinate = { + case Increment => include(friend) + } + + def atomically = { + case Increment => count alter (_ + 1) + } + } + +Using ``include`` to include more than one transactor: + +.. code-block:: scala + + override def coordinate = { + case Message => include(actor1, actor2, actor3) + } + +Using ``sendTo`` to coordinate transactions but pass-on a different message than the one that was received: + +.. 
code-block:: scala + + override def coordinate = { + case Message => sendTo(someActor -> SomeOtherMessage) + case SomeMessage => sendTo(actor1 -> Message1, actor2 -> Message2) + } + +To exeucte directly before or after the coordinated transaction, override the ``before`` and ``after`` methods. These methods also expect partial functions like the receive method. They do not execute within the transaction. + +To completely bypass coordinated transactions override the ``normally`` method. Any message matched by ``normally`` will not be matched by the other methods, and will not be involved in coordinated transactions. In this method you can implement normal actor behavior, or use the normal STM atomic for local transactions. + +---- + +Coordinating Typed Actors +========================= + +It's also possible to use coordinated transactions with typed actors. You can explicitly pass around ``Coordinated`` objects, or use built-in support with the ``@Coordinated`` annotation and the ``Coordination.coordinate`` method. + +To specify a method should use coordinated transactions add the ``@Coordinated`` annotation. **Note**: the ``@Coordinated`` annotation only works with methods that return Unit (one-way methods). + +.. code-block:: scala + + trait Counter { + @Coordinated def increment: Unit + def get: Int + } + +To coordinate transactions use a ``coordinate`` block: + +.. code-block:: scala + + coordinate { + counter1.increment + counter2.increment + } + +Here's an example of using ``@Coordinated`` with a TypedActor to coordinate increments. + +.. code-block:: scala + + import akka.actor.TypedActor + import akka.stm.Ref + import akka.transactor.annotation.Coordinated + import akka.transactor.Coordination._ + + trait Counter { + @Coordinated def increment: Unit + def get: Int + } + + class CounterImpl extends TypedActor with Counter { + val ref = Ref(0) + def increment = ref alter (_ + 1) + def get = ref.get + } + + ... 
+ + val counter1 = TypedActor.newInstance(classOf[Counter], classOf[CounterImpl]) + val counter2 = TypedActor.newInstance(classOf[Counter], classOf[CounterImpl]) + + coordinate { + counter1.increment + counter2.increment + } + + TypedActor.stop(counter1) + TypedActor.stop(counter2) + +The ``coordinate`` block will wait for the transactions to complete. If you do not want to wait then you can specify this explicitly: + +``_ +coordinate(wait = false) { + counter1.increment + counter2.increment +} +``_ diff --git a/akka-docs/pending/tutorial-chat-server-java.rst b/akka-docs/pending/tutorial-chat-server-java.rst new file mode 100644 index 0000000000..4f0daaa0de --- /dev/null +++ b/akka-docs/pending/tutorial-chat-server-java.rst @@ -0,0 +1,7 @@ +Tutorial: write a scalable, fault-tolerant, persistent network chat server and client (Java) +============================================================================================ + +Here is a couple of ports of the Scala API chat sample application in the `Scala tutorial `_. + +``_ +``_ diff --git a/akka-docs/pending/tutorial-chat-server-scala.rst b/akka-docs/pending/tutorial-chat-server-scala.rst new file mode 100644 index 0000000000..9d35abddd9 --- /dev/null +++ b/akka-docs/pending/tutorial-chat-server-scala.rst @@ -0,0 +1,531 @@ +Tutorial: write a scalable, fault-tolerant, persistent network chat server and client (Scala) +============================================================================================= + +Introduction +------------ + +`Tutorial source code `_. + +Writing correct concurrent, fault-tolerant and scalable applications is too hard. Most of the time it's because we are using the wrong tools and the wrong level of abstraction. + +`Akka `_ is an attempt to change that. + +Akka uses the Actor Model together with Software Transactional Memory to raise the abstraction level and provide a better platform to build correct concurrent and scalable applications. 
+ +For fault-tolerance Akka adopts the "Let it crash", also called "Embrace failure", model which has been used with great success in the telecom industry to build applications that self-heal, systems that never stop. + +Actors also provides the abstraction for transparent distribution and the basis for truly scalable and fault-tolerant applications. + +Akka is Open Source and available under the Apache 2 License. + +In this article we will introduce you to Akka and see how we can utilize it to build a highly concurrent, scalable and fault-tolerant network server. + +But first let's take a step back and discuss what Actors really are and what they are useful for. + +Actors +------ + +`The Actor Model `_ provides a higher level of abstraction for writing concurrent and distributed systems. It alleviates the developer from having to deal with explicit locking and thread management. It makes it easier to write correct concurrent and parallel systems. Actors are really nothing new, they were defined in the 1963 paper by Carl Hewitt and have been popularized by the Erlang language which emerged in the mid 80s. It has been used by for example at Ericsson with great success to build highly concurrent and extremely reliable (99.9999999 % availability - 31 ms/year downtime) telecom systems. + +Actors encapsulate state and behavior into a lightweight process/thread. In a sense they are like OO objects but with a major semantic difference; they *do not* share state with any other Actor. Each Actor has its own view of the world and can only have impact on other Actors by sending messages to them. Messages are sent asynchronously and non-blocking in a so-called "fire-and-forget" manner where the Actor sends off a message to some other Actor and then do not wait for a reply but goes off doing other things or are suspended by the runtime. Each Actor has a mailbox (ordered message queue) in which incoming messages are processed one by one. 
Since all processing is done asynchronously and Actors do not block and consume any resources while waiting for messages, Actors tend to give very good concurrency and scalability characteristics and are excellent for building event-based systems. + +Creating Actors +--------------- + +Akka has both a `Scala API `_ and a `Java API `_. In this article we will only look at the Scala API since that is the most expressive one. The article assumes some basic Scala knowledge, but even if you don't know Scala I don't think it will be too hard to follow along anyway. + +Akka has adopted the same style of writing Actors as Erlang in which each Actor has an explicit message handler which does pattern matching to match on the incoming messages. + +Actors can be created either by: +* Extending the 'Actor' class and implementing the 'receive' method. +* Creating an anonymous Actor using one of the 'actor' methods. + +Here is a little example before we dive into a more interesting one. + +.. code-block:: scala + + class MyActor extends Actor { + def receive = { + case "test" => println("received test") + case _ => println("received unknown message") + } + } + + val myActor = Actor.actorOf[MyActor] + myActor.start() + +From this call we get a handle to the 'Actor' called 'ActorRef', which we can use to interact with the Actor. + +The 'actorOf' factory method can be imported like this: + +.. code-block:: scala + + import akka.actor.Actor.actorOf + + val a = actorOf[MyActor] + +From now on we will assume that it is imported like this and can use it directly. + +Akka Actors are extremely lightweight. Each Actor consumes ~600 bytes, which means that you can create 6.5 million Actors on 4 GB RAM. + +Messages are sent using the '!' operator: + +.. code-block:: scala + + myActor ! "test" + +Sample application +------------------ + +We will try to write a simple chat/IM system. It is client-server based and uses remote Actors to implement remote clients. 
Even if it is not likely that you will ever write a chat system I think that it can be a useful exercise since it uses patterns and idioms found in many other use-cases and domains. + +We will use many of the features of Akka along the way. In particular; Actors, fault-tolerance using Actor supervision, remote Actors, Software Transactional Memory (STM) and persistence. + +But let's start by defining the messages that will flow in our system. + +Creating messages +----------------- + +It is very important that all messages that will be sent around in the system are immutable. The Actor model relies on the simple fact that no state is shared between Actors and the only way to guarantee that is to make sure we don't pass mutable state around as part of the messages. + +In Scala we have something called `case classes `_. These make excellent messages since they are both immutable and great to pattern match on. + +Let's now start by creating the messages that will flow in our system. + +.. code-block:: scala + + sealed trait Event + case class Login(user: String) extends Event + case class Logout(user: String) extends Event + case class GetChatLog(from: String) extends Event + case class ChatLog(log: List[String]) extends Event + case class ChatMessage(from: String, message: String) extends Event + +As you can see with these messages we can log in and out, send a chat message and ask for and get a reply with all the messages in the chat log so far. + +Client: Sending messages +------------------------ + +Our client wraps each message send in a function, making it a bit easier to use. Here we assume that we have a reference to the chat service so we can communicate with it by sending messages. Messages are sent with the '!' operator (pronounced "bang"). This sends off a message asynchronously and does not wait for a reply. + +Sometimes however, there is a need for sequential logic, sending a message and waiting for the reply before doing anything else. 
In Akka we can achieve that using the '!!' ("bangbang") operator. When sending a message with '!!' we do not return immediately but wait for a reply using a `Future `_. A 'Future' is a promise that we will get a result later but with the difference from regular method dispatch that the OS thread we are running on is put to sleep while waiting and that we can set a time-out for how long we wait before bailing out, retrying or doing something else. The '!!' function returns a `scala.Option `_ which implements the `Null Object pattern `_. It has two subclasses; 'None' which means no result and 'Some(value)' which means that we got a reply. The 'Option' class has a lot of great methods to work with the case of not getting a defined result. F.e. as you can see below we are using the 'getOrElse' method which will try to return the result and if there is no result defined invoke the "...OrElse" statement. + +.. code-block:: scala + + class ChatClient(val name: String) { + val chat = Actor.remote.actorFor("chat:service", "localhost", 2552) + + def login = chat ! Login(name) + def logout = chat ! Logout(name) + def post(message: String) = chat ! ChatMessage(name, name + ": " + message) + def chatLog = (chat !! GetChatLog(name)).as[ChatLog].getOrElse(throw new Exception("Couldn't get the chat log from ChatServer")) + } + +As you can see, we are using the 'Actor.remote.actorFor' to lookup the chat server on the remote node. From this call we will get a handle to the remote instance and can use it as it is local. + +Session: Receiving messages +--------------------------- + +Now we are done with the client side and let's dig into the server code. We start by creating a user session. The session is an Actor and is defined by extending the 'Actor' trait. This trait has one abstract method that we have to define; 'receive' which implements the message handler for the Actor. 
+ +In our example the session has state in the form of a 'List' with all the messages sent by the user during the session. It takes two parameters in its constructor: the user name and a reference to an Actor implementing the persistent message storage. For both of the messages it responds to, 'ChatMessage' and 'GetChatLog', it passes them on to the storage Actor. + +If you look closely (in the code below) you will see that when passing on the 'GetChatLog' message we are not using '!' but 'forward'. This is similar to '!' but with the important difference that it passes the original sender reference, in this case to the storage Actor. This means that the storage can use this reference to reply to the original sender (our client) directly. + +.. code-block:: scala + + class Session(user: String, storage: ActorRef) extends Actor { + private val loginTime = System.currentTimeMillis + private var userLog: List[String] = Nil + + EventHandler.info(this, "New session for user [%s] has been created at [%s]".format(user, loginTime)) + + def receive = { + case msg @ ChatMessage(from, message) => + userLog ::= message + storage ! msg + + case msg @ GetChatLog(_) => + storage forward msg + } + } + +Let it crash: Implementing fault-tolerance +------------------------------------------ + +Akka's `approach to fault-tolerance `_; the "let it crash" model, is implemented by linking Actors. It is very different from what Java and most non-concurrency oriented languages/frameworks have adopted. It’s a way of dealing with failure that is designed for concurrent and distributed systems. + +Let’s look at concurrency first, and assume we are using non-linked Actors. Throwing an exception in concurrent code will simply blow up the thread that currently executes the Actor. There is no way to find out that things went wrong (apart from seeing the stack trace in the log). There is nothing you can do about it. 
Here linked Actors provide a clean way of both getting notification of the error so you know what happened, as well as the Actor that crashed, so you can do something about it. + +Linking Actors allows you to create sets of Actors where you can be sure that either: + +* All are dead +* All are alive + +This is very useful when you have hundreds of thousands of concurrent Actors. Some Actors might have implicit dependencies and together implement a service, computation, user session etc.; for these, being able to group them is very nice. + +Akka encourages non-defensive programming. Don’t try to prevent things from going wrong, because they will, whether you want it or not. Instead; expect failure as a natural state in the life-cycle of your app, crash early and let someone else (that sees the whole picture), deal with it. + +Now let’s look at distributed Actors. As you probably know, you can’t build a fault-tolerant system with just one single node, but you need at least two. Also, you (usually) need to know if one node is down and/or the service you are talking to on the other node is down. Here Actor supervision/linking is a critical tool for not only monitoring the health of remote services, but to actually manage the service, do something about the problem if the Actor or node is down. This could be restarting it on the same node or on another node. + +To sum things up, it is a very different way of thinking but a way that is very useful (if not critical) to building fault-tolerant highly concurrent and distributed applications. + +Supervisor hierarchies +---------------------- + +A supervisor is a regular Actor that is responsible for starting, stopping and monitoring its child Actors. The basic idea of a supervisor is that it should keep its child Actors alive by restarting them when necessary. This makes for a completely different view on how to write fault-tolerant servers. 
Instead of trying all things possible to prevent an error from happening, this approach embraces failure. It shifts the view to look at errors as something natural and something that will happen and instead of trying to prevent it; embrace it. Just "let it crash" and reset the service to a stable state through restart. + +Akka has two different restart strategies; All-For-One and One-For-One. + +* OneForOne: Restart only the component that has crashed. +* AllForOne: Restart all the components that the supervisor is managing, including the one that has crashed. + +The latter strategy should be used when you have a certain set of components that are coupled in such a way that if one crashes they all need to be reset to a stable state before continuing. + +Chat server: Supervision, Traits and more +----------------------------------------- + +There are two ways you can define an Actor to be a supervisor; declaratively and dynamically. In this example we use the dynamic approach. There are two things we have to do: + +* Define the fault handler by setting the 'faultHandler' member field to the strategy we want. +* Define the exceptions we want to "trap", i.e. which exceptions should be handled according to the fault handling strategy we have defined. This is done by setting the 'trapExit' member field to a 'List' with all exceptions we want to trap. + +The last thing we have to do to supervise Actors (in our example the storage Actor) is to 'link' the Actor. Invoking 'link(actor)' will create a link between the Actor passed as argument into 'link' and ourselves. This means that we will now get a notification if the linked Actor is crashing and if the cause of the crash, the exception, matches one of the exceptions in our 'trapExit' list then the crashed Actor is restarted according to the fault handling strategy defined in our 'faultHandler'. We also have the 'unlink(actor)' function which disconnects the linked Actor from the supervisor. 
+ +In our example we are using a method called 'spawnLink(actor)' which creates, starts and links the Actor in an atomic operation. The linking and unlinking is done in 'preStart' and 'postStop' callback methods which are invoked by the runtime when the Actor is started and shut down (shutting down is done by invoking 'actor.stop()'). In these methods we initialize our Actor, by starting and linking the storage Actor and clean up after ourselves by shutting down all the user session Actors and the storage Actor. + +That is it. Now we have implemented the supervising part of the fault-tolerance for the storage Actor. But before we dive into the 'ChatServer' code there are some more things worth mentioning about its implementation. + +It defines an abstract member field holding the 'ChatStorage' implementation the server wants to use. We do not define that in the 'ChatServer' directly since we want to decouple it from the actual storage implementation. + +The 'ChatServer' is a 'trait', which is Scala's version of mixins. A mixin can be seen as an interface with an implementation and is a very powerful tool in Object-Oriented design that makes it possible to design the system into small, reusable, highly cohesive, loosely coupled parts that can be composed into larger object and components structures. + +I'll try to show you how we can make use Scala's mixins to decouple the Actor implementation from the business logic of managing the user sessions, routing the chat messages and storing them in the persistent storage. Each of these separate parts of the server logic will be represented by its own trait; giving us four different isolated mixins; 'Actor', 'SessionManagement', 'ChatManagement' and 'ChatStorageFactory' This will give us as loosely coupled system with high cohesion and reusability. At the end of the article I'll show you how you can compose these mixins into a the complete runtime component we like. + +.. code-block:: scala + + /** + * Chat server. 
Manages sessions and redirects all other messages to the Session for the client. + */ + trait ChatServer extends Actor { + self.faultHandler = OneForOneStrategy(List(classOf[Exception]),5, 5000) + val storage: ActorRef + + EventHandler.info(this, "Chat server is starting up...") + + // actor message handler + def receive: Receive = sessionManagement orElse chatManagement + + // abstract methods to be defined somewhere else + protected def chatManagement: Receive + protected def sessionManagement: Receive + protected def shutdownSessions(): Unit + + override def postStop = { + EventHandler.info(this, "Chat server is shutting down...") + shutdownSessions + self.unlink(storage) + storage.stop() + } + } + +If you look at the 'receive' message handler function you can see that we have defined it but instead of adding our logic there we are delegating to two different functions; 'sessionManagement' and 'chatManagement', chaining them with 'orElse'. These two functions are defined as abstract in our 'ChatServer' which means that they have to be provided by some other mixin or class when we instantiate our 'ChatServer'. Naturally we will put the 'sessionManagement' implementation in the 'SessionManagement' trait and the 'chatManagement' implementation in the 'ChatManagement' trait. First let's create the 'SessionManagement' trait. + +Chaining partial functions like this is a great way of composing functionality in Actors. You can for example define one default message handler, handling generic messages, in the base Actor and then let deriving Actors extend that functionality by defining additional message handlers. There is a section on how that is done `here `_. + +Session management +------------------ + +The session management is defined in the 'SessionManagement' trait in which we implement the two abstract methods in the 'ChatServer'; 'sessionManagement' and 'shutdownSessions'. 
+ +The 'SessionManagement' trait holds a 'HashMap' with all the session Actors mapped by user name as well as a reference to the storage (to be able to pass it in to each newly created 'Session'). + +The 'sessionManagement' function performs session management by responding to the 'Login' and 'Logout' messages. For each 'Login' message it creates a new 'Session' Actor, starts it and puts it in the 'sessions' Map and for each 'Logout' message it does the opposite; shuts down the user's session and removes it from the 'sessions' Map. + +The 'shutdownSessions' function simply shuts all the sessions Actors down. That completes the user session management. + +.. code-block:: scala + + /** + * Implements user session management. + *

+ * Uses self-type annotation (this: Actor =>) to declare that it needs to be mixed in with an Actor. + */ + trait SessionManagement { this: Actor => + + val storage: ActorRef // needs someone to provide the ChatStorage + val sessions = new HashMap[String, ActorRef] + + protected def sessionManagement: Receive = { + case Login(username) => + EventHandler.info(this, "User [%s] has logged in".format(username)) + val session = actorOf(new Session(username, storage)) + session.start() + sessions += (username -> session) + + case Logout(username) => + EventHandler.info(this, "User [%s] has logged out".format(username)) + val session = sessions(username) + session.stop() + sessions -= username + } + + protected def shutdownSessions = + sessions.foreach { case (_, session) => session.stop() } + } + +Chat message management +----------------------- + +Chat message management is implemented by the 'ChatManagement' trait. It has an abstract 'HashMap' session member field with all the sessions. Since it is abstract it needs to be mixed in with someone that can provide this reference. If this dependency is not resolved when composing the final component, you will get a compilation error. + +It implements the 'chatManagement' function which responds to two different messages; 'ChatMessage' and 'GetChatLog'. It simply gets the session for the user (the sender of the message) and routes the message to this session. Here we also use the 'forward' function to make sure the original sender reference is passed along to allow the end receiver to reply back directly. + +.. code-block:: scala + + /** + * Implements chat management, e.g. chat message dispatch. + *

+ * Uses self-type annotation (this: Actor =>) to declare that it needs to be mixed in with an Actor. + */ + trait ChatManagement { this: Actor => + val sessions: HashMap[String, ActorRef] // needs someone to provide the Session map + + protected def chatManagement: Receive = { + case msg @ ChatMessage(from, _) => getSession(from).foreach(_ ! msg) + case msg @ GetChatLog(from) => getSession(from).foreach(_ forward msg) + } + + private def getSession(from: String) : Option[ActorRef] = { + if (sessions.contains(from)) + Some(sessions(from)) + else { + EventHandler.info(this, "Session expired for %s".format(from)) + None + } + } + } + +Using an Actor as a message broker, as in this example, is a very common pattern with many variations; load-balancing, master/worker, map/reduce, replication, logging etc. It becomes even more useful with remote Actors when we can use it to route messages to different nodes. + +STM and Transactors +------------------- + +Actors are excellent for solving problems where you have many independent processes that can work in isolation and only interact with other Actors through message passing. This model fits many problems. But the Actor model is unfortunately a terrible model for implementing truly shared state. E.g. when you need to have consensus and a stable view of state across many components. The classic example is the bank account where clients can deposit and withdraw, in which each operation needs to be atomic. For detailed discussion on the topic see this `presentation `_. + +`Software Transactional Memory `_ (STM) on the other hand is excellent for problems where you need consensus and a stable view of the state by providing compositional transactional shared state. Some of the really nice traits of STM are that transactions compose and that it raises the abstraction level from lock-based concurrency. 
+ +Akka has an `STM implementation `_ that is based on the same ideas as found in the `Clojure language `_; Managed References working with immutable data. + +Akka allows you to combine Actors and STM into what we call `Transactors `_ (short for Transactional Actors). These allow you to optionally combine Actors and STM, providing IMHO the best of the Actor model (simple concurrency and asynchronous event-based programming) and STM (compositional transactional shared state) by providing transactional, compositional, asynchronous, event-based message flows. You don't need Transactors all the time but when you do need them then you *really need* them. + +Akka currently provides three different transactional abstractions; 'Map', 'Vector' and 'Ref'. They can be shared between multiple Actors and they are managed by the STM. You are not allowed to modify them outside a transaction; if you do so, an exception will be thrown. + +What you get is transactional memory in which multiple Actors are allowed to read and write to the same memory concurrently and if there is a clash between two transactions then both of them are aborted and retried. Aborting a transaction means that the memory is rolled back to the state it was in when the transaction was started. + +In database terms STM gives you 'ACI' semantics; 'Atomicity', 'Consistency' and 'Isolation'. The 'D' in 'ACID'; 'Durability', you can't get with an STM since it is in memory. This however is addressed by the persistence module in Akka. + +Persistence: Storing the chat log +--------------------------------- + +Akka modules provide the possibility of taking the transactional data structures we discussed above and making them persistent. It is an extension to the STM which guarantees that it has the same semantics. + +The `persistence module `_ has pluggable storage back-ends. + +They all implement persistent 'Map', 'Vector' and 'Ref', which can be created and retrieved by id through one of the storage modules. + +.. 
code-block:: scala + + val map = RedisStorage.newMap(id) + val vector = CassandraStorage.newVector(id) + val ref = MongoStorage.newRef(id) + +Chat storage: Backed with simple in-memory +------------------------------------------ + +To keep it simple we implement the persistent storage with an in-memory Vector, i.e. it will not be persistent. We start by creating a 'ChatStorage' trait allowing us to have multiple different storage backends. For example one in-memory and one persistent. + +.. code-block:: scala + + /** + * Abstraction of chat storage holding the chat log. + */ + trait ChatStorage extends Actor + +Our 'MemoryChatStorage' extends the 'ChatStorage' trait. The only state it holds is the 'chatLog' which is a transactional 'Vector'. + +It responds to two different messages; 'ChatMessage' and 'GetChatLog'. The 'ChatMessage' message handler takes the 'message' attribute and appends it to the 'chatLog' vector. Here you can see that we are using the 'atomic { ... }' block to run the vector operation in a transaction. For this in-memory storage it is not important to use a transactional Vector, since it is not shared between actors, but it illustrates the concept. + +The 'GetChatLog' message handler retrieves all the messages in the chat log storage inside an atomic block, iterates over them using the 'map' combinator transforming them from 'Array[Byte]' to 'String'. Then it invokes the 'reply(message)' function that will send the chat log to the original sender; the 'ChatClient'. + +You might remember that the 'ChatServer' was supervising the 'ChatStorage' actor. When we discussed that we showed you the supervising Actor's view. Now is the time for the supervised Actor's side of things. First, a supervised Actor needs to define a life-cycle in which it declares if it should be seen as a: + +* 'Permanent': which means that the actor will always be restarted. 
+* 'Temporary': which means that the actor will not be restarted, but it will be shut down through the regular shutdown process so the 'postStop' callback function will be called. + +We define the 'MemoryChatStorage' as 'Permanent' by setting the 'lifeCycle' member field to 'Permanent'. + +The idea with this crash-early style of designing your system is that the services should just crash and then they should be restarted and reset into a stable state and continue from there. The definition of "stable state" is domain specific and up to the application developer to define. Akka provides two callback functions; 'preRestart' and 'postRestart' that are called right *before* and right *after* the Actor is restarted. Both of these functions take a 'Throwable', the reason for the crash, as argument. In our case we just need to implement the 'postRestart' hook and there re-initialize the 'chatLog' member field with a fresh 'Vector'. + +.. code-block:: scala + + /** + * Memory-backed chat storage implementation. + */ + class MemoryChatStorage extends ChatStorage { + self.lifeCycle = Permanent + + private var chatLog = TransactionalVector[Array[Byte]]() + + EventHandler.info(this, "Memory-based chat storage is starting up...") + + def receive = { + case msg @ ChatMessage(from, message) => + EventHandler.debug(this, "New chat message [%s]".format(message)) + atomic { chatLog + message.getBytes("UTF-8") } + + case GetChatLog(_) => + val messageList = atomic { chatLog.map(bytes => new String(bytes, "UTF-8")).toList } + self.reply(ChatLog(messageList)) + } + + override def postRestart(reason: Throwable) = chatLog = TransactionalVector() + } + +The last thing we need to do in terms of persistence is to create a 'MemoryChatStorageFactory' that will take care of instantiating and resolving the 'val storage: ChatStorage' field in the 'ChatServer' with a concrete implementation of our persistence Actor. + +.. code-block:: scala + + /** + * Creates and links a MemoryChatStorage. 
+ */ + trait MemoryChatStorageFactory { this: Actor => + val storage = this.self.spawnLink[MemoryChatStorage] // starts and links ChatStorage + } + +Composing the full Chat Service +------------------------------- + +We have now created the full functionality for the chat server, all nicely decoupled into isolated and well-defined traits. Now let's bring all these traits together and compose the complete concrete 'ChatService'. + +.. code-block:: scala + + /** + * Class encapsulating the full Chat Service. + * Start service by invoking: + *

+   * val chatService = Actor.actorOf[ChatService].start()
+   * 
+ */ + class ChatService extends + ChatServer with + SessionManagement with + ChatManagement with + MemoryChatStorageFactory { + override def preStart = { + remote.start("localhost", 2552); + remote.register("chat:service", self) //Register the actor with the specified service id + } + } + +Creating a remote server service +-------------------------------- + +As you can see in the section above, we are overriding the Actor's 'preStart' method and are starting up a remote server node by invoking 'remote.start("localhost", 2552)'. This starts up the remote node on address "localhost" and port 2552 which means that it accepts incoming messages on this address. Then we register the ChatService actor in the remote node by invoking 'remote.register("chat:service", self)'. This means that the ChatService will be available to other actors on this specific id, address and port. + +That's it. We're done. Now we have a very simple, but scalable, fault-tolerant, event-driven, persistent chat server that can without problem serve a million concurrent users on a regular workstation. + +Let's use it. + +Sample client chat session +-------------------------- + +Now let's create a simple test runner that logs in, posts some messages and logs out. + +.. code-block:: scala + + /** + * Test runner emulating a chat session. + */ + object ClientRunner { + + def run = { + val client1 = new ChatClient("jonas") + client1.login + val client2 = new ChatClient("patrik") + client2.login + + client1.post("Hi there") + println("CHAT LOG:\n\t" + client1.chatLog.log.mkString("\n\t")) + + client2.post("Hello") + println("CHAT LOG:\n\t" + client2.chatLog.log.mkString("\n\t")) + + client1.post("Hi again") + println("CHAT LOG:\n\t" + client1.chatLog.log.mkString("\n\t")) + + client1.logout + client2.logout + } + } + +Sample code +----------- + +All this code is available as part of the Akka distribution. 
It resides in the './akka-samples/akka-sample-chat' module and have a 'README' file explaining how to run it. + +Or if you rather browse it `online `_. + +Run it +------ + +Download and build Akka + +#. Check out Akka from ``_ +#. Set 'AKKA_HOME' environment variable to the root of the Akka distribution. +#. Open up a shell and step into the Akka distribution root folder. +#. Build Akka by invoking: + +:: + + % sbt update + % sbt dist + +Run a sample chat session + +1. Fire up two shells. For each of them: + + - Step down into to the root of the Akka distribution. + - Set 'export AKKA_HOME=. + - Run 'sbt console' to start up a REPL (interpreter). + +2. In the first REPL you get execute: + +.. code-block:: scala + + import sample.chat._ + import akka.actor.Actor._ + val chatService = actorOf[ChatService].start() + +3. In the second REPL you get execute: + +.. code-block:: scala + + import sample.chat._ + ClientRunner.run + +4. See the chat simulation run. + +5. Run it again to see full speed after first initialization. + +6. In the client REPL, or in a new REPL, you can also create your own client + +.. code-block:: scala + + import sample.chat._ + val myClient = new ChatClient("") + myClient.login + myClient.post("Can I join?") + println("CHAT LOG:\n\t" + myClient.chatLog.log.mkString("\n\t")) + +That's it. Have fun. diff --git a/akka-docs/pending/typed-actors-java.rst b/akka-docs/pending/typed-actors-java.rst new file mode 100644 index 0000000000..2322698ed1 --- /dev/null +++ b/akka-docs/pending/typed-actors-java.rst @@ -0,0 +1,191 @@ +Typed Actors (Java) +=================== + +Module stability: **SOLID** + +The Typed Actors are implemented through `Typed Actors `_. It uses AOP through `AspectWerkz `_ to turn regular POJOs into asynchronous non-blocking Actors with semantics of the Actor Model. E.g. each message dispatch is turned into a message that is put on a queue to be processed by the Typed Actor sequentially one by one. 
+ +If you are using the `Spring Framework `_ then take a look at Akka's `Spring integration `_. + +Creating Typed Actors +--------------------- + +**IMPORTANT:** The Typed Actors class must have access modifier 'public' and can't be a non-static inner class. + +Akka turns POJOs with interface and implementation into asynchronous (Typed) Actors. Akka is using `AspectWerkz’s Proxy `_ implementation, which is the `most performant `_ proxy implementation there exists. + +In order to create a Typed Actor you have to subclass the TypedActor base class. + +Here is an example. + +If you have a POJO with an interface implementation separation like this: + +.. code-block:: java + + interface RegistrationService { + void register(User user, Credentials cred); + User getUserFor(String username); + } + +.. code-block:: java + + import akka.actor.TypedActor; + + public class RegistrationServiceImpl extends TypedActor implements RegistrationService { + public void register(User user, Credentials cred) { + ... // register user + } + + public User getUserFor(String username) { + ... // fetch user by username + return user; + } + } + +Then you can create an Typed Actor out of it by creating it through the 'TypedActor' factory like this: + +.. code-block:: java + + RegistrationService service = + (RegistrationService) TypedActor.newInstance(RegistrationService.class, RegistrationServiceImpl.class, 1000); + // The last parameter defines the timeout for Future calls + +**Creating Typed Actors with non-default constructor** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To create a typed actor that takes constructor arguments use a variant of 'newInstance' or 'newRemoteInstance' that takes an instance of a 'TypedActorFactory' in which you can create the TypedActor in any way you like. If you use this method then make sure that no one can get a reference to the actor instance. 
Touching actor state directly is bypassing the whole actor dispatching mechanism and create race conditions which can lead to corrupt data. + +Here is an example: + +.. code-block:: java + + Service service = TypedActor.newInstance(classOf[Service], new TypedActorFactory() { + public TypedActor create() { + return new ServiceWithConstructorArgsImpl("someString", 500L)); + }); + +Configuration factory class +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Using a configuration object: + +.. code-block:: java + + import static java.util.concurrent.TimeUnit.MILLISECONDS; + import akka.actor.TypedActorConfiguration; + import akka.util.FiniteDuration; + + TypedActorConfiguration config = new TypedActorConfiguration() + .timeout(new FiniteDuration(3000, MILLISECONDS)); + + RegistrationService service = (RegistrationService) TypedActor.newInstance(RegistrationService.class, config); + +However, often you will not use these factory methods but declaratively define the Typed Actors as part of a supervisor hierarchy. More on that in the `Fault Tolerance `_ section. + +Sending messages +---------------- + +Messages are sent simply by invoking methods on the POJO, which is proxy to the "real" POJO now. The arguments to the method are bundled up atomically into an message and sent to the receiver (the actual POJO instance). + +One-way message send +^^^^^^^^^^^^^^^^^^^^ + +Methods that return void are turned into ‘fire-and-forget’ semantics by asynchronously firing off the message and return immediately. In the example above it would be the 'register' method, so if this method is invoked then it returns immediately: + +.. code-block:: java + + // method invocation returns immediately and method is invoke asynchronously using the Actor Model semantics + service.register(user, creds); + +Request-reply message send +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Methods that return something (e.g. 
non-void methods) are turned into ‘send-and-receive-eventually’ semantics by asynchronously firing off the message and waiting on the reply using a Future. + +.. code-block:: java + + // method invocation is asynchronously dispatched using the Actor Model semantics, + // but it blocks waiting on a Future to be resolved in the background + User user = service.getUser(username); + +Generally it is preferred to use fire-forget messages as much as possible since they will never block, e.g. consume a resource by waiting. But sometimes they are neat to use since they: +# Simulate standard Java method dispatch, which is more intuitive for most Java developers +# Are a neat way to model request-reply +# Are useful when you need to do things in a defined order + +The same holds for the 'request-reply-with-future' described below. + +Request-reply-with-future message send +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Methods that return an 'akka.dispatch.Future' are turned into ‘send-and-receive-with-future’ semantics by asynchronously firing off the message and returning immediately with a Future. You need to use the 'future(...)' method in the TypedActor base class to resolve the Future that the client code is waiting on. + +Here is an example: + +..
code-block:: java + + public class MathTypedActorImpl extends TypedActor implements MathTypedActor { + public Future square(int value) { + return future(value * value); + } + } + + MathTypedActor math = TypedActor.newInstance(MathTypedActor.class, MathTypedActorImpl.class); + + // This method will return immediately when called, caller should wait on the Future for the result + Future future = math.square(10); + future.await(); + Integer result = future.get(); + +Stopping Typed Actors +--------------------- + +Once Typed Actors have been created with one of the TypedActor.newInstance methods they need to be stopped with TypedActor.stop to free resources allocated by the created Typed Actor (this is not needed when the Typed Actor is `supervised `_). + +.. code-block:: java + + // Create Typed Actor + RegistrationService service = (RegistrationService) TypedActor.newInstance(RegistrationService.class); + + // ... + + // Free Typed Actor resources + TypedActor.stop(service); + +When the Typed Actor defines a `shutdown callback `_ method it will be invoked on TypedActor.stop. + +How to use the TypedActorContext for runtime information access +--------------------------------------------------------------- + +The 'akka.actor.TypedActorContext' class holds 'runtime type information' (RTTI) for the Typed Actor. This context is a member field in the TypedActor base class and holds for example the current sender reference, the current sender future etc. + +Here is an example of how you can use it in a 'void' (e.g. fire-forget) method to implement request-reply by using the sender reference: + +.. code-block:: java + + class PingImpl extends TypedActor implements Ping { + + public void hit(int count) { + Pong pong = (Pong) getContext().getSender(); + pong.hit(count + 1); + } + } + +If the sender, sender future etc. is not available, then these methods will return 'null' so you should have a way of dealing with this scenario.
+ +Messages and immutability +------------------------- + +**IMPORTANT**: Messages can be any kind of object but have to be immutable (there is a workaround, see next section). Java or Scala can’t enforce immutability (yet) so this has to be by convention. Primitives like String, int, Long are always immutable. Apart from these you have to create your own immutable objects to send as messages. If you pass on a reference to an instance that is mutable then this instance can be modified concurrently by two different Typed Actors and the Actor model is broken leaving you with NO guarantees and most likely corrupt data. + +Akka can help you in this regard. It allows you to turn on an option for serializing all messages, e.g. all parameters to the Typed Actor effectively making a deep clone/copy of the parameters. This will make sending mutable messages completely safe. This option is turned on in the ‘$AKKA_HOME/config/akka.conf’ config file like this: + +.. code-block:: ruby + + akka { + actor { + serialize-messages = on # does a deep clone of messages to ensure immutability + } + } + +This will make a deep clone (using Java serialization) of all parameters. diff --git a/akka-docs/pending/typed-actors-scala.rst b/akka-docs/pending/typed-actors-scala.rst new file mode 100644 index 0000000000..3d03cc93b1 --- /dev/null +++ b/akka-docs/pending/typed-actors-scala.rst @@ -0,0 +1,171 @@ +Typed Actors (Scala) +==================== + +Module stability: **SOLID** + +The Typed Actors are implemented through `Typed Actors `_. It uses AOP through `AspectWerkz `_ to turn regular POJOs into asynchronous non-blocking Actors with semantics of the Actor Model. E.g. each message dispatch is turned into a message that is put on a queue to be processed by the Typed Actor sequentially one by one. + +If you are using the `Spring Framework `_ then take a look at Akka's `Spring integration `_. 
+ +Creating Typed Actors +--------------------- + +**IMPORTANT:** The Typed Actors class must have access modifier 'public' (which is default) and can't be an inner class (unless it is an inner class in an 'object'). + +Akka turns POJOs with interface and implementation into asynchronous (Typed) Actors. Akka is using `AspectWerkz’s Proxy `_ implementation, which is the `most performant `_ proxy implementation there exists. + +In order to create a Typed Actor you have to subclass the TypedActor base class. + +Here is an example. + +If you have a POJO with an interface implementation separation like this: + +.. code-block:: scala + + import akka.actor.TypedActor + + trait RegistrationService { + def register(user: User, cred: Credentials): Unit + def getUserFor(username: String): User + } + +.. code-block:: scala + + public class RegistrationServiceImpl extends TypedActor with RegistrationService { + def register(user: User, cred: Credentials): Unit = { + ... // register user + } + + def getUserFor(username: String): User = { + ... // fetch user by username + user + } + } + +Then you can create an Typed Actor out of it by creating it through the 'TypedActor' factory like this: + +.. code-block:: scala + + val service = TypedActor.newInstance(classOf[RegistrationService], classOf[RegistrationServiceImpl], 1000) + // The last parameter defines the timeout for Future calls + +**Creating Typed Actors with non-default constructor** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To create a typed actor that takes constructor arguments use a variant of 'newInstance' or 'newRemoteInstance' that takes a call-by-name block in which you can create the Typed Actor in any way you like. + +Here is an example: + +.. code-block:: scala + + val service = TypedActor.newInstance(classOf[Service], new ServiceWithConstructorArgs("someString", 500L)) + +Configuration factory class +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Using a configuration object: + +.. 
code-block:: scala + import akka.actor.TypedActorConfiguration + import akka.util.Duration + import akka.util.duration._ + + val config = TypedActorConfiguration() + .timeout(3000 millis) + + val service = TypedActor.newInstance(classOf[RegistrationService], classOf[RegistrationServiceImpl], config) + +However, often you will not use these factory methods but declaratively define the Typed Actors as part of a supervisor hierarchy. More on that in the `Fault Tolerance `_ section. + +Sending messages +---------------- + +Messages are sent simply by invoking methods on the POJO, which is proxy to the "real" POJO now. The arguments to the method are bundled up atomically into an message and sent to the receiver (the actual POJO instance). + +One-way message send +^^^^^^^^^^^^^^^^^^^^ + +Methods that return void are turned into ‘fire-and-forget’ semantics by asynchronously firing off the message and return immediately. In the example above it would be the 'register' method, so if this method is invoked then it returns immediately: + +.. code-block:: java + + // method invocation returns immediately and method is invoke asynchronously using the Actor Model semantics + service.register(user, creds) + +Request-reply message send +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Methods that return something (e.g. non-void methods) are turned into ‘send-and-recieve-eventually’ semantics by asynchronously firing off the message and wait on the reply using a Future. + +.. code-block:: scala + + // method invocation is asynchronously dispatched using the Actor Model semantics, + // but it blocks waiting on a Future to be resolved in the background + val user = service.getUser(username) + +Generally it is preferred to use fire-forget messages as much as possible since they will never block, e.g. consume a resource by waiting. 
But sometimes they are neat to use since they: +# Simulates standard Java method dispatch, which is more intuitive for most Java developers +# Are a neat to model request-reply +# Are useful when you need to do things in a defined order + +Request-reply-with-future message send +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Methods that return a 'akka.dispatch.Future' are turned into ‘send-and-recieve-with-future’ semantics by asynchronously firing off the message and returns immediately with a Future. You need to use the 'future(...)' method in the TypedActor base class to resolve the Future that the client code is waiting on. + +Here is an example: + +.. code-block:: scala + + class MathTypedActorImpl extends TypedActor with MathTypedActor { + def square(x: Int): Future[Integer] = future(x * x) + } + + // create the ping actor + val math = TypedActor.newInstance(classOf[MathTyped], classOf[MathTypedImpl]) + + // This method will return immediately when called, caller should wait on the Future for the result + val future = math.square(10) + future.await + val result: Int = future.get + +Stopping Typed Actors +--------------------- + +Once Typed Actors have been created with one of the TypedActor.newInstance methods they need to be stopped with TypedActor.stop to free resources allocated by the created Typed Actor (this is not needed when the Typed Actor is `supervised `_). + +.. code-block:: scala + + // Create Typed Actor + val service = TypedActor.newInstance(classOf[RegistrationService], classOf[RegistrationServiceImpl], 1000) + + // ... + + // Free Typed Actor resources + TypedActor.stop(service) + +When the Typed Actor defines a `shutdown callback `_ method it will be invoked on TypedActor.stop. + +How to use the TypedActorContext for runtime information access +--------------------------------------------------------------- + +The 'akka.actor.TypedActorContext' class Holds 'runtime type information' (RTTI) for the Typed Actor. 
This context is a member field in the TypedActor base class and holds for example the current sender reference, the current sender future etc. + +Here is an example of how you can use it in a 'void' (e.g. fire-forget) method to implement request-reply by using the sender reference: + +.. code-block:: scala + + class PingImpl extends TypedActor with Ping { + + def hit(count: Int) { + val pong = context.getSender.asInstanceOf[Pong] + pong.hit(count + 1) + } + } + +If the sender, sender future etc. is not available, then these methods will return 'null' so you should have a way of dealing with this scenario. + +Messages and immutability +------------------------- + +**IMPORTANT**: Messages can be any kind of object but have to be immutable (there is a workaround, see next section). Java or Scala can’t enforce immutability (yet) so this has to be by convention. Primitives like String, int, Long are always immutable. Apart from these you have to create your own immutable objects to send as messages. If you pass on a reference to an instance that is mutable then this instance can be modified concurrently by two different Typed Actors and the Actor model is broken leaving you with NO guarantees and most likely corrupt data. diff --git a/akka-docs/pending/untyped-actors-java.rst b/akka-docs/pending/untyped-actors-java.rst new file mode 100644 index 0000000000..760b5fd324 --- /dev/null +++ b/akka-docs/pending/untyped-actors-java.rst @@ -0,0 +1,416 @@ +Actors (Java) +============= + +Module stability: **SOLID** + +The `Actor Model `_ provides a higher level of abstraction for writing concurrent and distributed systems. It alleviates the developer from having to deal with explicit locking and thread management, making it easier to write correct concurrent and parallel systems.
Actors were defined in the 1973 paper by Carl Hewitt but have been popularized by the Erlang language, and used for example at Ericsson with great success to build highly concurrent and reliable telecom systems. + +Defining an Actor class +^^^^^^^^^^^^^^^^^^^^^^^ + +Actors in Java are created either by extending the 'UntypedActor' class and implementing the 'onReceive' method. This method takes the message as a parameter. + +Here is an example: + +.. code-block:: java + + public class SampleUntypedActor extends UntypedActor { + + public void onReceive(Object message) throws Exception { + if (message instanceof String) + EventHandler.info(this, String.format("Received String message: %s", message)); + else + throw new IllegalArgumentException("Unknown message: " + message); + } + } + +Creating Actors +^^^^^^^^^^^^^^^ + +Creating an Actor is done using the 'akka.actor.Actors.actorOf' factory method. This method returns a reference to the UntypedActor's ActorRef. This 'ActorRef' is an immutable serializable reference that you should use to communicate with the actor, send messages, link to it etc. This reference also functions as the context for the actor and holds run-time type information such as sender of the last message, + +.. code-block:: java + + ActorRef myActor = Actors.actorOf(SampleUntypedActor.class); + myActor.start(); + +Normally you would want to import the 'actorOf' method like this: + +.. code-block:: java + + import static akka.actor.Actors.*; + ActorRef myActor = actorOf(SampleUntypedActor.class); + +To avoid prefix it with 'Actors' every time you use it. + +You can also create & start the actor in one statement: + +.. code-block:: java + + ActorRef myActor = actorOf(SampleUntypedActor.class).start(); + +The call to 'actorOf' returns an instance of 'ActorRef'. This is a handle to the 'UntypedActor' instance which you can use to interact with the Actor, like send messages to it etc. more on this shortly. 
The 'ActorRef' is immutable and has a one to one relationship with the Actor it represents. The 'ActorRef' is also serializable and network-aware. This means that you can serialize it, send it over the wire and use it on a remote host and it will still be representing the same Actor on the original node, across the network. + +Creating Actors with non-default constructor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If your UntypedActor has a constructor that takes parameters then you can't create it using 'actorOf(clazz)'. Instead you can use a variant of 'actorOf' that takes an instance of an 'UntypedActorFactory' in which you can create the Actor in any way you like. If you use this method then you need to make sure that no one can get a reference to the actor instance. If they can get a reference to it then they can touch state directly, bypassing the whole actor dispatching mechanism and creating race conditions which can lead to corrupt data. + +Here is an example: + +.. code-block:: java + + ActorRef actor = actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new MyUntypedActor("service:name", 5); + } + }); + +This way of creating the Actor is also great for integrating with Dependency Injection (DI) frameworks like Guice or Spring. + +UntypedActor context +-------------------- + +The UntypedActor base class contains almost no member fields or methods to invoke. It only has the 'onReceive(Object message)' method, which defines the Actor's message handler, and some life-cycle callbacks that you can choose to implement: +## preStart +## postStop +## preRestart +## postRestart + +Most of the API is in the UntypedActorRef, a reference for the actor. This reference is available in the 'getContext()' method in the UntypedActor (or you can use its alias, the 'context()' method, if you prefer). Here, for example, you find methods to reply to messages, send yourself messages, define timeouts, fault tolerance etc., start and stop etc.
+ +Identifying Actors +------------------ + +Each ActorRef has two methods: +* getContext().getUuid(); +* getContext().getId(); + +The difference is that the 'uuid' is generated by the runtime, guaranteed to be unique and can't be modified. While the 'id' can be set by the user (using 'getContext().setId(...)', and defaults to Actor class name. You can retrieve Actors by both UUID and ID using the 'ActorRegistry', see the section further down for details. + +Messages and immutability +------------------------- + +**IMPORTANT**: Messages can be any kind of object but have to be immutable. Akka can’t enforce immutability (yet) so this has to be by convention. + +Send messages +------------- + +Messages are sent to an Actor through one of the 'send' methods. +* 'sendOneWay' means “fire-and-forget”, e.g. send a message asynchronously and return immediately. +* 'sendRequestReply' means “send-and-reply-eventually”, e.g. send a message asynchronously and wait for a reply through a Future. Here you can specify a timeout. Using timeouts is very important. If no timeout is specified then the actor’s default timeout (set by the 'getContext().setTimeout(..)' method in the 'ActorRef') is used. This method throws an 'ActorTimeoutException' if the call timed out. +* 'sendRequestReplyFuture' sends a message asynchronously and returns a 'Future'. + +In all these methods you have the option of passing along your 'ActorRef' context variable. Make it a practive of doing so because it will allow the receiver actors to be able to respond to your message, since the sender reference is sent along with the message. + +Fire-forget +^^^^^^^^^^^ + +This is the preferred way of sending messages. No blocking waiting for a message. Give best concurrency and scalability characteristics. + +.. code-block:: java + + actor.sendOneWay("Hello"); + +Or with the sender reference passed along: + +.. 
code-block:: java + + actor.sendOneWay("Hello", getContext()); + +If invoked from within an Actor, then the sending actor reference will be implicitly passed along with the message and available to the receiving Actor in its 'getContext().getSender();' method. He can use this to reply to the original sender or use the 'getContext().reply(message);' method. + +If invoked from an instance that is **not** an Actor there will be no implicit sender passed along the message and you will get an 'IllegalStateException' if you call 'getContext().reply(..)'. + +Send-And-Receive-Eventually +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Using 'sendRequestReply' will send a message to the receiving Actor asynchronously but it will wait for a reply on a 'Future', blocking the sender Actor until either: + +* A reply is received, or +* The Future times out and an 'ActorTimeoutException' is thrown. + +You can pass an explicit time-out to the 'sendRequestReply' method and if none is specified then the default time-out defined in the sender Actor will be used. + +Here are some examples: + +.. code-block:: java + + UnypedActorRef actorRef = ... + + try { + Object result = actorRef.sendRequestReply("Hello", getContext(), 1000); + ... // handle reply + } catch(ActorTimeoutException e) { + ... // handle timeout + } + +Send-And-Receive-Future +^^^^^^^^^^^^^^^^^^^^^^^ + +Using 'sendRequestReplyFuture' will send a message to the receiving Actor asynchronously and will immediately return a 'Future'. + +.. code-block:: java + + Future future = actorRef.sendRequestReplyFuture("Hello", getContext(), 1000); + +The 'Future' interface looks like this: + +.. code-block:: java + + interface Future { + void await(); + void awaitBlocking(); + boolean isCompleted(); + boolean isExpired(); + long timeoutInNanos(); + Option result(); + Option exception(); + Future onComplete(Procedure> procedure); + } + +So the normal way of working with futures is something like this: + +.. 
code-block:: java + + Future future = actorRef.sendRequestReplyFuture("Hello", getContext(), 1000); + future.await(); + if (future.isCompleted()) { + Option resultOption = future.result(); + if (resultOption.isDefined()) { + Object result = resultOption.get(); + ... + } + ... // whatever + } + +The 'onComplete' callback can be used to register a callback to get a notification when the Future completes. Gives you a way to avoid blocking. + +Forward message +^^^^^^^^^^^^^^^ + +You can forward a message from one actor to another. This means that the original sender address/reference is maintained even though the message is going through a 'mediator'. This can be useful when writing actors that work as routers, load-balancers, replicators etc. You need to pass along your ActorRef context variable as well. + +.. code-block:: java + + getContext().forward(message, getContext()); + +Receive messages +---------------- + +When an actor receives a message it is passed into the 'onReceive' method, this is an abstract method on the 'UntypedActor' base class that needs to be defined. + +Here is an example: + +.. code-block:: java + + public class SampleUntypedActor extends UntypedActor { + + public void onReceive(Object message) throws Exception { + if (message instanceof String) + EventHandler.info(this, String.format("Received String message: %s", message)); + else + throw new IllegalArgumentException("Unknown message: " + message); + } + } + +Reply to messages +----------------- + +Reply using the 'replySafe' and 'replyUnsafe' methods +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to send a message back to the original sender of the message you just received then you can use the 'getContext().replyUnsafe(..)' method. + +.. 
code-block:: java + + public void onReceive(Object message) throws Exception { + if (message instanceof String) { + String msg = (String)message; + if (msg.equals("Hello")) { + // Reply to original sender of message using the 'replyUnsafe' method + getContext().replyUnsafe(msg + " from " + getContext().getUuid()); + } + } + } + +In this case we will send a reply back to the Actor that sent the message. + +The 'replyUnsafe' method throws an 'IllegalStateException' if unable to determine what to reply to, e.g. the sender has not been passed along with the message when invoking one of the 'send*' methods. You can also use the more forgiving 'replySafe' method which returns 'true' if a reply was sent, and 'false' if unable to determine what to reply to. + +.. code-block:: java + + public void onReceive(Object message) throws Exception { + if (message instanceof String) { + String msg = (String)message; + if (msg.equals("Hello")) { + // Reply to original sender of message using the 'replySafe' method + if (getContext().replySafe(msg + " from " + getContext().getUuid())) ... // success + else ... // handle failure + } + } + } + +Reply using the sender reference +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If the sender reference (the sender's 'ActorRef') is passed into one of the 'send*' methods it will be implicitly passed along together with the message and will be available in the 'Option getSender()' method on the 'ActorRef'. This means that you can use this field to send a message back to the sender. + +On this 'Option' you can invoke 'boolean isDefined()' or 'boolean isEmpty()' to check if the sender is available or not, and if it is call 'get()' to get the reference. It's important to know that 'getSender().get()' will throw an exception if there is no sender in scope. The same pattern holds for using the 'getSenderFuture()' in the section below. + +..
code-block:: java + + public void onReceive(Object message) throws Exception { + if (message instanceof String) { + String msg = (String)message; + if (msg.equals("Hello")) { + // Reply to original sender of message using the sender reference + // also passing along my own refererence (the context) + if (getContext().getSender().isDefined) + getContext().getSender().get().sendOneWay(msg + " from " + getContext().getUuid(), getContext()); + } + } + } + +Reply using the sender future +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If a message was sent with the 'sendRequestReply' or 'sendRequestReplyFuture' methods, which both implements request-reply semantics using Future's, then you either have the option of replying using the 'reply' method as above. This method will then resolve the Future. But you can also get a reference to the Future directly and resolve it yourself or if you would like to store it away to resolve it later, or pass it on to some other Actor to resolve it. + +The reference to the Future resides in the 'ActorRef' instance and can be retreived using 'Option getSenderFuture()'. + +CompletableFuture is a future with methods for 'completing the future: +* completeWithResult(..) +* completeWithException(..) + +Here is an example of how it can be used: + +.. code-block:: java + + public void onReceive(Object message) throws Exception { + if (message instanceof String) { + String msg = (String)message; + if (msg.equals("Hello") && getContext().getSenderFuture().isDefined()) { + // Reply to original sender of message using the sender future reference + getContext().getSenderFuture().get().completeWithResult(msg + " from " + getContext().getUuid()); + } + } + } + +Reply using the channel +^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to have a handle to an object to whom you can reply to the message, you can use the Channel abstraction. 
+Simply call getContext().channel() and then you can forward that to others, store it away or otherwise until you want to reply, +which you do by Channel.sendOneWay(msg) + +.. code-block:: java + + public void onReceive(Object message) throws Exception { + if (message instanceof String) { + String msg = (String)message; + if (msg.equals("Hello") && getContext().getSenderFuture().isDefined()) { + // Reply to original sender of message using the channel + getContext().channel().sendOneWay(msg + " from " + getContext().getUuid()); + } + } + } + +Summary of reply semantics and options +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* getContext().reply(...) can be used to reply to an Actor or a Future. +* getContext().getSender() is a reference to the actor you can reply to, if it exists +* getContext().getSenderFuture() is a reference to the future you can reply to, if it exists +* getContext().channel() is a reference providing an abstraction to either self.sender or self.senderFuture if one is set, providing a single reference to store and reply to (the reference equivalent to the 'reply(...)' method). +* getContext().getSender() and getContext().getSenderFuture() will never be set at the same time, as there can only be one reference to accept a reply. + +Starting actors +--------------- + +Actors are started by invoking the ‘start’ method. + +.. code-block:: java + + ActorRef actor = actorOf(SampleUntypedActor.class); + myActor.start(); + +You can create and start the Actor in a oneliner like this: + +.. code-block:: java + + ActorRef actor = actorOf(SampleUntypedActor.class).start(); + +When you start the actor then it will automatically call the 'preStart' callback method on the 'UntypedActor'. This is an excellent place to add initialization code for the actor. + +.. code-block:: java + + @Override + void preStart() { + ... // initialization code + } + +Stopping actors +--------------- + +Actors are stopped by invoking the ‘stop’ method. + +.. 
code-block:: java + + actor.stop(); + +When stop is called then a call to the ‘postStop’ callback method will take place. The Actor can use this callback to implement shutdown behavior. + +.. code-block:: java + + @Override + void postStop() { + ... // clean up resources + } + +You can shut down all Actors in the system by invoking: + +.. code-block:: java + + Actors.registry().shutdownAll(); + +PoisonPill +---------- + +You can also send an actor the akka.actor.PoisonPill message, which will stop the actor when the message is processed. +If the sender is a Future, the Future will be completed with an akka.actor.ActorKilledException("PoisonPill") + +Use it like this: + +.. code-block:: java + + import static akka.actor.Actors.*; + + actor.sendOneWay(poisonPill()); + +Killing an Actor +---------------- + +You can kill an actor by sending a 'new Kill()' message. This will restart the actor through regular supervisor semantics. + +Use it like this: + +.. code-block:: java + + import static akka.actor.Actors.*; + + // kill the actor called 'victim' + victim.sendOneWay(kill()); + +Actor life-cycle +---------------- + +The actor has a well-defined non-circular life-cycle. 
+ +``_ +NEW (newly created actor) - can't receive messages (yet) + => STARTED (when 'start' is invoked) - can receive messages + => SHUT DOWN (when 'exit' or 'stop' is invoked) - can't do anything +``_ diff --git a/akka-docs/pending/use-cases.rst b/akka-docs/pending/use-cases.rst new file mode 100644 index 0000000000..8647d0b17c --- /dev/null +++ b/akka-docs/pending/use-cases.rst @@ -0,0 +1,31 @@ +Examples of use-cases for Akka +============================== + +There is a great discussion on use-cases for Akka with some good write-ups by production users here: ``_ + +Here are some of the areas where Akka is being deployed into production +----------------------------------------------------------------------- + +# **Transaction processing (Online Gaming, Finance/Banking, Trading, Statistics, Betting, Social Media, Telecom)** +** Scale up, scale out, fault-tolerance / HA +# **Service backend (any industry, any app)** +** Service REST, SOAP, Cometd, WebSockets etc +** Act as message hub / integration layer +** Scale up, scale out, fault-tolerance / HA +# **Concurrency/parallelism (any app)** +** Correct +** Simple to work with and understand +** Just add the jars to your existing JVM project (use Scala, Java, Groovy or JRuby) +# **Simulation** +** Master/Worker, Compute Grid, MapReduce etc. 
+# **Batch processing (any industry)** +** Camel integration to hook up with batch data sources +** Actors divide and conquer the batch workloads +# **Communications Hub (Telecom, Web media, Mobile media)** +** Scale up, scale out, fault-tolerance / HA +# **Gaming and Betting (MOM, online gaming, betting)** +** Scale up, scale out, fault-tolerance / HA +# **Business Intelligence/Data Mining/general purpose crunching** +** Scale up, scale out, fault-tolerance / HA +# **Complex Event Stream Processing** +** Scale up, scale out, fault-tolerance / HA diff --git a/akka-docs/pending/web.rst b/akka-docs/pending/web.rst new file mode 100644 index 0000000000..7d09ede65c --- /dev/null +++ b/akka-docs/pending/web.rst @@ -0,0 +1,99 @@ +Web Framework Integrations +========================== + +Play Framework +============== + +Home page: ``_ +Akka Play plugin: ``_ +Read more here: ``_ + +Lift Web Framework +================== + +Home page: ``_ + +In order to use Akka with Lift you basically just have to do one thing, add the 'AkkaServlet' to your 'web.xml'. + +web.xml +------- + +.. code-block:: xml + + + + + AkkaServlet + akka.comet.AkkaServlet + + + AkkaServlet + /* + + + + + LiftFilter + Lift Filter + The Filter that intercepts lift calls + net.liftweb.http.LiftFilter + + + LiftFilter + /* + + + +Boot class +---------- + +Lift bootstrap happens in the Lift 'Boot' class. Here is a good place to add Akka specific initialization. For example add declarative supervisor configuration to wire up the initial Actors. +Here is a full example taken from the Akka sample code, found here ``_. + +If a request is processed by Liftweb filter, Akka will not process the request. 
To disable processing of a request by the Lift filter : +* append partial function to LiftRules.liftRequest and return *false* value to disable processing of matching request +* use LiftRules.passNotFoundToChain to chain the request to the Akka filter + +Example of Boot class source code : +``_ +class Boot { + def boot { + // where to search snippet + LiftRules.addToPackages("sample.lift") + + LiftRules.httpAuthProtectedResource.prepend { + case (ParsePath("liftpage" :: Nil, _, _, _)) => Full(AuthRole("admin")) + } + + LiftRules.authentication = HttpBasicAuthentication("lift") { + case ("someuser", "1234", req) => { + Log.info("You are now authenticated !") + userRoles(AuthRole("admin")) + true + } + } + + LiftRules.liftRequest.append { + case Req("liftcount" :: _, _, _) => false + case Req("persistentliftcount" :: _, _, _) => false + } + LiftRules.passNotFoundToChain = true + + // Akka supervisor configuration wiring up initial Actor services + val supervisor = Supervisor( + SupervisorConfig( + RestartStrategy(OneForOne, 3, 100, List(classOf[Exception])), + Supervise( + actorOf[SimpleService], + LifeCycle(Permanent)) :: + Supervise( + actorOf[PersistentSimpleService], + LifeCycle(Permanent)) :: + Nil)) + + // Build SiteMap + // val entries = Menu(Loc("Home", List("index"), "Home")) :: Nil + // LiftRules.setSiteMap(SiteMap(entries:_*)) + } +} +``_ diff --git a/akka-docs/themes/akka/layout.html b/akka-docs/themes/akka/layout.html new file mode 100644 index 0000000000..db443c3fe5 --- /dev/null +++ b/akka-docs/themes/akka/layout.html @@ -0,0 +1,64 @@ +{# + akka/layout.html + ~~~~~~~~~~~~~~~~~ +#} + +{% extends "basic/layout.html" %} +{% set script_files = script_files + ['_static/theme_extras.js'] %} +{% set css_files = css_files + ['_static/print.css'] %} + +{# do not display relbars #} +{% block relbar1 %}{% endblock %} +{% block relbar2 %}{% endblock %} + +{% macro nav() %} +

+ {%- block akkarel1 %} + {%- endblock %} + {%- if prev %} + «  {{ prev.title }} +   ::   + {%- endif %} + {{ _('Contents') }} + {%- if next %} +   ::   + {{ next.title }}  » + {%- endif %} + {%- block akkarel2 %} + {%- endblock %} +

+{% endmacro %} + +{% block content %} +
+ {%- block akkaheader %} + {%- if theme_full_logo != "false" %} + + + + {%- else %} + {%- if logo -%} + + {%- endif -%} +

+ {{ shorttitle|e }}

+

{{ title|striptags|e }}

+ {%- endif %} + {%- endblock %} +
+
+ {{ nav() }} +
+
+ {#{%- if display_toc %} +
+

Table Of Contents

+ {{ toc }} +
+ {%- endif %}#} + {% block body %}{% endblock %} +
+
+ {{ nav() }} +
+{% endblock %} diff --git a/akka-docs/themes/akka/pygments/akka.py b/akka-docs/themes/akka/pygments/akka.py new file mode 100644 index 0000000000..af9fe61bf9 --- /dev/null +++ b/akka-docs/themes/akka/pygments/akka.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +""" + pygments.styles.akka + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Akka style for Scala highlighting. +""" + +from pygments.style import Style +from pygments.token import Keyword, Name, Comment, String, Error, \ + Number, Operator, Generic, Whitespace + + +class AkkaStyle(Style): + """ + Akka style for Scala highlighting. + """ + + background_color = "#f0f0f0" + default_style = "" + + styles = { + Whitespace: "#f0f0f0", + Comment: "#777766", + Comment.Preproc: "", + Comment.Special: "", + + Keyword: "#000080", + Keyword.Pseudo: "", + Keyword.Type: "", + + Operator: "#000000", + Operator.Word: "", + + Name.Builtin: "#000000", + Name.Function: "#000000", + Name.Class: "#000000", + Name.Namespace: "#000000", + Name.Exception: "#000000", + Name.Variable: "#000000", + Name.Constant: "bold #000000", + Name.Label: "#000000", + Name.Entity: "#000000", + Name.Attribute: "#000000", + Name.Tag: "#000000", + Name.Decorator: "#000000", + + String: "#008000", + String.Doc: "", + String.Interpol: "", + String.Escape: "", + String.Regex: "", + String.Symbol: "", + String.Other: "", + Number: "#008000", + + Error: "border:#FF0000" + } diff --git a/akka-docs/themes/akka/static/akka.css_t b/akka-docs/themes/akka/static/akka.css_t new file mode 100644 index 0000000000..7c417e9917 --- /dev/null +++ b/akka-docs/themes/akka/static/akka.css_t @@ -0,0 +1,352 @@ +/* + * akka.css_t + */ + +@import url("basic.css"); + +html { + margin: 0px; + padding: 0px; + background: #FFF url(bg-page.png) top left repeat-x; +} + +body { + line-height: 1.5; + margin: auto; + padding: 0px; + font-family: Helvetica, Arial, sans-serif; + min-width: 59em; + max-width: 70em; + color: {{ theme_textcolor }}; +} + +div.footer { + padding: 8px; + font-size: 11px; + 
text-align: center; + letter-spacing: 0.5px; +} + +/* link colors and text decoration */ + +a:link { + font-weight: bold; + text-decoration: none; + color: {{ theme_linkcolor }}; +} + +a:visited { + font-weight: bold; + text-decoration: none; + color: {{ theme_visitedlinkcolor }}; +} + +a:hover, a:active { + text-decoration: underline; + color: {{ theme_hoverlinkcolor }}; +} + +/* Some headers act as anchors, don't give them a hover effect */ + +h1 a:hover, a:active { + text-decoration: none; + color: {{ theme_headingcolor }}; +} + +h2 a:hover, a:active { + text-decoration: none; + color: {{ theme_headingcolor }}; +} + +h3 a:hover, a:active { + text-decoration: none; + color: {{ theme_headingcolor }}; +} + +h4 a:hover, a:active { + text-decoration: none; + color: {{ theme_headingcolor }}; +} + +a.headerlink { + color: #a7ce38; + padding-left: 5px; +} + +a.headerlink:hover { + color: #a7ce38; +} + +/* basic text elements */ + +div.content { + margin-top: 20px; + margin-left: 40px; + margin-right: 40px; + margin-bottom: 50px; + font-size: 0.9em; +} + +/* heading and navigation */ + +div.header { + position: relative; + left: 0px; + top: 0px; + height: 85px; + /* background: #eeeeee; */ + padding: 0 40px; +} +div.header h1 { + font-size: 1.6em; + font-weight: normal; + letter-spacing: 1px; + color: {{ theme_headingcolor }}; + border: 0; + margin: 0; + padding-top: 15px; +} +div.header h1 a { + font-weight: normal; + color: {{ theme_headingcolor }}; +} +div.header h2 { + font-size: 1.3em; + font-weight: normal; + letter-spacing: 1px; + text-transform: uppercase; + color: #aaa; + border: 0; + margin-top: -3px; + padding: 0; +} + +div.header img.rightlogo { + float: right; +} + + +div.title { + font-size: 1.3em; + font-weight: bold; + color: {{ theme_headingcolor }}; + border-bottom: dotted thin #e0e0e0; + margin-bottom: 25px; +} +div.topnav { + /* background: #e0e0e0; */ +} +div.topnav p { + margin-top: 0; + margin-left: 40px; + margin-right: 40px; + margin-bottom: 0px; 
+ text-align: right; + font-size: 0.8em; +} +div.bottomnav { + background: #eeeeee; +} +div.bottomnav p { + margin-right: 40px; + text-align: right; + font-size: 0.8em; +} + +a.uplink { + font-weight: normal; +} + + +/* contents box */ + +table.index { + margin: 0px 0px 30px 30px; + padding: 1px; + border-width: 1px; + border-style: dotted; + border-color: #e0e0e0; +} +table.index tr.heading { + background-color: #e0e0e0; + text-align: center; + font-weight: bold; + font-size: 1.1em; +} +table.index tr.index { + background-color: #eeeeee; +} +table.index td { + padding: 5px 20px; +} + +table.index a:link, table.index a:visited { + font-weight: normal; + text-decoration: none; + color: {{ theme_linkcolor }}; +} +table.index a:hover, table.index a:active { + text-decoration: underline; + color: {{ theme_hoverlinkcolor }}; +} + + +/* Akka Cloud Manual styles and layout */ + +/* Rounded corner boxes */ +/* Common declarations */ +div.admonition { + -webkit-border-radius: 10px; + -khtml-border-radius: 10px; + -moz-border-radius: 10px; + border-radius: 10px; + border-style: dotted; + border-width: thin; + border-color: #dcdcdc; + padding: 10px 15px 10px 15px; + margin-bottom: 15px; + margin-top: 15px; +} +div.note { + padding: 10px 15px 10px 80px; + background: #e4ffde url(alert_info_32.png) 15px 15px no-repeat; + min-height: 42px; +} +div.warning { + padding: 10px 15px 10px 80px; + background: #fffbc6 url(alert_warning_32.png) 15px 15px no-repeat; + min-height: 42px; +} +div.seealso { + background: #e4ffde; +} + +/* More layout and styles */ +h1 { + font-size: 1.3em; + font-weight: bold; + color: {{ theme_headingcolor }}; + border-bottom: dotted thin #e0e0e0; + margin-top: 30px; +} + +h2 { + font-size: 1.2em; + font-weight: normal; + color: {{ theme_headingcolor }}; + border-bottom: dotted thin #e0e0e0; + margin-top: 30px; +} + +h3 { + font-size: 1.1em; + font-weight: normal; + color: {{ theme_headingcolor }}; + margin-top: 30px; +} + +h4 { + font-size: 1.0em; + 
font-weight: normal; + color: {{ theme_headingcolor }}; + margin-top: 30px; +} + +p { + text-align: justify; +} + +p.last { + margin-bottom: 0; +} + +ol { + padding-left: 20px; +} + +ul { + padding-left: 5px; + margin-top: 3px; +} + +li { + line-height: 1.3; +} + +div.content ul > li { + -moz-background-clip:border; + -moz-background-inline-policy:continuous; + -moz-background-origin:padding; + background: transparent url(bullet_orange.png) no-repeat scroll left 0.45em; + list-style-image: none; + list-style-type: none; + padding: 0 0 0 1.666em; + margin-bottom: 3px; +} + +td { + vertical-align: top; +} + +tt { + background-color: #e2e2e2; + font-size: 1.0em; + font-family: monospace; +} + +pre { + border-color: #0c3762; + border-style: dotted; + border-width: thin; + margin: 0 0 12px 0; + padding: 0.8em; + background-color: #f0f0f0; +} + +hr { + border-top: 1px solid #ccc; + border-bottom: 0; + border-right: 0; + border-left: 0; + margin-bottom: 10px; + margin-top: 20px; +} + +/* printer only pretty stuff */ +@media print { + .noprint { + display: none; + } + /* for acronyms we want their definitions inlined at print time */ + acronym[title]:after { + font-size: small; + content: " (" attr(title) ")"; + font-style: italic; + } + /* and not have mozilla dotted underline */ + acronym { + border: none; + } + div.topnav, div.bottomnav, div.header, table.index { + display: none; + } + div.content { + margin: 0px; + padding: 0px; + } + html { + background: #FFF; + } +} + +.viewcode-back { + font-family: Helvetica, Arial, sans-serif; +} + +div.viewcode-block:target { + background-color: #f4debf; + border-top: 1px solid #ac9; + border-bottom: 1px solid #ac9; + margin: -1px -12px; + padding: 0 12px; +} diff --git a/akka-docs/themes/akka/static/alert_info_32.png b/akka-docs/themes/akka/static/alert_info_32.png new file mode 100644 index 0000000000..05b4fe898c Binary files /dev/null and b/akka-docs/themes/akka/static/alert_info_32.png differ diff --git 
a/akka-docs/themes/akka/static/alert_warning_32.png b/akka-docs/themes/akka/static/alert_warning_32.png new file mode 100644 index 0000000000..f13611cde4 Binary files /dev/null and b/akka-docs/themes/akka/static/alert_warning_32.png differ diff --git a/akka-docs/themes/akka/static/bg-page.png b/akka-docs/themes/akka/static/bg-page.png new file mode 100644 index 0000000000..c6f3bc477c Binary files /dev/null and b/akka-docs/themes/akka/static/bg-page.png differ diff --git a/akka-docs/themes/akka/static/bullet_orange.png b/akka-docs/themes/akka/static/bullet_orange.png new file mode 100644 index 0000000000..ad5d02f341 Binary files /dev/null and b/akka-docs/themes/akka/static/bullet_orange.png differ diff --git a/akka-docs/themes/akka/theme.conf b/akka-docs/themes/akka/theme.conf new file mode 100644 index 0000000000..7f45fd1718 --- /dev/null +++ b/akka-docs/themes/akka/theme.conf @@ -0,0 +1,12 @@ +[theme] +inherit = basic +stylesheet = akka.css +pygments_style = friendly + +[options] +full_logo = false +textcolor = #333333 +headingcolor = #0c3762 +linkcolor = #dc3c01 +visitedlinkcolor = #892601 +hoverlinkcolor = #ff4500 diff --git a/akka-http/src/main/scala/akka/http/Mist.scala b/akka-http/src/main/scala/akka/http/Mist.scala index eb91b9737f..379cbfb36d 100644 --- a/akka-http/src/main/scala/akka/http/Mist.scala +++ b/akka-http/src/main/scala/akka/http/Mist.scala @@ -4,7 +4,7 @@ package akka.http -import akka.actor.{ActorRegistry, ActorRef, Actor} +import akka.actor.{ActorRef, Actor} import akka.event.EventHandler import javax.servlet.http.{HttpServletResponse, HttpServletRequest} @@ -17,8 +17,8 @@ import javax.servlet.Filter object MistSettings { import akka.config.Config._ - final val JettyServer = "jetty" - final val TimeoutAttribute = "timeout" + val JettyServer = "jetty" + val TimeoutAttribute = "timeout" val ConnectionClose = config.getBool("akka.http.connection-close", true) val RootActorBuiltin = config.getBool("akka.http.root-actor-builtin", true) @@ -64,7 
+64,7 @@ import Types._ * */ trait Mist { - import javax.servlet.{ServletContext} + import javax.servlet.ServletContext import MistSettings._ /** @@ -84,28 +84,21 @@ trait Mist { response: HttpServletResponse) (builder: (() => tAsyncRequestContext) => RequestMethod) = { def suspend: tAsyncRequestContext = { - // + // set to right now, which is effectively "already expired" - // response.setDateHeader("Expires", System.currentTimeMillis) response.setHeader("Cache-Control", "no-cache, must-revalidate") - // // no keep-alive? - // if (ConnectionClose) response.setHeader("Connection","close") - // // suspend the request // TODO: move this out to the specialized support if jetty asyncstart doesnt let us update TOs - // request.asInstanceOf[tAsyncRequest].startAsync.asInstanceOf[tAsyncRequestContext] } - // // shoot the message to the root endpoint for processing // IMPORTANT: the suspend method is invoked on the server thread not in the actor - // val method = builder(suspend _) if (method.go) _root ! 
method } @@ -117,7 +110,6 @@ trait Mist { def initMist(context: ServletContext) { val server = context.getServerInfo val (major, minor) = (context.getMajorVersion, context.getMinorVersion) - _factory = if (major >= 3) { Some(Servlet30ContextMethodFactory) } else if (server.toLowerCase startsWith JettyServer) { @@ -200,7 +192,7 @@ object Endpoint { /** * leverage the akka config to tweak the dispatcher for our endpoints */ - final val Dispatcher = Dispatchers.fromConfig("akka.http.mist-dispatcher") + val Dispatcher = Dispatchers.fromConfig("akka.http.mist-dispatcher") type Hook = Function[String, Boolean] type Provider = Function[String, ActorRef] @@ -236,25 +228,21 @@ trait Endpoint { this: Actor => * Message handling common to all endpoints, must be chained */ protected def handleHttpRequest: Receive = { - // + // add the endpoint - the if the uri hook matches, // the message will be sent to the actor returned by the provider func - // case Attach(hook, provider) => _attach(hook, provider) - // // dispatch the suspended requests - // case req: RequestMethod => { val uri = req.request.getPathInfo val endpoints = _attachments.filter { _._1(uri) } - if (!endpoints.isEmpty) - endpoints.foreach { _._2(uri) ! req } + if (!endpoints.isEmpty) endpoints.foreach { _._2(uri) ! req } else { self.sender match { case Some(s) => s reply NoneAvailable(uri, req) - case None => _na(uri, req) + case None => _na(uri, req) } } } @@ -275,23 +263,15 @@ class RootEndpoint extends Actor with Endpoint { final val Root = "/" - // // use the configurable dispatcher - // self.dispatcher = Endpoint.Dispatcher - // // adopt the configured id - // if (RootActorBuiltin) self.id = RootActorID override def preStart = _attachments = Tuple2((uri: String) => {uri eq Root}, (uri: String) => this.actor) :: _attachments - //TODO: Is this needed? 
- //override def postRestart = - // _attachments = Tuple2((uri: String) => {uri eq Root}, (uri: String) => this.actor) :: _attachments - def recv: Receive = { case NoneAvailable(uri, req) => _na(uri, req) case unknown => {} @@ -317,10 +297,7 @@ trait RequestMethod { import java.io.IOException import javax.servlet.http.{HttpServletResponse, HttpServletRequest} - // // required implementations - // - val builder: () => tAsyncRequestContext /** @@ -353,35 +330,31 @@ trait RequestMethod { def getHeaderOrElse(name: String, default: Function[Any, String]): String = request.getHeader(name) match { case null => default(null) - case s => s - } + case s => s + } def getParameterOrElse(name: String, default: Function[Any, String]): String = request.getParameter(name) match { case null => default(null) - case s => s + case s => s } def complete(status: Int, body: String): Boolean = complete(status, body, Headers()) def complete(status: Int, body: String, headers: Headers): Boolean = - rawComplete { - res => { - res.setStatus(status) - headers foreach {h => response.setHeader(h._1, h._2)} - res.getWriter.write(body) - res.getWriter.close - res.flushBuffer - } + rawComplete { res => + res.setStatus(status) + headers foreach {h => response.setHeader(h._1, h._2)} + res.getWriter.write(body) + res.getWriter.close + res.flushBuffer } def rawComplete(completion: HttpServletResponse => Unit): Boolean = context match { - case Some(pipe) => { + case Some(pipe) => try { - if (!suspended) { - false - } + if (!suspended) false else { completion(response) pipe.complete @@ -392,34 +365,28 @@ trait RequestMethod { EventHandler.error(io, this, io.getMessage) false } - } - - case None => - false + case None => false } def complete(t: Throwable) { context match { - case Some(pipe) => { + case Some(pipe) => try { if (suspended) { response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Failed to write data to connection on resume") pipe.complete } } catch { - case io: IOException => + 
case io: IOException => EventHandler.error(io, this, io.getMessage) } - } - case None => {} } } - /** + /* * Utility methods to send responses back */ - def OK(body: String): Boolean = complete(HttpServletResponse.SC_OK, body) def OK(body: String, headers:Headers): Boolean = complete(HttpServletResponse.SC_OK, body, headers) def Created(body: String): Boolean = complete(HttpServletResponse.SC_CREATED, body) diff --git a/akka-http/src/test/scala/SecuritySpec.scala b/akka-http/src/test/scala/SecuritySpec.scala index d67d7e3bb7..4b7c7767be 100644 --- a/akka-http/src/test/scala/SecuritySpec.scala +++ b/akka-http/src/test/scala/SecuritySpec.scala @@ -34,7 +34,7 @@ class BasicAuthenticatorSpec extends junit.framework.TestCase import BasicAuthenticatorSpec._ val authenticator = actorOf[BasicAuthenticator] - authenticator.start + authenticator.start() @Test def testChallenge = { val req = mock[ContainerRequest] diff --git a/akka-remote/src/main/scala/akka/remote/BootableRemoteActorService.scala b/akka-remote/src/main/scala/akka/remote/BootableRemoteActorService.scala index bd586ce939..8139b35d0b 100644 --- a/akka-remote/src/main/scala/akka/remote/BootableRemoteActorService.scala +++ b/akka-remote/src/main/scala/akka/remote/BootableRemoteActorService.scala @@ -20,7 +20,7 @@ trait BootableRemoteActorService extends Bootable { def run = Actor.remote.start(self.applicationLoader.getOrElse(null)) //Use config host/port }, "Akka Remote Service") - def startRemoteService = remoteServerThread.start + def startRemoteService = remoteServerThread.start() abstract override def onLoad = { if (ReflectiveAccess.isRemotingEnabled && RemoteServerSettings.isRemotingEnabled) { diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 120b7e1dfc..64e26cfdf5 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ 
b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -5,46 +5,43 @@ package akka.remote.netty import akka.dispatch.{DefaultCompletableFuture, CompletableFuture, Future} +import akka.remote.{MessageSerializer, RemoteClientSettings, RemoteServerSettings} import akka.remote.protocol.RemoteProtocol._ import akka.remote.protocol.RemoteProtocol.ActorType._ -import akka.config.ConfigurationException import akka.serialization.RemoteActorSerialization import akka.serialization.RemoteActorSerialization._ -import akka.japi.Creator -import akka.config.Config._ import akka.remoteinterface._ -import akka.actor.{PoisonPill, Index, - ActorInitializationException, LocalActorRef, newUuid, - ActorRegistry, Actor, RemoteActorRef, +import akka.actor.{PoisonPill, Index, LocalActorRef, Actor, RemoteActorRef, TypedActor, ActorRef, IllegalActorStateException, RemoteActorSystemMessage, uuidFrom, Uuid, Exit, LifeCycleMessage, ActorType => AkkaActorType} -import akka.AkkaException -import akka.event.EventHandler import akka.actor.Actor._ +import akka.config.Config._ import akka.util._ -import akka.remote.{MessageSerializer, RemoteClientSettings, RemoteServerSettings} +import akka.event.EventHandler import org.jboss.netty.channel._ import org.jboss.netty.channel.group.{DefaultChannelGroup,ChannelGroup,ChannelGroupFuture} import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory -import org.jboss.netty.bootstrap.{ServerBootstrap,ClientBootstrap} +import org.jboss.netty.bootstrap.{ServerBootstrap, ClientBootstrap} import org.jboss.netty.handler.codec.frame.{ LengthFieldBasedFrameDecoder, LengthFieldPrepender } import org.jboss.netty.handler.codec.compression.{ ZlibDecoder, ZlibEncoder } import org.jboss.netty.handler.codec.protobuf.{ ProtobufDecoder, ProtobufEncoder } import org.jboss.netty.handler.timeout.{ ReadTimeoutHandler, ReadTimeoutException } import 
org.jboss.netty.handler.execution.{ OrderedMemoryAwareThreadPoolExecutor, ExecutionHandler } import org.jboss.netty.util.{ TimerTask, Timeout, HashedWheelTimer } -import org.jboss.netty.handler.ssl.SslHandler -import scala.collection.mutable.{ HashMap } -import scala.reflect.BeanProperty +import scala.collection.mutable.HashMap +import scala.collection.JavaConversions._ -import java.net.{ SocketAddress, InetSocketAddress } +import java.net.InetSocketAddress import java.lang.reflect.InvocationTargetException -import java.util.concurrent.{ TimeUnit, Executors, ConcurrentMap, ConcurrentHashMap, ConcurrentSkipListSet } -import java.util.concurrent.atomic.{AtomicReference, AtomicLong, AtomicBoolean} +import java.util.concurrent.atomic.{AtomicReference, AtomicBoolean} +import java.util.concurrent._ +import akka.AkkaException + +class RemoteClientMessageBufferException(message: String) extends AkkaException(message) object RemoteEncoder { def encode(rmp: RemoteMessageProtocol): AkkaRemoteProtocol = { @@ -157,29 +154,54 @@ trait NettyRemoteClientModule extends RemoteClientModule { self: ListenerManagem } /** - * This is the abstract baseclass for netty remote clients, - * currently there's only an ActiveRemoteClient, but otehrs could be feasible, like a PassiveRemoteClient that + * This is the abstract baseclass for netty remote clients, currently there's only an + * ActiveRemoteClient, but otehrs could be feasible, like a PassiveRemoteClient that * reuses an already established connection. 
 */ abstract class RemoteClient private[akka] ( val module: NettyRemoteClientModule, val remoteAddress: InetSocketAddress) { - val name = this.getClass.getSimpleName + "@" + remoteAddress.getAddress.getHostAddress + "::" + remoteAddress.getPort + val useTransactionLog = config.getBool("akka.remote.client.buffering.retry-message-send-on-failure", true) + val transactionLogCapacity = config.getInt("akka.remote.client.buffering.capacity", -1) - protected val futures = new ConcurrentHashMap[Uuid, CompletableFuture[_]] - protected val supervisors = new ConcurrentHashMap[Uuid, ActorRef] - private[remote] val runSwitch = new Switch() + val name = this.getClass.getSimpleName + "@" + + remoteAddress.getAddress.getHostAddress + "::" + + remoteAddress.getPort + + protected val futures = new ConcurrentHashMap[Uuid, CompletableFuture[_]] + protected val supervisors = new ConcurrentHashMap[Uuid, ActorRef] + protected val pendingRequests = { + if (transactionLogCapacity < 0) new ConcurrentLinkedQueue[(Boolean, Uuid, RemoteMessageProtocol)] + else new LinkedBlockingQueue[(Boolean, Uuid, RemoteMessageProtocol)](transactionLogCapacity) + } + + private[remote] val runSwitch = new Switch() private[remote] val isAuthenticated = new AtomicBoolean(false) private[remote] def isRunning = runSwitch.isOn protected def notifyListeners(msg: => Any): Unit + protected def currentChannel: Channel def connect(reconnectIfAlreadyConnected: Boolean = false): Boolean + def shutdown: Boolean + /** + * Returns an array with the current pending messages not yet delivered. 
+ */ + def pendingMessages: Array[Any] = { + var messages = Vector[Any]() + val iter = pendingRequests.iterator + while (iter.hasNext) { + val (_, _, message) = iter.next + messages = messages :+ MessageSerializer.deserialize(message.getMessage) + } + messages.toArray + } + /** * Converts the message to the wireprotocol and sends the message across the wire */ @@ -192,7 +214,7 @@ abstract class RemoteClient private[akka] ( isOneWay: Boolean, actorRef: ActorRef, typedActorInfo: Option[Tuple2[String, String]], - actorType: AkkaActorType): Option[CompletableFuture[T]] = synchronized { //TODO: find better strategy to prevent race + actorType: AkkaActorType): Option[CompletableFuture[T]] = synchronized { // FIXME: find better strategy to prevent race send(createRemoteMessageProtocolBuilder( Some(actorRef), @@ -213,37 +235,56 @@ abstract class RemoteClient private[akka] ( * Sends the message across the wire */ def send[T]( - request: RemoteMessageProtocol, - senderFuture: Option[CompletableFuture[T]]): Option[CompletableFuture[T]] = { + request: RemoteMessageProtocol, + senderFuture: Option[CompletableFuture[T]]): Option[CompletableFuture[T]] = { if (isRunning) { if (request.getOneWay) { - val future = currentChannel.write(RemoteEncoder.encode(request)) - future.awaitUninterruptibly() - if (!future.isCancelled && !future.isSuccess) { - notifyListeners(RemoteClientWriteFailed(request, future.getCause, module, remoteAddress)) - throw future.getCause + try { + val future = currentChannel.write(RemoteEncoder.encode(request)) + future.awaitUninterruptibly() + if (!future.isCancelled && !future.isSuccess) { + notifyListeners(RemoteClientWriteFailed(request, future.getCause, module, remoteAddress)) + throw future.getCause + } + } catch { + case e: Throwable => + // add the request to the tx log after a failing send + notifyListeners(RemoteClientError(e, module, remoteAddress)) + if (useTransactionLog) { + if (!pendingRequests.offer((true, null, request))) + throw new 
RemoteClientMessageBufferException("Buffer limit [" + transactionLogCapacity + "] reached") + } + else throw e } - None } else { - val futureResult = if (senderFuture.isDefined) senderFuture.get - else new DefaultCompletableFuture[T](request.getActorInfo.getTimeout) - val futureUuid = uuidFrom(request.getUuid.getHigh, request.getUuid.getLow) - futures.put(futureUuid, futureResult) //Add this prematurely, remove it if write fails - currentChannel.write(RemoteEncoder.encode(request)).addListener(new ChannelFutureListener { - def operationComplete(future: ChannelFuture) { - if (future.isCancelled) { - futures.remove(futureUuid) //Clean this up - //We don't care about that right now - } else if (!future.isSuccess) { - val f = futures.remove(futureUuid) //Clean this up - if (f ne null) - f.completeWithException(future.getCause) - notifyListeners(RemoteClientWriteFailed(request, future.getCause, module, remoteAddress)) - } - } - }) - Some(futureResult) + val futureResult = if (senderFuture.isDefined) senderFuture.get + else new DefaultCompletableFuture[T](request.getActorInfo.getTimeout) + val futureUuid = uuidFrom(request.getUuid.getHigh, request.getUuid.getLow) + futures.put(futureUuid, futureResult) // Add future prematurely, remove it if write fails + + def handleRequestReplyError(future: ChannelFuture) = { + notifyListeners(RemoteClientWriteFailed(request, future.getCause, module, remoteAddress)) + if (useTransactionLog) { + if (!pendingRequests.offer((false, futureUuid, request))) // Add the request to the tx log after a failing send + throw new RemoteClientMessageBufferException("Buffer limit [" + transactionLogCapacity + "] reached") + } else { + val f = futures.remove(futureUuid) // Clean up future + if (f ne null) f.completeWithException(future.getCause) + } + } + + var future: ChannelFuture = null + try { + // try to send the original one + future = currentChannel.write(RemoteEncoder.encode(request)) + future.awaitUninterruptibly() + if (future.isCancelled) 
futures.remove(futureUuid) // Clean up future + else if (!future.isSuccess) handleRequestReplyError(future) + } catch { + case e: Exception => handleRequestReplyError(future) + } + Some(futureResult) } } else { val exception = new RemoteClientException("Remote client is not running, make sure you have invoked 'RemoteClient.connect' before using it.", module, remoteAddress) @@ -252,6 +293,34 @@ abstract class RemoteClient private[akka] ( } } + private[remote] def sendPendingRequests() = pendingRequests synchronized { // ensure only one thread at a time can flush the log + val nrOfMessages = pendingRequests.size + if (nrOfMessages > 0) EventHandler.info(this, "Resending [%s] previously failed messages after remote client reconnect" format nrOfMessages) + var pendingRequest = pendingRequests.peek + while (pendingRequest ne null) { + val (isOneWay, futureUuid, message) = pendingRequest + if (isOneWay) { // sendOneWay + val future = currentChannel.write(RemoteEncoder.encode(message)) + future.awaitUninterruptibly() + if (!future.isCancelled && !future.isSuccess) { + notifyListeners(RemoteClientWriteFailed(message, future.getCause, module, remoteAddress)) + throw future.getCause + } + } else { // sendRequestReply + val future = currentChannel.write(RemoteEncoder.encode(message)) + future.awaitUninterruptibly() + if (future.isCancelled) futures.remove(futureUuid) // Clean up future + else if (!future.isSuccess) { + val f = futures.remove(futureUuid) // Clean up future + if (f ne null) f.completeWithException(future.getCause) + notifyListeners(RemoteClientWriteFailed(message, future.getCause, module, remoteAddress)) + } + } + pendingRequests.remove(pendingRequest) + pendingRequest = pendingRequests.peek // try to grab next message + } + } + private[akka] def registerSupervisorForActor(actorRef: ActorRef): ActorRef = if (!actorRef.supervisor.isDefined) throw new IllegalActorStateException( "Can't register supervisor for " + actorRef + " since it is not under supervision") 
@@ -272,6 +341,7 @@ class ActiveRemoteClient private[akka] ( module: NettyRemoteClientModule, remoteAddress: InetSocketAddress, val loader: Option[ClassLoader] = None, notifyListenersFun: (=> Any) => Unit) extends RemoteClient(module, remoteAddress) { import RemoteClientSettings._ + //FIXME rewrite to a wrapper object (minimize volatile access and maximize encapsulation) @volatile private var bootstrap: ClientBootstrap = _ @volatile private[remote] var connection: ChannelFuture = _ @@ -335,13 +405,14 @@ class ActiveRemoteClient private[akka] ( //Please note that this method does _not_ remove the ARC from the NettyRemoteClientModule's map of clients def shutdown = runSwitch switchOff { notifyListeners(RemoteClientShutdown(module, remoteAddress)) - timer.stop + timer.stop() timer = null openChannels.close.awaitUninterruptibly openChannels = null bootstrap.releaseExternalResources bootstrap = null connection = null + pendingRequests.clear } private[akka] def isWithinReconnectionTimeWindow: Boolean = { @@ -456,8 +527,16 @@ class ActiveRemoteClientHandler( } override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { - client.notifyListeners(RemoteClientConnected(client.module, client.remoteAddress)) - client.resetReconnectionTimeWindow + try { + if (client.useTransactionLog) client.sendPendingRequests() // try to send pending requests (still there after client/server crash and reconnect) + client.notifyListeners(RemoteClientConnected(client.module, client.remoteAddress)) + client.resetReconnectionTimeWindow + } catch { + case e: Throwable => + EventHandler.error(e, this, e.getMessage) + client.notifyListeners(RemoteClientError(e, client.module, client.remoteAddress)) + throw e + } } override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { @@ -486,7 +565,7 @@ class ActiveRemoteClientHandler( } catch { case problem: Throwable => EventHandler.error(problem, this, problem.getMessage) - 
UnparsableException(classname, exception.getMessage) + CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(problem, classname, exception.getMessage) } } } @@ -495,7 +574,8 @@ class ActiveRemoteClientHandler( * Provides the implementation of the Netty remote support */ class NettyRemoteSupport extends RemoteSupport with NettyRemoteServerModule with NettyRemoteClientModule { - //Needed for remote testing and switching on/off under run + + // Needed for remote testing and switching on/off under run val optimizeLocal = new AtomicBoolean(true) def optimizeLocalScoped_?() = optimizeLocal.get @@ -564,7 +644,7 @@ class NettyRemoteServer(serverModule: NettyRemoteServerModule, val host: String, bootstrap.releaseExternalResources serverModule.notifyListeners(RemoteServerShutdown(serverModule)) } catch { - case e: Exception => + case e: Exception => EventHandler.error(e, this, e.getMessage) } } @@ -597,7 +677,7 @@ trait NettyRemoteServerModule extends RemoteServerModule { self: RemoteModule => currentServer.set(Some(new NettyRemoteServer(this, _hostname, _port, loader))) } } catch { - case e: Exception => + case e: Exception => EventHandler.error(e, this, e.getMessage) notifyListeners(RemoteServerError(e, this)) } @@ -649,7 +729,7 @@ trait NettyRemoteServerModule extends RemoteServerModule { self: RemoteModule => private def register[Key](id: Key, actorRef: ActorRef, registry: ConcurrentHashMap[Key, ActorRef]) { if (_isRunning.isOn) { registry.put(id, actorRef) //TODO change to putIfAbsent - if (!actorRef.isRunning) actorRef.start + if (!actorRef.isRunning) actorRef.start() } } @@ -824,14 +904,14 @@ class RemoteServerHandler( // stop all session actors for (map <- Option(sessionActors.remove(event.getChannel)); - actor <- asScalaIterable(map.values)) { + actor <- collectionAsScalaIterable(map.values)) { try { actor ! 
PoisonPill } catch { case e: Exception => } } //FIXME switch approach or use other thread to execute this // stop all typed session actors for (map <- Option(typedSessionActors.remove(event.getChannel)); - actor <- asScalaIterable(map.values)) { + actor <- collectionAsScalaIterable(map.values)) { try { TypedActor.stop(actor) } catch { case e: Exception => } } @@ -893,7 +973,7 @@ class RemoteServerHandler( message match { // first match on system messages case RemoteActorSystemMessage.Stop => if (UNTRUSTED_MODE) throw new SecurityException("Remote server is operating is untrusted mode, can not stop the actor") - else actorRef.stop + else actorRef.stop() case _: LifeCycleMessage if (UNTRUSTED_MODE) => throw new SecurityException("Remote server is operating is untrusted mode, can not pass on a LifeCycleMessage to the remote actor") @@ -936,9 +1016,15 @@ class RemoteServerHandler( val typedActor = createTypedActor(actorInfo, channel) //FIXME: Add ownerTypeHint and parameter types to the TypedActorInfo? 
- val (ownerTypeHint, argClasses, args) = MessageSerializer.deserialize(request.getMessage).asInstanceOf[Tuple3[String,Array[Class[_]],Array[AnyRef]]] + val (ownerTypeHint, argClasses, args) = + MessageSerializer + .deserialize(request.getMessage) + .asInstanceOf[Tuple3[String,Array[Class[_]],Array[AnyRef]]] - def resolveMethod(bottomType: Class[_], typeHint: String, methodName: String, methodSignature: Array[Class[_]]): java.lang.reflect.Method = { + def resolveMethod(bottomType: Class[_], + typeHint: String, + methodName: String, + methodSignature: Array[Class[_]]): java.lang.reflect.Method = { var typeToResolve = bottomType var targetMethod: java.lang.reflect.Method = null var firstException: NoSuchMethodException = null @@ -963,7 +1049,7 @@ class RemoteServerHandler( throw firstException targetMethod - } + } try { val messageReceiver = resolveMethod(typedActor.getClass, ownerTypeHint, typedActorInfo.getMethod, argClasses) @@ -988,7 +1074,7 @@ class RemoteServerHandler( write(channel, RemoteEncoder.encode(messageBuilder.build)) } catch { - case e: Exception => + case e: Exception => EventHandler.error(e, this, e.getMessage) server.notifyListeners(RemoteServerError(e, server)) } @@ -1038,7 +1124,7 @@ class RemoteServerHandler( val actorRef = factory() actorRef.uuid = parseUuid(uuid) //FIXME is this sensible? 
sessionActors.get(channel).put(id, actorRef) - actorRef.start //Start it where's it's created + actorRef.start() //Start it where's it's created } case sessionActor => sessionActor } @@ -1062,7 +1148,7 @@ class RemoteServerHandler( actorRef.id = id actorRef.timeout = timeout server.actorsByUuid.put(actorRef.uuid.toString, actorRef) // register by uuid - actorRef.start //Start it where it's created + actorRef.start() //Start it where it's created } catch { case e: Throwable => EventHandler.error(e, this, e.getMessage) @@ -1144,7 +1230,14 @@ class RemoteServerHandler( server.findTypedActorByIdOrUuid(actorInfo.getId, parseUuid(uuid).toString) match { case null => // the actor has not been registered globally. See if we have it in the session createTypedSessionActor(actorInfo, channel) match { - case null => createClientManagedTypedActor(actorInfo) //Maybe client managed actor? + case null => + // FIXME this is broken, if a user tries to get a server-managed typed actor and that is not registered then a client-managed typed actor is created, but just throwing an exception here causes client-managed typed actors to fail + +/* val e = new RemoteServerException("Can't load remote Typed Actor for [" + actorInfo.getId + "]") + EventHandler.error(e, this, e.getMessage) + server.notifyListeners(RemoteServerError(e, server)) + throw e +*/ createClientManagedTypedActor(actorInfo) // client-managed actor case sessionActor => sessionActor } case typedActor => typedActor @@ -1206,4 +1299,4 @@ class DefaultDisposableChannelGroup(name: String) extends DefaultChannelGroup(na throw new IllegalStateException("ChannelGroup already closed, cannot add new channel") } } -} \ No newline at end of file +} diff --git a/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala b/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala index 7ad0c1e443..f41351f5bc 100644 --- a/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala +++ 
b/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala @@ -44,7 +44,7 @@ trait Format[T <: Actor] extends FromBinary[T] with ToBinary[T] * } * */ -@serializable trait StatelessActorFormat[T <: Actor] extends Format[T] { +trait StatelessActorFormat[T <: Actor] extends Format[T] with scala.Serializable { def fromBinary(bytes: Array[Byte], act: T) = act def toBinary(ac: T) = Array.empty[Byte] @@ -64,7 +64,7 @@ trait Format[T <: Actor] extends FromBinary[T] with ToBinary[T] * } * */ -@serializable trait SerializerBasedActorFormat[T <: Actor] extends Format[T] { +trait SerializerBasedActorFormat[T <: Actor] extends Format[T] with scala.Serializable { val serializer: Serializer def fromBinary(bytes: Array[Byte], act: T) = serializer.fromBinary(bytes, Some(act.self.actorClass)).asInstanceOf[T] diff --git a/akka-remote/src/main/scala/akka/serialization/Serializer.scala b/akka-remote/src/main/scala/akka/serialization/Serializer.scala index 3a292e0de0..3fc661afce 100644 --- a/akka-remote/src/main/scala/akka/serialization/Serializer.scala +++ b/akka-remote/src/main/scala/akka/serialization/Serializer.scala @@ -17,7 +17,7 @@ import sjson.json.{Serializer => SJSONSerializer} /** * @author Jonas Bonér */ -@serializable trait Serializer { +trait Serializer extends scala.Serializable { @volatile var classLoader: Option[ClassLoader] = None def deepClone(obj: AnyRef): AnyRef = fromBinary(toBinary(obj), Some(obj.getClass)) diff --git a/akka-remote/src/test/scala/remote/AkkaRemoteTest.scala b/akka-remote/src/test/scala/remote/AkkaRemoteTest.scala index 0c7421df0a..9b2b299d25 100644 --- a/akka-remote/src/test/scala/remote/AkkaRemoteTest.scala +++ b/akka-remote/src/test/scala/remote/AkkaRemoteTest.scala @@ -1,18 +1,18 @@ package akka.actor.remote -import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers -import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach} import org.scalatest.junit.JUnitRunner import org.junit.runner.RunWith import 
akka.remote.netty.NettyRemoteSupport import akka.actor. {Actor, ActorRegistry} import java.util.concurrent. {TimeUnit, CountDownLatch} +import org.scalatest.{Spec, WordSpec, BeforeAndAfterAll, BeforeAndAfterEach} +import java.util.concurrent.atomic.AtomicBoolean object AkkaRemoteTest { class ReplyHandlerActor(latch: CountDownLatch, expect: String) extends Actor { def receive = { - case x: String if x == expect => latch.countDown + case x: String if x == expect => latch.countDown() } } } @@ -52,11 +52,104 @@ class AkkaRemoteTest extends override def afterEach() { remote.shutdown - Actor.registry.shutdownAll + Actor.registry.shutdownAll() super.afterEach } /* Utilities */ - def replyHandler(latch: CountDownLatch, expect: String) = Some(Actor.actorOf(new ReplyHandlerActor(latch, expect)).start) -} \ No newline at end of file + def replyHandler(latch: CountDownLatch, expect: String) = Some(Actor.actorOf(new ReplyHandlerActor(latch, expect)).start()) +} + +trait NetworkFailureTest { self: WordSpec => + import akka.actor.Actor._ + import akka.util.Duration + + // override is subclass if needed + val BYTES_PER_SECOND = "60KByte/s" + val DELAY_MILLIS = "350ms" + val PORT_RANGE = "1024-65535" + + // FIXME add support for TCP FIN by hooking into Netty and do socket.close + + def replyWithTcpResetFor(duration: Duration, dead: AtomicBoolean) = { + spawn { + try { + enableTcpReset() + println("===>>> Reply with [TCP RST] for [" + duration + "]") + Thread.sleep(duration.toMillis) + restoreIP + } catch { + case e => + dead.set(true) + e.printStackTrace + } + } + } + + def throttleNetworkFor(duration: Duration, dead: AtomicBoolean) = { + spawn { + try { + enableNetworkThrottling() + println("===>>> Throttling network with [" + BYTES_PER_SECOND + ", " + DELAY_MILLIS + "] for [" + duration + "]") + Thread.sleep(duration.toMillis) + restoreIP + } catch { + case e => + dead.set(true) + e.printStackTrace + } + } + } + + def dropNetworkFor(duration: Duration, dead: AtomicBoolean) = { + 
spawn { + try { + enableNetworkDrop() + println("===>>> Blocking network [TCP DENY] for [" + duration + "]") + Thread.sleep(duration.toMillis) + restoreIP + } catch { + case e => + dead.set(true) + e.printStackTrace + } + } + } + + def sleepFor(duration: Duration) = { + println("===>>> Sleeping for [" + duration + "]") + Thread sleep (duration.toMillis) + } + + def enableNetworkThrottling() = { + restoreIP() + assert(new ProcessBuilder("sudo", "ipfw", "add", "pipe", "1", "ip", "from", "any", "to", "any").start.waitFor == 0) + assert(new ProcessBuilder("sudo", "ipfw", "add", "pipe", "2", "ip", "from", "any", "to", "any").start.waitFor == 0) + assert(new ProcessBuilder("sudo", "ipfw", "pipe", "1", "config", "bw", BYTES_PER_SECOND, "delay", DELAY_MILLIS).start.waitFor == 0) + assert(new ProcessBuilder("sudo", "ipfw", "pipe", "2", "config", "bw", BYTES_PER_SECOND, "delay", DELAY_MILLIS).start.waitFor == 0) + } + + def enableNetworkDrop() = { + restoreIP() + assert(new ProcessBuilder("sudo", "ipfw", "add", "1", "deny", "tcp", "from", "any", "to", "any", PORT_RANGE).start.waitFor == 0) + } + + def enableTcpReset() = { + restoreIP() + assert(new ProcessBuilder("sudo", "ipfw", "add", "1", "reset", "tcp", "from", "any", "to", "any", PORT_RANGE).start.waitFor == 0) + } + + def restoreIP() = { + println("===>>> Restoring network") + assert(new ProcessBuilder("sudo", "ipfw", "del", "pipe", "1").start.waitFor == 0) + assert(new ProcessBuilder("sudo", "ipfw", "del", "pipe", "2").start.waitFor == 0) + assert(new ProcessBuilder("sudo", "ipfw", "flush").start.waitFor == 0) + assert(new ProcessBuilder("sudo", "ipfw", "pipe", "flush").start.waitFor == 0) + } + + def validateSudo() = { + println("===>>> Validating sudo") + assert(new ProcessBuilder("sudo", "-v").start.waitFor == 0) + } +} diff --git a/akka-remote/src/test/scala/remote/ClientInitiatedRemoteActorSpec.scala b/akka-remote/src/test/scala/remote/ClientInitiatedRemoteActorSpec.scala index 36fd4ae586..af5aaffcc3 100644 --- 
a/akka-remote/src/test/scala/remote/ClientInitiatedRemoteActorSpec.scala +++ b/akka-remote/src/test/scala/remote/ClientInitiatedRemoteActorSpec.scala @@ -21,7 +21,7 @@ class RemoteActorSpecActorUnidirectional extends Actor { def receive = { case "OneWay" => - RemoteActorSpecActorUnidirectional.latch.countDown + RemoteActorSpecActorUnidirectional.latch.countDown() } } @@ -42,7 +42,7 @@ class SendOneWayAndReplyReceiverActor extends Actor { class CountDownActor(latch: CountDownLatch) extends Actor { def receive = { - case "World" => latch.countDown + case "World" => latch.countDown() } } /* @@ -59,7 +59,7 @@ class SendOneWayAndReplySenderActor extends Actor { def receive = { case msg: AnyRef => state = Some(msg) - SendOneWayAndReplySenderActor.latch.countDown + SendOneWayAndReplySenderActor.latch.countDown() } }*/ @@ -75,18 +75,18 @@ class MyActorCustomConstructor extends Actor { class ClientInitiatedRemoteActorSpec extends AkkaRemoteTest { "ClientInitiatedRemoteActor" should { "shouldSendOneWay" in { - val clientManaged = remote.actorOf[RemoteActorSpecActorUnidirectional](host,port).start + val clientManaged = remote.actorOf[RemoteActorSpecActorUnidirectional](host,port).start() clientManaged must not be null clientManaged.getClass must be (classOf[LocalActorRef]) clientManaged ! "OneWay" RemoteActorSpecActorUnidirectional.latch.await(1, TimeUnit.SECONDS) must be (true) - clientManaged.stop + clientManaged.stop() } "shouldSendOneWayAndReceiveReply" in { val latch = new CountDownLatch(1) - val actor = remote.actorOf[SendOneWayAndReplyReceiverActor](host,port).start - implicit val sender = Some(actorOf(new CountDownActor(latch)).start) + val actor = remote.actorOf[SendOneWayAndReplyReceiverActor](host,port).start() + implicit val sender = Some(actorOf(new CountDownActor(latch)).start()) actor ! 
"Hello" @@ -94,23 +94,23 @@ class ClientInitiatedRemoteActorSpec extends AkkaRemoteTest { } "shouldSendBangBangMessageAndReceiveReply" in { - val actor = remote.actorOf[RemoteActorSpecActorBidirectional](host,port).start - val result = actor !! "Hello" + val actor = remote.actorOf[RemoteActorSpecActorBidirectional](host,port).start() + val result = actor !! ("Hello", 10000) "World" must equal (result.get.asInstanceOf[String]) - actor.stop + actor.stop() } "shouldSendBangBangMessageAndReceiveReplyConcurrently" in { - val actors = (1 to 10).map(num => { remote.actorOf[RemoteActorSpecActorBidirectional](host,port).start }).toList - actors.map(_ !!! "Hello") foreach { future => + val actors = (1 to 10).map(num => { remote.actorOf[RemoteActorSpecActorBidirectional](host,port).start() }).toList + actors.map(_ !!! ("Hello", 10000)) foreach { future => "World" must equal (future.await.result.asInstanceOf[Option[String]].get) } - actors.foreach(_.stop) + actors.foreach(_.stop()) } "shouldRegisterActorByUuid" in { - val actor1 = remote.actorOf[MyActorCustomConstructor](host, port).start - val actor2 = remote.actorOf[MyActorCustomConstructor](host, port).start + val actor1 = remote.actorOf[MyActorCustomConstructor](host, port).start() + val actor2 = remote.actorOf[MyActorCustomConstructor](host, port).start() actor1 ! "incrPrefix" @@ -122,13 +122,13 @@ class ClientInitiatedRemoteActorSpec extends AkkaRemoteTest { (actor2 !! "test").get must equal ("default-test") - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } "shouldSendAndReceiveRemoteException" in { - val actor = remote.actorOf[RemoteActorSpecActorBidirectional](host, port).start + val actor = remote.actorOf[RemoteActorSpecActorBidirectional](host, port).start() try { implicit val timeout = 500000000L val f = (actor !!! 
"Failure").await.resultOrException @@ -136,7 +136,7 @@ class ClientInitiatedRemoteActorSpec extends AkkaRemoteTest { } catch { case e: ExpectedRemoteProblem => } - actor.stop + actor.stop() } } } diff --git a/akka-remote/src/test/scala/remote/OptimizedLocalScopedSpec.scala b/akka-remote/src/test/scala/remote/OptimizedLocalScopedSpec.scala index f6e0c1806f..d5aeccefa9 100644 --- a/akka-remote/src/test/scala/remote/OptimizedLocalScopedSpec.scala +++ b/akka-remote/src/test/scala/remote/OptimizedLocalScopedSpec.scala @@ -14,7 +14,7 @@ class OptimizedLocalScopedSpec extends AkkaRemoteTest { "An enabled optimized local scoped remote" should { "Fetch local actor ref when scope is local" in { - val fooActor = Actor.actorOf[TestActor].start + val fooActor = Actor.actorOf[TestActor].start() remote.register("foo", fooActor) remote.actorFor("foo", host, port) must be (fooActor) diff --git a/akka-remote/src/test/scala/remote/RemoteErrorHandlingNetworkTest.scala b/akka-remote/src/test/scala/remote/RemoteErrorHandlingNetworkTest.scala new file mode 100644 index 0000000000..6b882310e7 --- /dev/null +++ b/akka-remote/src/test/scala/remote/RemoteErrorHandlingNetworkTest.scala @@ -0,0 +1,119 @@ +package akka.actor.remote + +import java.util.concurrent.{CountDownLatch, TimeUnit} + +import akka.actor.Actor._ +import akka.actor.{ActorRef, Actor} +import akka.util.duration._ +import java.util.concurrent.atomic.AtomicBoolean + +object RemoteErrorHandlingNetworkTest { + case class Send(actor: ActorRef) + + class RemoteActorSpecActorUnidirectional extends Actor { + self.id = "network-drop:unidirectional" + def receive = { + case "Ping" => self.reply_?("Pong") + } + } + + class Decrementer extends Actor { + def receive = { + case "done" => self.reply_?(false) + case i: Int if i > 0 => + self.reply_?(i - 1) + case i: Int => + self.reply_?(0) + this become { + case "done" => self.reply_?(true) + case _ => //Do Nothing + } + } + } + + class RemoteActorSpecActorBidirectional extends Actor { + + 
def receive = { + case "Hello" => + self.reply("World") + case "Failure" => + throw new RuntimeException("Expected exception; to test fault-tolerance") + } + } + + class RemoteActorSpecActorAsyncSender(latch: CountDownLatch) extends Actor { + def receive = { + case Send(actor: ActorRef) => + actor ! "Hello" + case "World" => latch.countDown() + } + } +} + +class RemoteErrorHandlingNetworkTest extends AkkaRemoteTest with NetworkFailureTest { + import RemoteErrorHandlingNetworkTest._ + + "Remote actors" should { + + "be able to recover from network drop without loosing any messages" in { + validateSudo() + val latch = new CountDownLatch(10) + implicit val sender = replyHandler(latch, "Pong") + val service = actorOf[RemoteActorSpecActorUnidirectional] + remote.register(service.id, service) + val actor = remote.actorFor(service.id, 5000L, host, port) + actor ! "Ping" + actor ! "Ping" + actor ! "Ping" + actor ! "Ping" + actor ! "Ping" + val dead = new AtomicBoolean(false) + dropNetworkFor (10 seconds, dead) // drops the network - in another thread - so async + sleepFor (2 seconds) // wait until network drop is done before sending the other messages + try { actor ! "Ping" } catch { case e => () } // queue up messages + try { actor ! "Ping" } catch { case e => () } // ... + try { actor ! "Ping" } catch { case e => () } // ... + try { actor ! "Ping" } catch { case e => () } // ... + try { actor ! "Ping" } catch { case e => () } // ... + latch.await(15, TimeUnit.SECONDS) must be (true) // network should be restored and the messages delivered + dead.get must be (false) + } + + "be able to recover from TCP RESET without loosing any messages" in { + validateSudo() + val latch = new CountDownLatch(10) + implicit val sender = replyHandler(latch, "Pong") + val service = actorOf[RemoteActorSpecActorUnidirectional] + remote.register(service.id, service) + val actor = remote.actorFor(service.id, 5000L, host, port) + actor ! "Ping" + actor ! "Ping" + actor ! "Ping" + actor ! 
"Ping" + actor ! "Ping" + val dead = new AtomicBoolean(false) + replyWithTcpResetFor (10 seconds, dead) + sleepFor (2 seconds) + try { actor ! "Ping" } catch { case e => () } // queue up messages + try { actor ! "Ping" } catch { case e => () } // ... + try { actor ! "Ping" } catch { case e => () } // ... + try { actor ! "Ping" } catch { case e => () } // ... + try { actor ! "Ping" } catch { case e => () } // ... + latch.await(15, TimeUnit.SECONDS) must be (true) + dead.get must be (false) + } +/* + "sendWithBangAndGetReplyThroughSenderRef" in { + remote.register(actorOf[RemoteActorSpecActorBidirectional]) + implicit val timeout = 500000000L + val actor = remote.actorFor( + "akka.actor.remote.ServerInitiatedRemoteActorSpec$RemoteActorSpecActorBidirectional", timeout, host, port) + val latch = new CountDownLatch(1) + val sender = actorOf( new RemoteActorSpecActorAsyncSender(latch) ).start() + sender ! Send(actor) + latch.await(1, TimeUnit.SECONDS) must be (true) + } + */ + } +} + diff --git a/akka-remote/src/test/scala/remote/RemoteSupervisorSpec.scala b/akka-remote/src/test/scala/remote/RemoteSupervisorSpec.scala index 4026418d18..e5ff681dbc 100644 --- a/akka-remote/src/test/scala/remote/RemoteSupervisorSpec.scala +++ b/akka-remote/src/test/scala/remote/RemoteSupervisorSpec.scala @@ -23,7 +23,7 @@ object Log { } } -@serializable class RemotePingPong1Actor extends Actor { +class RemotePingPong1Actor extends Actor with Serializable { def receive = { case "Ping" => Log.messageLog.put("ping") @@ -41,7 +41,7 @@ object Log { } } -@serializable class RemotePingPong2Actor extends Actor { +class RemotePingPong2Actor extends Actor with Serializable { def receive = { case "Ping" => Log.messageLog.put("ping") @@ -55,7 +55,7 @@ object Log { } } -@serializable class RemotePingPong3Actor extends Actor { +class RemotePingPong3Actor extends Actor with Serializable { def receive = { case "Ping" => Log.messageLog.put("ping") @@ -226,7 +226,7 @@ class RemoteSupervisorSpec extends 
AkkaRemoteTest { // Then create a concrete container in which we mix in support for the specific // implementation of the Actors we want to use. - pingpong1 = remote.actorOf[RemotePingPong1Actor](host,port).start + pingpong1 = remote.actorOf[RemotePingPong1Actor](host,port).start() val factory = SupervisorFactory( SupervisorConfig( @@ -240,7 +240,7 @@ class RemoteSupervisorSpec extends AkkaRemoteTest { } def getSingleActorOneForOneSupervisor: Supervisor = { - pingpong1 = remote.actorOf[RemotePingPong1Actor](host,port).start + pingpong1 = remote.actorOf[RemotePingPong1Actor](host,port).start() val factory = SupervisorFactory( SupervisorConfig( @@ -253,9 +253,9 @@ class RemoteSupervisorSpec extends AkkaRemoteTest { } def getMultipleActorsAllForOneConf: Supervisor = { - pingpong1 = remote.actorOf[RemotePingPong1Actor](host,port).start - pingpong2 = remote.actorOf[RemotePingPong2Actor](host,port).start - pingpong3 = remote.actorOf[RemotePingPong3Actor](host,port).start + pingpong1 = remote.actorOf[RemotePingPong1Actor](host,port).start() + pingpong2 = remote.actorOf[RemotePingPong2Actor](host,port).start() + pingpong3 = remote.actorOf[RemotePingPong3Actor](host,port).start() val factory = SupervisorFactory( SupervisorConfig( @@ -276,9 +276,9 @@ class RemoteSupervisorSpec extends AkkaRemoteTest { } def getMultipleActorsOneForOneConf: Supervisor = { - pingpong1 = remote.actorOf[RemotePingPong1Actor](host,port).start - pingpong2 = remote.actorOf[RemotePingPong2Actor](host,port).start - pingpong3 = remote.actorOf[RemotePingPong3Actor](host,port).start + pingpong1 = remote.actorOf[RemotePingPong1Actor](host,port).start() + pingpong2 = remote.actorOf[RemotePingPong2Actor](host,port).start() + pingpong3 = remote.actorOf[RemotePingPong3Actor](host,port).start() val factory = SupervisorFactory( SupervisorConfig( @@ -299,9 +299,9 @@ class RemoteSupervisorSpec extends AkkaRemoteTest { } def getNestedSupervisorsAllForOneConf: Supervisor = { - pingpong1 = 
remote.actorOf[RemotePingPong1Actor](host,port).start - pingpong2 = remote.actorOf[RemotePingPong2Actor](host,port).start - pingpong3 = remote.actorOf[RemotePingPong3Actor](host,port).start + pingpong1 = remote.actorOf[RemotePingPong1Actor](host,port).start() + pingpong2 = remote.actorOf[RemotePingPong2Actor](host,port).start() + pingpong3 = remote.actorOf[RemotePingPong3Actor](host,port).start() val factory = SupervisorFactory( SupervisorConfig( diff --git a/akka-remote/src/test/scala/remote/RemoteTypedActorSpec.scala b/akka-remote/src/test/scala/remote/RemoteTypedActorSpec.scala index c91565eec7..988236b85b 100644 --- a/akka-remote/src/test/scala/remote/RemoteTypedActorSpec.scala +++ b/akka-remote/src/test/scala/remote/RemoteTypedActorSpec.scala @@ -10,7 +10,7 @@ import akka.actor._ import java.util.concurrent.{LinkedBlockingQueue, TimeUnit, BlockingQueue} import akka.config. {RemoteAddress, Config, TypedActorConfigurator} -import akka.Testing +import akka.testing._ object RemoteTypedActorLog { val messageLog: BlockingQueue[String] = new LinkedBlockingQueue[String] @@ -39,13 +39,13 @@ class RemoteTypedActorSpec extends AkkaRemoteTest { classOf[RemoteTypedActorOne], classOf[RemoteTypedActorOneImpl], Permanent, - Testing.time(10000), + Testing.testTime(20000), RemoteAddress(host,port)), new SuperviseTypedActor( classOf[RemoteTypedActorTwo], classOf[RemoteTypedActorTwoImpl], Permanent, - Testing.time(10000), + Testing.testTime(20000), RemoteAddress(host,port)) ).toArray).supervise } diff --git a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSample.scala b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSample.scala index 6b11f73f10..cae866e6e2 100644 --- a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSample.scala +++ b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSample.scala @@ -25,7 +25,7 @@ Have fun. 
*************************************/ class HelloWorldActor extends Actor { - self.start + self.start() def receive = { case "Hello" => self.reply("World") diff --git a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala index 88a5ec8ec3..b8f4eb2748 100644 --- a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala +++ b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala @@ -42,7 +42,7 @@ object ServerInitiatedRemoteActorSpec { def receive = { case Send(actor: ActorRef) => actor ! "Hello" - case "World" => latch.countDown + case "World" => latch.countDown() } } } @@ -73,7 +73,7 @@ class ServerInitiatedRemoteActorSpec extends AkkaRemoteTest { val actor = remote.actorFor( "akka.actor.remote.ServerInitiatedRemoteActorSpec$RemoteActorSpecActorBidirectional", timeout,host, port) val latch = new CountDownLatch(1) - val sender = actorOf( new RemoteActorSpecActorAsyncSender(latch) ).start + val sender = actorOf( new RemoteActorSpecActorAsyncSender(latch) ).start() sender ! 
Send(actor) latch.await(1, TimeUnit.SECONDS) must be (true) } @@ -163,7 +163,7 @@ class ServerInitiatedRemoteActorSpec extends AkkaRemoteTest { val actor1 = actorOf[RemoteActorSpecActorUnidirectional] remote.register("foo", actor1) val latch = new CountDownLatch(1) - val actor2 = actorOf(new Actor { def receive = { case "Pong" => latch.countDown } }).start + val actor2 = actorOf(new Actor { def receive = { case "Pong" => latch.countDown() } }).start() val remoteActor = remote.actorFor("foo", host, port) remoteActor.!("Ping")(Some(actor2)) @@ -191,7 +191,7 @@ class ServerInitiatedRemoteActorSpec extends AkkaRemoteTest { if (latch.await(200, TimeUnit.MILLISECONDS)) error("Test didn't complete within 100 cycles") else - latch.countDown + latch.countDown() } val decrementers = Actor.registry.actorsFor[Decrementer] diff --git a/akka-remote/src/test/scala/remote/UnOptimizedLocalScopedSpec.scala b/akka-remote/src/test/scala/remote/UnOptimizedLocalScopedSpec.scala index 001a66eae0..6c6efd9f97 100644 --- a/akka-remote/src/test/scala/remote/UnOptimizedLocalScopedSpec.scala +++ b/akka-remote/src/test/scala/remote/UnOptimizedLocalScopedSpec.scala @@ -13,7 +13,7 @@ class UnOptimizedLocalScopedSpec extends AkkaRemoteTest { "An enabled optimized local scoped remote" should { "Fetch remote actor ref when scope is local" in { - val fooActor = Actor.actorOf[TestActor].start + val fooActor = Actor.actorOf[TestActor].start() remote.register("foo", fooActor) remote.actorFor("foo", host, port) must not be (fooActor) diff --git a/akka-remote/src/test/scala/serialization/ScalaJSONSerializerSpec.scala b/akka-remote/src/test/scala/serialization/ScalaJSONSerializerSpec.scala index 02b29e6de1..cd8f71058e 100644 --- a/akka-remote/src/test/scala/serialization/ScalaJSONSerializerSpec.scala +++ b/akka-remote/src/test/scala/serialization/ScalaJSONSerializerSpec.scala @@ -8,7 +8,7 @@ import org.junit.runner.RunWith import akka.serialization.Serializer.ScalaJSON //TODO: FIXME WHY IS THIS COMMENTED 
OUT? -/* + object Protocols { import sjson.json.DefaultProtocol._ case class Shop(store: String, item: String, price: Int) @@ -51,4 +51,3 @@ class ScalaJSONSerializerSpec extends } } } -*/ diff --git a/akka-remote/src/test/scala/serialization/SerializableTypeClassActorSpec.scala b/akka-remote/src/test/scala/serialization/SerializableTypeClassActorSpec.scala index 2eec948698..39584726f9 100644 --- a/akka-remote/src/test/scala/serialization/SerializableTypeClassActorSpec.scala +++ b/akka-remote/src/test/scala/serialization/SerializableTypeClassActorSpec.scala @@ -66,63 +66,63 @@ class SerializableTypeClassActorSpec extends it("should be able to serialize and de-serialize a stateful actor") { import BinaryFormatMyActor._ - val actor1 = actorOf[MyActor].start + val actor1 = actorOf[MyActor].start() (actor1 !! "hello").getOrElse("_") should equal("world 1") (actor1 !! "hello").getOrElse("_") should equal("world 2") val bytes = toBinary(actor1) val actor2 = fromBinary(bytes) - actor2.start + actor2.start() (actor2 !! "hello").getOrElse("_") should equal("world 3") } it("should be able to serialize and de-serialize a stateful actor with compound state") { import BinaryFormatMyActorWithDualCounter._ - val actor1 = actorOf[MyActorWithDualCounter].start + val actor1 = actorOf[MyActorWithDualCounter].start() (actor1 !! "hello").getOrElse("_") should equal("world 1 1") (actor1 !! "hello").getOrElse("_") should equal("world 2 2") val bytes = toBinary(actor1) val actor2 = fromBinary(bytes) - actor2.start + actor2.start() (actor2 !! "hello").getOrElse("_") should equal("world 3 3") } it("should be able to serialize and de-serialize a stateless actor") { import BinaryFormatMyStatelessActor._ - val actor1 = actorOf[MyStatelessActor].start + val actor1 = actorOf[MyStatelessActor].start() (actor1 !! "hello").getOrElse("_") should equal("world") (actor1 !! 
"hello").getOrElse("_") should equal("world") val bytes = toBinary(actor1) val actor2 = fromBinary(bytes) - actor2.start + actor2.start() (actor2 !! "hello").getOrElse("_") should equal("world") } it("should be able to serialize and de-serialize a stateful actor with a given serializer") { import BinaryFormatMyJavaSerializableActor._ - val actor1 = actorOf[MyJavaSerializableActor].start + val actor1 = actorOf[MyJavaSerializableActor].start() (actor1 !! "hello").getOrElse("_") should equal("world 1") (actor1 !! "hello").getOrElse("_") should equal("world 2") val bytes = toBinary(actor1) val actor2 = fromBinary(bytes) - actor2.start + actor2.start() (actor2 !! "hello").getOrElse("_") should equal("world 3") actor2.receiveTimeout should equal (Some(1000)) - actor1.stop - actor2.stop + actor1.stop() + actor2.stop() } it("should be able to serialize and deserialize a MyStatelessActorWithMessagesInMailbox") { import BinaryFormatMyStatelessActorWithMessagesInMailbox._ - val actor1 = actorOf[MyStatelessActorWithMessagesInMailbox].start + val actor1 = actorOf[MyStatelessActorWithMessagesInMailbox].start() (actor1 ! "hello") (actor1 ! "hello") (actor1 ! "hello") @@ -147,7 +147,7 @@ class SerializableTypeClassActorSpec extends it("should be able to serialize and de-serialize an Actor hotswapped with 'become'") { import BinaryFormatMyActor._ - val actor1 = actorOf[MyActor].start + val actor1 = actorOf[MyActor].start() (actor1 !! "hello").getOrElse("_") should equal("world 1") (actor1 !! "hello").getOrElse("_") should equal("world 2") actor1 ! "swap" @@ -155,7 +155,7 @@ class SerializableTypeClassActorSpec extends val bytes = toBinary(actor1) val actor2 = fromBinary(bytes) - actor2.start + actor2.start() (actor1 !! 
"hello").getOrElse("_") should equal("swapped") @@ -166,7 +166,7 @@ class SerializableTypeClassActorSpec extends it("should be able to serialize and de-serialize an hotswapped actor") { import BinaryFormatMyActor._ - val actor1 = actorOf[MyActor].start + val actor1 = actorOf[MyActor].start() (actor1 !! "hello").getOrElse("_") should equal("world 1") (actor1 !! "hello").getOrElse("_") should equal("world 2") actor1 ! HotSwap { @@ -177,7 +177,7 @@ class SerializableTypeClassActorSpec extends val bytes = toBinary(actor1) val actor2 = fromBinary(bytes) - actor2.start + actor2.start() (actor1 !! "hello").getOrElse("_") should equal("swapped") @@ -190,7 +190,7 @@ class SerializableTypeClassActorSpec extends it("should serialize and de-serialize") { import BinaryFormatMyActorWithSerializableMessages._ - val actor1 = actorOf[MyActorWithSerializableMessages].start + val actor1 = actorOf[MyActorWithSerializableMessages].start() (actor1 ! MyMessage("hello1", ("akka", 100))) (actor1 ! MyMessage("hello2", ("akka", 200))) (actor1 ! 
MyMessage("hello3", ("akka", 300))) @@ -221,7 +221,7 @@ class MyActorWithDualCounter extends Actor { } } -@serializable class MyActor extends Actor { +class MyActor extends Actor with scala.Serializable { var count = 0 def receive = { @@ -249,7 +249,7 @@ class MyStatelessActorWithMessagesInMailbox extends Actor { } } -@serializable class MyJavaSerializableActor extends Actor { +class MyJavaSerializableActor extends Actor with scala.Serializable { var count = 0 self.receiveTimeout = Some(1000) diff --git a/akka-remote/src/test/scala/serialization/Ticket435Spec.scala b/akka-remote/src/test/scala/serialization/Ticket435Spec.scala index a6193d9914..1697367b33 100644 --- a/akka-remote/src/test/scala/serialization/Ticket435Spec.scala +++ b/akka-remote/src/test/scala/serialization/Ticket435Spec.scala @@ -39,7 +39,7 @@ class Ticket435Spec extends it("should be able to serialize and deserialize a stateless actor with messages in mailbox") { import BinaryFormatMyStatelessActorWithMessagesInMailbox._ - val actor1 = actorOf[MyStatelessActorWithMessagesInMailbox].start + val actor1 = actorOf[MyStatelessActorWithMessagesInMailbox].start() (actor1 ! "hello") (actor1 ! "hello") (actor1 ! "hello") @@ -65,7 +65,7 @@ class Ticket435Spec extends it("should serialize the mailbox optionally") { import BinaryFormatMyStatelessActorWithMessagesInMailbox._ - val actor1 = actorOf[MyStatelessActorWithMessagesInMailbox].start + val actor1 = actorOf[MyStatelessActorWithMessagesInMailbox].start() (actor1 ! "hello") (actor1 ! "hello") (actor1 ! "hello") @@ -87,7 +87,7 @@ class Ticket435Spec extends it("should be able to serialize and deserialize a stateful actor with messages in mailbox") { import BinaryFormatMyStatefulActor._ - val actor1 = actorOf[MyStatefulActor].start + val actor1 = actorOf[MyStatefulActor].start() (actor1 ! "hi") (actor1 ! "hi") (actor1 ! 
"hi") diff --git a/akka-remote/src/test/scala/serialization/UntypedActorSerializationSpec.scala b/akka-remote/src/test/scala/serialization/UntypedActorSerializationSpec.scala index b9752d2d55..e33de43571 100644 --- a/akka-remote/src/test/scala/serialization/UntypedActorSerializationSpec.scala +++ b/akka-remote/src/test/scala/serialization/UntypedActorSerializationSpec.scala @@ -43,37 +43,37 @@ class UntypedActorSerializationSpec extends describe("Serializable untyped actor") { it("should be able to serialize and de-serialize a stateful untyped actor") { - val actor1 = Actors.actorOf(classOf[MyUntypedActor]).start + val actor1 = Actors.actorOf(classOf[MyUntypedActor]).start() actor1.sendRequestReply("hello") should equal("world 1") actor1.sendRequestReply("debasish") should equal("hello debasish 2") val f = new MyUntypedActorFormat val bytes = toBinaryJ(actor1, f) val actor2 = fromBinaryJ(bytes, f) - actor2.start + actor2.start() actor2.sendRequestReply("hello") should equal("world 3") } it("should be able to serialize and de-serialize a stateful actor with compound state") { - val actor1 = actorOf[MyUntypedActorWithDualCounter].start + val actor1 = actorOf[MyUntypedActorWithDualCounter].start() actor1.sendRequestReply("hello") should equal("world 1 1") actor1.sendRequestReply("hello") should equal("world 2 2") val f = new MyUntypedActorWithDualCounterFormat val bytes = toBinaryJ(actor1, f) val actor2 = fromBinaryJ(bytes, f) - actor2.start + actor2.start() actor2.sendRequestReply("hello") should equal("world 3 3") } it("should be able to serialize and de-serialize a stateless actor") { - val actor1 = actorOf[MyUntypedStatelessActor].start + val actor1 = actorOf[MyUntypedStatelessActor].start() actor1.sendRequestReply("hello") should equal("world") actor1.sendRequestReply("hello") should equal("world") val bytes = toBinaryJ(actor1, MyUntypedStatelessActorFormat) val actor2 = fromBinaryJ(bytes, MyUntypedStatelessActorFormat) - actor2.start + actor2.start() 
actor2.sendRequestReply("hello") should equal("world") } } diff --git a/akka-remote/src/test/scala/ticket/Ticket506Spec.scala b/akka-remote/src/test/scala/ticket/Ticket506Spec.scala index cd58c4a6bd..edd65fe47f 100644 --- a/akka-remote/src/test/scala/ticket/Ticket506Spec.scala +++ b/akka-remote/src/test/scala/ticket/Ticket506Spec.scala @@ -16,7 +16,7 @@ class ActorRefService(latch: CountDownLatch) extends Actor { case RecvActorRef(bytes) => val ref = RemoteActorSerialization.fromBinaryToRemoteActorRef(bytes) ref ! "hello" - case "hello" => latch.countDown + case "hello" => latch.countDown() } } diff --git a/akka-samples/akka-sample-ants/src/main/scala/Ants.scala b/akka-samples/akka-sample-ants/src/main/scala/Ants.scala index 1eb38d6f0f..ebcf1d0a79 100644 --- a/akka-samples/akka-sample-ants/src/main/scala/Ants.scala +++ b/akka-samples/akka-sample-ants/src/main/scala/Ants.scala @@ -65,7 +65,7 @@ object World { val homeOff = Dim / 4 lazy val places = Vector.fill(Dim, Dim)(new Place) lazy val ants = setup - lazy val evaporator = actorOf[Evaporator].start + lazy val evaporator = actorOf[Evaporator].start() private val snapshotFactory = TransactionFactory(readonly = true, familyName = "snapshot") @@ -81,7 +81,7 @@ object World { for (x <- homeRange; y <- homeRange) yield { place(x, y).makeHome place(x, y) enter Ant(randomInt(8)) - actorOf(new AntActor(x, y)).start + actorOf(new AntActor(x, y)).start() } } diff --git a/akka-samples/akka-sample-chat/README b/akka-samples/akka-sample-chat/README new file mode 100644 index 0000000000..3606097d6f --- /dev/null +++ b/akka-samples/akka-sample-chat/README @@ -0,0 +1,26 @@ +Akka Chat Client/Server Sample Application + +How to run the sample: + +1. Fire up two shells. For each of them: + - Step down into to the root of the Akka distribution. + - Set 'export AKKA_HOME=. + - Run 'sbt console' to start up a REPL (interpreter). +2. 
In the first REPL you get execute: + - scala> import sample.chat._ + - scala> import akka.actor.Actor._ + - scala> val chatService = actorOf[ChatService].start() +3. In the second REPL you get execute: + - scala> import sample.chat._ + - scala> ClientRunner.run +4. See the chat simulation run. +5. Run it again to see full speed after first initialization. +6. In the client REPL, or in a new REPL, you can also create your own client + - scala> import sample.chat._ + - scala> val myClient = new ChatClient("") + - scala> myClient.login + - scala> myClient.post("Can I join?") + - scala> println("CHAT LOG:\n\t" + myClient.chatLog.log.mkString("\n\t")) + +That’s it. Have fun. + diff --git a/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala b/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala new file mode 100644 index 0000000000..a19ed26da0 --- /dev/null +++ b/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala @@ -0,0 +1,254 @@ + /** + * Copyright (C) 2009-2010 Scalable Solutions AB . + */ + + package sample.chat + + import scala.collection.mutable.HashMap + + import akka.actor.{SupervisorFactory, Actor, ActorRef} + import akka.stm._ + import akka.config.Supervision.{OneForOneStrategy,Permanent} + import Actor._ + import akka.event.EventHandler + + /****************************************************************************** + Akka Chat Client/Server Sample Application + + How to run the sample: + + 1. Fire up two shells. For each of them: + - Step down into to the root of the Akka distribution. + - Set 'export AKKA_HOME=. + - Run 'sbt console' to start up a REPL (interpreter). + 2. In the first REPL you get execute: + - scala> import sample.chat._ + - scala> import akka.actor.Actor._ + - scala> val chatService = actorOf[ChatService].start() + 3. In the second REPL you get execute: + - scala> import sample.chat._ + - scala> ClientRunner.run + 4. See the chat simulation run. + 5. 
Run it again to see full speed after first initialization. + 6. In the client REPL, or in a new REPL, you can also create your own client + - scala> import sample.chat._ + - scala> val myClient = new ChatClient("") + - scala> myClient.login + - scala> myClient.post("Can I join?") + - scala> println("CHAT LOG:\n\t" + myClient.chatLog.log.mkString("\n\t")) + + + That’s it. Have fun. + + ******************************************************************************/ + + /** + * ChatServer's internal events. + */ + sealed trait Event + case class Login(user: String) extends Event + case class Logout(user: String) extends Event + case class GetChatLog(from: String) extends Event + case class ChatLog(log: List[String]) extends Event + case class ChatMessage(from: String, message: String) extends Event + + /** + * Chat client. + */ + class ChatClient(val name: String) { + val chat = Actor.remote.actorFor("chat:service", "localhost", 2552) + + def login = chat ! Login(name) + def logout = chat ! Logout(name) + def post(message: String) = chat ! ChatMessage(name, name + ": " + message) + def chatLog = (chat !! GetChatLog(name)).as[ChatLog].getOrElse(throw new Exception("Couldn't get the chat log from ChatServer")) + } + + /** + * Internal chat client session. + */ + class Session(user: String, storage: ActorRef) extends Actor { + private val loginTime = System.currentTimeMillis + private var userLog: List[String] = Nil + + EventHandler.info(this, "New session for user [%s] has been created at [%s]".format(user, loginTime)) + + def receive = { + case msg @ ChatMessage(from, message) => + userLog ::= message + storage ! msg + + case msg @ GetChatLog(_) => + storage forward msg + } + } + + /** + * Abstraction of chat storage holding the chat log. + */ + trait ChatStorage extends Actor + + /** + * Memory-backed chat storage implementation. 
+ */ + class MemoryChatStorage extends ChatStorage { + self.lifeCycle = Permanent + + private var chatLog = TransactionalVector[Array[Byte]]() + + EventHandler.info(this, "Memory-based chat storage is starting up...") + + def receive = { + case msg @ ChatMessage(from, message) => + EventHandler.debug(this, "New chat message [%s]".format(message)) + atomic { chatLog + message.getBytes("UTF-8") } + + case GetChatLog(_) => + val messageList = atomic { chatLog.map(bytes => new String(bytes, "UTF-8")).toList } + self.reply(ChatLog(messageList)) + } + + override def postRestart(reason: Throwable) = chatLog = TransactionalVector() + } + + /** + * Implements user session management. + *

+ * Uses self-type annotation (this: Actor =>) to declare that it needs to be mixed in with an Actor. + */ + trait SessionManagement { this: Actor => + + val storage: ActorRef // needs someone to provide the ChatStorage + val sessions = new HashMap[String, ActorRef] + + protected def sessionManagement: Receive = { + case Login(username) => + EventHandler.info(this, "User [%s] has logged in".format(username)) + val session = actorOf(new Session(username, storage)) + session.start() + sessions += (username -> session) + + case Logout(username) => + EventHandler.info(this, "User [%s] has logged out".format(username)) + val session = sessions(username) + session.stop() + sessions -= username + } + + protected def shutdownSessions = + sessions.foreach { case (_, session) => session.stop() } + } + + /** + * Implements chat management, e.g. chat message dispatch. + *

+ * Uses self-type annotation (this: Actor =>) to declare that it needs to be mixed in with an Actor. + */ + trait ChatManagement { this: Actor => + val sessions: HashMap[String, ActorRef] // needs someone to provide the Session map + + protected def chatManagement: Receive = { + case msg @ ChatMessage(from, _) => getSession(from).foreach(_ ! msg) + case msg @ GetChatLog(from) => getSession(from).foreach(_ forward msg) + } + + private def getSession(from: String) : Option[ActorRef] = { + if (sessions.contains(from)) + Some(sessions(from)) + else { + EventHandler.info(this, "Session expired for %s".format(from)) + None + } + } + } + + /** + * Creates and links a MemoryChatStorage. + */ + trait MemoryChatStorageFactory { this: Actor => + val storage = this.self.spawnLink[MemoryChatStorage] // starts and links ChatStorage + } + + /** + * Chat server. Manages sessions and redirects all other messages to the Session for the client. + */ + trait ChatServer extends Actor { + self.faultHandler = OneForOneStrategy(List(classOf[Exception]),5, 5000) + val storage: ActorRef + + EventHandler.info(this, "Chat server is starting up...") + + // actor message handler + def receive: Receive = sessionManagement orElse chatManagement + + // abstract methods to be defined somewhere else + protected def chatManagement: Receive + protected def sessionManagement: Receive + protected def shutdownSessions(): Unit + + override def postStop = { + EventHandler.info(this, "Chat server is shutting down...") + shutdownSessions + self.unlink(storage) + storage.stop() + } + } + + /** + * Class encapsulating the full Chat Service. + * Start service by invoking: + *

+   * val chatService = Actor.actorOf[ChatService].start()
+   * 
+ */ + class ChatService extends + ChatServer with + SessionManagement with + ChatManagement with + MemoryChatStorageFactory { + override def preStart = { + remote.start("localhost", 2552); + remote.register("chat:service", self) //Register the actor with the specified service id + } + } + + /** + * Test runner starting ChatService. + */ + object ServerRunner { + + def main(args: Array[String]): Unit = ServerRunner.run + + def run = { + actorOf[ChatService].start() + } + } + + /** + * Test runner emulating a chat session. + */ + object ClientRunner { + + def main(args: Array[String]): Unit = ClientRunner.run + + def run = { + + val client1 = new ChatClient("jonas") + client1.login + val client2 = new ChatClient("patrik") + client2.login + + client1.post("Hi there") + println("CHAT LOG:\n\t" + client1.chatLog.log.mkString("\n\t")) + + client2.post("Hello") + println("CHAT LOG:\n\t" + client2.chatLog.log.mkString("\n\t")) + + client1.post("Hi again") + println("CHAT LOG:\n\t" + client1.chatLog.log.mkString("\n\t")) + + client1.logout + client2.logout + } + } + diff --git a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala index 5a2c580e5e..6c836b290a 100644 --- a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala +++ b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala @@ -127,11 +127,11 @@ class Hakker(name: String,left: ActorRef, right: ActorRef) extends Actor { object DiningHakkers { def run { //Create 5 chopsticks - val chopsticks = for(i <- 1 to 5) yield actorOf(new Chopstick("Chopstick "+i)).start + val chopsticks = for(i <- 1 to 5) yield actorOf(new Chopstick("Chopstick "+i)).start() //Create 5 awesome hakkers and assign them their left and right chopstick val hakkers = for { (name,i) <- List("Ghosh","Bonér","Klang","Krasser","Manie").zipWithIndex - } yield actorOf(new Hakker(name,chopsticks(i),chopsticks((i+1) % 5))).start 
+ } yield actorOf(new Hakker(name,chopsticks(i),chopsticks((i+1) % 5))).start() //Signal all hakkers that they should start thinking, and watch the show hakkers.foreach(_ ! Think) diff --git a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala index 3273136690..e63dbc3dfa 100644 --- a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala +++ b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala @@ -168,11 +168,11 @@ object DiningHakkersOnFsm { def run = { // Create 5 chopsticks - val chopsticks = for (i <- 1 to 5) yield actorOf(new Chopstick("Chopstick " + i)).start + val chopsticks = for (i <- 1 to 5) yield actorOf(new Chopstick("Chopstick " + i)).start() // Create 5 awesome fsm hakkers and assign them their left and right chopstick val hakkers = for{ (name, i) <- List("Ghosh", "Bonér", "Klang", "Krasser", "Manie").zipWithIndex - } yield actorOf(new FSMHakker(name, chopsticks(i), chopsticks((i + 1) % 5))).start + } yield actorOf(new FSMHakker(name, chopsticks(i), chopsticks((i + 1) % 5))).start() hakkers.foreach(_ ! Think) } diff --git a/akka-samples/akka-sample-remote/src/main/scala/ClientManagedRemoteActorSample.scala b/akka-samples/akka-sample-remote/src/main/scala/ClientManagedRemoteActorSample.scala index 3477ff5783..42450b0b39 100644 --- a/akka-samples/akka-sample-remote/src/main/scala/ClientManagedRemoteActorSample.scala +++ b/akka-samples/akka-sample-remote/src/main/scala/ClientManagedRemoteActorSample.scala @@ -26,7 +26,7 @@ object ClientManagedRemoteActorServer { object ClientManagedRemoteActorClient { def run = { - val actor = remote.actorOf[RemoteHelloWorldActor]("localhost",2552).start + val actor = remote.actorOf[RemoteHelloWorldActor]("localhost",2552).start() val result = actor !! 
"Hello" } diff --git a/akka-sbt-plugin/src/main/scala/AkkaProject.scala b/akka-sbt-plugin/src/main/scala/AkkaProject.scala deleted file mode 100644 index fdecf65276..0000000000 --- a/akka-sbt-plugin/src/main/scala/AkkaProject.scala +++ /dev/null @@ -1,72 +0,0 @@ -import sbt._ - -object AkkaRepositories { - val AkkaRepo = MavenRepository("Akka Repository", "http://akka.io/repository") - val ScalaToolsRepo = MavenRepository("Scala-Tools Repo", "http://scala-tools.org/repo-releases") - val ClojarsRepo = MavenRepository("Clojars Repo", "http://clojars.org/repo") - val CodehausRepo = MavenRepository("Codehaus Repo", "http://repository.codehaus.org") - val GuiceyFruitRepo = MavenRepository("GuiceyFruit Repo", "http://guiceyfruit.googlecode.com/svn/repo/releases/") - val JBossRepo = MavenRepository("JBoss Repo", "http://repository.jboss.org/nexus/content/groups/public/") - val JavaNetRepo = MavenRepository("java.net Repo", "http://download.java.net/maven/2") - val MsgPackRepo = MavenRepository("Message Pack Releases Repo","http://msgpack.sourceforge.net/maven2/") - val SonatypeSnapshotRepo = MavenRepository("Sonatype OSS Repo", "http://oss.sonatype.org/content/repositories/releases") - val SunJDMKRepo = MavenRepository("Sun JDMK Repo", "http://wp5.e-taxonomy.eu/cdmlib/mavenrepo") - val TerrastoreRepo = MavenRepository("Terrastore Releases Repo", "http://m2.terrastore.googlecode.com/hg/repo") - val ZookeeperRepo = MavenRepository("Zookeeper Repo", "http://lilycms.org/maven/maven2/deploy/") -} - -trait AkkaBaseProject extends BasicScalaProject { - import AkkaRepositories._ - - // Every dependency that cannot be resolved from the built-in repositories (Maven Central and Scala Tools Releases) - // is resolved from a ModuleConfiguration. This will result in a significant acceleration of the update action. 
- - // for development version resolve to .ivy2/local - // release: val akkaModuleConfig = ModuleConfiguration("se.scalablesolutions.akka", AkkaRepo) - - val aspectwerkzModuleConfig = ModuleConfiguration("org.codehaus.aspectwerkz", AkkaRepo) - val cassandraModuleConfig = ModuleConfiguration("org.apache.cassandra", AkkaRepo) - val eaioModuleConfig = ModuleConfiguration("com.eaio", AkkaRepo) - val facebookModuleConfig = ModuleConfiguration("com.facebook", AkkaRepo) - val h2lzfModuleConfig = ModuleConfiguration("voldemort.store.compress", AkkaRepo) - val hbaseModuleConfig = ModuleConfiguration("org.apache.hbase", AkkaRepo) - val memcachedModuleConfig = ModuleConfiguration("spy", "memcached", AkkaRepo) - val netLagModuleConfig = ModuleConfiguration("net.lag", AkkaRepo) - val redisModuleConfig = ModuleConfiguration("com.redis", AkkaRepo) - val sjsonModuleConfig = ModuleConfiguration("sjson.json", AkkaRepo) - val triforkModuleConfig = ModuleConfiguration("com.trifork", AkkaRepo) - val vscaladocModuleConfig = ModuleConfiguration("org.scala-tools", "vscaladoc", "1.1-md-3", AkkaRepo) - - val args4jModuleConfig = ModuleConfiguration("args4j", JBossRepo) - val atmosphereModuleConfig = ModuleConfiguration("org.atmosphere", SonatypeSnapshotRepo) - val casbahModuleConfig = ModuleConfiguration("com.mongodb.casbah", ScalaToolsRepo) - val grizzlyModuleConfig = ModuleConfiguration("com.sun.grizzly", JavaNetRepo) - val guiceyFruitModuleConfig = ModuleConfiguration("org.guiceyfruit", GuiceyFruitRepo) - val jbossModuleConfig = ModuleConfiguration("org.jboss", JBossRepo) - val jdmkModuleConfig = ModuleConfiguration("com.sun.jdmk", SunJDMKRepo) - val jmsModuleConfig = ModuleConfiguration("javax.jms", SunJDMKRepo) - val jmxModuleConfig = ModuleConfiguration("com.sun.jmx", SunJDMKRepo) - val jerseyContrModuleConfig = ModuleConfiguration("com.sun.jersey.contribs", JavaNetRepo) - val jerseyModuleConfig = ModuleConfiguration("com.sun.jersey", JavaNetRepo) - val jgroupsModuleConfig = 
ModuleConfiguration("jgroups", JBossRepo) - val jsr166yModuleConfig = ModuleConfiguration("jsr166y", TerrastoreRepo) - val msgPackModuleConfig = ModuleConfiguration("org.msgpack", MsgPackRepo) - val multiverseModuleConfig = ModuleConfiguration("org.multiverse", CodehausRepo) - val nettyModuleConfig = ModuleConfiguration("org.jboss.netty", JBossRepo) - val resteasyModuleConfig = ModuleConfiguration("org.jboss.resteasy", JBossRepo) - val scannotationModuleConfig= ModuleConfiguration("org.scannotation", JBossRepo) - val terrastoreModuleConfig = ModuleConfiguration("terrastore", TerrastoreRepo) - val timeModuleConfig = ModuleConfiguration("org.scala-tools", "time", ScalaToolsRepo) - val voldemortModuleConfig = ModuleConfiguration("voldemort", ClojarsRepo) - val zookeeperModuleConfig = ModuleConfiguration("org.apache.hadoop.zookeeper", ZookeeperRepo) -} - -trait AkkaProject extends AkkaBaseProject { - val akkaVersion = "1.1-SNAPSHOT" - - // convenience method - def akkaModule(module: String) = "se.scalablesolutions.akka" % ("akka-" + module) % akkaVersion - - // akka actor dependency by default - val akkaActor = akkaModule("actor") -} diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala index fa1d4e713e..b8fb70f491 100644 --- a/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala +++ b/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala @@ -39,7 +39,7 @@ class Slf4jEventHandler extends Actor with Logging { def receive = { case Error(cause, instance, message) => log.error("\n\t[{}]\n\t[{}]\n\t[{}]", - Array[AnyRef](instance.getClass.getName, message, stackTraceFor(cause))) + Array[Any](instance.getClass.getName, message, stackTraceFor(cause))) case Warning(instance, message) => log.warn("\n\t[{}]\n\t[{}]", instance.getClass.getName, message) diff --git a/akka-stm/src/main/scala/akka/agent/Agent.scala b/akka-stm/src/main/scala/akka/agent/Agent.scala index 2332f28b13..80db8bff21 100644 --- 
a/akka-stm/src/main/scala/akka/agent/Agent.scala +++ b/akka-stm/src/main/scala/akka/agent/Agent.scala @@ -94,7 +94,7 @@ object Agent { */ class Agent[T](initialValue: T) { private[akka] val ref = Ref(initialValue) - private[akka] val updater = Actor.actorOf(new AgentUpdater(this)).start + private[akka] val updater = Actor.actorOf(new AgentUpdater(this)).start() /** * Read the internal state of the agent. @@ -135,7 +135,7 @@ class Agent[T](initialValue: T) { */ def sendOff(f: T => T): Unit = send((value: T) => { suspend - val threadBased = Actor.actorOf(new ThreadBasedAgentUpdater(this)).start + val threadBased = Actor.actorOf(new ThreadBasedAgentUpdater(this)).start() threadBased ! Update(f) value }) @@ -182,7 +182,7 @@ class Agent[T](initialValue: T) { * Closes the agents and makes it eligable for garbage collection. * A closed agent cannot accept any `send` actions. */ - def close() = updater.stop + def close() = updater.stop() // --------------------------------------------- // Support for Java API Functions and Procedures @@ -250,8 +250,8 @@ class ThreadBasedAgentUpdater[T](agent: Agent[T]) extends Actor { case update: Update[T] => { atomic(txFactory) { agent.ref alter update.function } agent.resume - self.stop + self.stop() } - case _ => self.stop + case _ => self.stop() } } diff --git a/akka-stm/src/main/scala/akka/stm/Ref.scala b/akka-stm/src/main/scala/akka/stm/Ref.scala index 74b1bf5a9e..5d1aa9dc96 100644 --- a/akka-stm/src/main/scala/akka/stm/Ref.scala +++ b/akka-stm/src/main/scala/akka/stm/Ref.scala @@ -11,7 +11,7 @@ import org.multiverse.transactional.refs.BasicRef /** * Common trait for all the transactional objects. 
*/ -@serializable trait Transactional { +trait Transactional extends Serializable { val uuid: String } diff --git a/akka-stm/src/test/java/akka/transactor/example/UntypedCoordinatedCounter.java b/akka-stm/src/test/java/akka/transactor/example/UntypedCoordinatedCounter.java index 658ec71ff4..0fd24ac9a7 100644 --- a/akka-stm/src/test/java/akka/transactor/example/UntypedCoordinatedCounter.java +++ b/akka-stm/src/test/java/akka/transactor/example/UntypedCoordinatedCounter.java @@ -7,7 +7,7 @@ import akka.actor.UntypedActor; import akka.stm.Ref; public class UntypedCoordinatedCounter extends UntypedActor { - private Ref count = new Ref(0); + private Ref count = new Ref(0); private void increment() { System.out.println("incrementing"); diff --git a/akka-stm/src/test/java/akka/transactor/test/UntypedCoordinatedCounter.java b/akka-stm/src/test/java/akka/transactor/test/UntypedCoordinatedCounter.java index 6f09a10173..3fc2ca502b 100644 --- a/akka-stm/src/test/java/akka/transactor/test/UntypedCoordinatedCounter.java +++ b/akka-stm/src/test/java/akka/transactor/test/UntypedCoordinatedCounter.java @@ -16,7 +16,7 @@ import java.util.concurrent.TimeUnit; public class UntypedCoordinatedCounter extends UntypedActor { private String name; - private Ref count = new Ref(0); + private Ref count = new Ref(0); private TransactionFactory txFactory = new TransactionFactoryBuilder() .setTimeout(new FiniteDuration(3, TimeUnit.SECONDS)) .build(); diff --git a/akka-stm/src/test/scala/agent/AgentSpec.scala b/akka-stm/src/test/scala/agent/AgentSpec.scala index 6a9c36dbe0..ed07dea6bd 100644 --- a/akka-stm/src/test/scala/agent/AgentSpec.scala +++ b/akka-stm/src/test/scala/agent/AgentSpec.scala @@ -12,7 +12,7 @@ import java.util.concurrent.CountDownLatch class CountDownFunction[A](num: Int = 1) extends Function1[A, A] { val latch = new CountDownLatch(num) - def apply(a: A) = { latch.countDown; a } + def apply(a: A) = { latch.countDown(); a } def await(timeout: Duration) = 
latch.await(timeout.length, timeout.unit) } @@ -61,7 +61,7 @@ class AgentSpec extends WordSpec with MustMatchers { } agent send f1 val read = agent() - readLatch.countDown + readLatch.countDown() agent send countDown countDown.await(5 seconds) diff --git a/akka-stm/src/test/scala/config/ConfigSpec.scala b/akka-stm/src/test/scala/config/ConfigSpec.scala index 4108a99d63..8636254ced 100644 --- a/akka-stm/src/test/scala/config/ConfigSpec.scala +++ b/akka-stm/src/test/scala/config/ConfigSpec.scala @@ -16,8 +16,6 @@ class ConfigSpec extends WordSpec with MustMatchers { "contain all configuration properties for akka-stm that are used in code with their correct defaults" in { import Config.config._ - getInt("akka.storage.max-retries") must equal(Some(10)) - getBool("akka.stm.blocking-allowed") must equal(Some(false)) getBool("akka.stm.fair") must equal(Some(true)) getBool("akka.stm.interruptible") must equal(Some(false)) diff --git a/akka-stm/src/test/scala/transactor/CoordinatedIncrementSpec.scala b/akka-stm/src/test/scala/transactor/CoordinatedIncrementSpec.scala index 367ef5ac5f..116baa7da7 100644 --- a/akka-stm/src/test/scala/transactor/CoordinatedIncrementSpec.scala +++ b/akka-stm/src/test/scala/transactor/CoordinatedIncrementSpec.scala @@ -51,9 +51,9 @@ class CoordinatedIncrementSpec extends WordSpec with MustMatchers { val timeout = 5 seconds def createActors = { - def createCounter(i: Int) = Actor.actorOf(new Counter("counter" + i)).start + def createCounter(i: Int) = Actor.actorOf(new Counter("counter" + i)).start() val counters = (1 to numCounters) map createCounter - val failer = Actor.actorOf(new Failer).start + val failer = Actor.actorOf(new Failer).start() (counters, failer) } @@ -66,8 +66,8 @@ class CoordinatedIncrementSpec extends WordSpec with MustMatchers { for (counter <- counters) { (counter !! 
GetCount).get must be === 1 } - counters foreach (_.stop) - failer.stop + counters foreach (_.stop()) + failer.stop() } "increment no counters with a failing transaction" in { @@ -78,8 +78,8 @@ class CoordinatedIncrementSpec extends WordSpec with MustMatchers { for (counter <- counters) { (counter !! GetCount).get must be === 0 } - counters foreach (_.stop) - failer.stop + counters foreach (_.stop()) + failer.stop() } } } diff --git a/akka-stm/src/test/scala/transactor/FickleFriendsSpec.scala b/akka-stm/src/test/scala/transactor/FickleFriendsSpec.scala index b6f8405e08..2c953fb36a 100644 --- a/akka-stm/src/test/scala/transactor/FickleFriendsSpec.scala +++ b/akka-stm/src/test/scala/transactor/FickleFriendsSpec.scala @@ -42,7 +42,7 @@ object FickleFriends { increment deferred { success = true - latch.countDown + latch.countDown() } } } catch { @@ -97,9 +97,9 @@ class FickleFriendsSpec extends WordSpec with MustMatchers { val numCounters = 2 def createActors = { - def createCounter(i: Int) = Actor.actorOf(new FickleCounter("counter" + i)).start + def createCounter(i: Int) = Actor.actorOf(new FickleCounter("counter" + i)).start() val counters = (1 to numCounters) map createCounter - val coordinator = Actor.actorOf(new Coordinator("coordinator")).start + val coordinator = Actor.actorOf(new Coordinator("coordinator")).start() (counters, coordinator) } @@ -113,8 +113,8 @@ class FickleFriendsSpec extends WordSpec with MustMatchers { for (counter <- counters) { (counter !! 
GetCount).get must be === 1 } - counters foreach (_.stop) - coordinator.stop + counters foreach (_.stop()) + coordinator.stop() } } } diff --git a/akka-stm/src/test/scala/transactor/TransactorSpec.scala b/akka-stm/src/test/scala/transactor/TransactorSpec.scala index 212abff1d4..f44b6f58fa 100644 --- a/akka-stm/src/test/scala/transactor/TransactorSpec.scala +++ b/akka-stm/src/test/scala/transactor/TransactorSpec.scala @@ -37,8 +37,8 @@ object TransactorIncrement { def atomically = { case Increment(friends, latch) => { increment - deferred { latch.countDown } - compensating { latch.countDown } + deferred { latch.countDown() } + compensating { latch.countDown() } } } @@ -65,7 +65,7 @@ object SimpleTransactor { def atomically = { case Set(ref, value, latch) => { ref.set(value) - latch.countDown + latch.countDown() } } } @@ -79,9 +79,9 @@ class TransactorSpec extends WordSpec with MustMatchers { val timeout = 5 seconds def createTransactors = { - def createCounter(i: Int) = Actor.actorOf(new Counter("counter" + i)).start + def createCounter(i: Int) = Actor.actorOf(new Counter("counter" + i)).start() val counters = (1 to numCounters) map createCounter - val failer = Actor.actorOf(new Failer).start + val failer = Actor.actorOf(new Failer).start() (counters, failer) } @@ -94,8 +94,8 @@ class TransactorSpec extends WordSpec with MustMatchers { for (counter <- counters) { (counter !! GetCount).get must be === 1 } - counters foreach (_.stop) - failer.stop + counters foreach (_.stop()) + failer.stop() } "increment no counters with a failing transaction" in { @@ -106,21 +106,21 @@ class TransactorSpec extends WordSpec with MustMatchers { for (counter <- counters) { (counter !! 
GetCount).get must be === 0 } - counters foreach (_.stop) - failer.stop + counters foreach (_.stop()) + failer.stop() } } "Transactor" should { "be usable without overriding normally" in { - val transactor = Actor.actorOf(new Setter).start + val transactor = Actor.actorOf(new Setter).start() val ref = Ref(0) val latch = new CountDownLatch(1) transactor ! Set(ref, 5, latch) latch.await(timeout.length, timeout.unit) val value = atomic { ref.get } value must be === 5 - transactor.stop + transactor.stop() } } } diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala index ce198be6bf..e6fd8ebbce 100644 --- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala +++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala @@ -206,8 +206,8 @@ class NestingQueue { def pop = q.poll @volatile private var active = false - def enter { if (active) error("already active") else active = true } - def leave { if (!active) error("not active") else active = false } + def enter { if (active) sys.error("already active") else active = true } + def leave { if (!active) sys.error("not active") else active = false } def isActive = active } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index 5ce90509ba..c5cfec6e43 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -46,7 +46,7 @@ class TestActor(queue : BlockingDeque[AnyRef]) extends Actor with FSM[Int, TestA * *
  * class Test extends TestKit {
- *     val test = actorOf[SomeActor].start
+ *     val test = actorOf[SomeActor].start()
  *
  *     within (1 second) {
  *       test ! SomeWork
@@ -77,7 +77,7 @@ trait TestKit {
    * ActorRef of the test actor. Access is provided to enable e.g.
    * registration as message target.
    */
-  protected val testActor = actorOf(new TestActor(queue)).start
+  protected val testActor = actorOf(new TestActor(queue)).start()
 
   /**
    * Implicit sender reference so that replies are possible for messages sent
@@ -98,7 +98,7 @@ trait TestKit {
    * Stop test actor. Should be done at the end of the test unless relying on
    * test actor timeout.
    */
-  def stopTestActor { testActor.stop }
+  def stopTestActor { testActor.stop() }
 
   /**
    * Set test actor timeout. By default, the test actor shuts itself down
diff --git a/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/java/first/Pi.java b/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/java/first/Pi.java
new file mode 100644
index 0000000000..1b2dd5e941
--- /dev/null
+++ b/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/java/first/Pi.java
@@ -0,0 +1,209 @@
+/**
+ * Copyright (C) 2009-2011 Scalable Solutions AB 
+ */
+
+package akka.tutorial.java.first;
+
+import static akka.actor.Actors.actorOf;
+import static akka.actor.Actors.poisonPill;
+import static java.util.Arrays.asList;
+
+import akka.actor.ActorRef;
+import akka.actor.UntypedActor;
+import akka.actor.UntypedActorFactory;
+import akka.routing.CyclicIterator;
+import akka.routing.InfiniteIterator;
+import akka.routing.Routing.Broadcast;
+import akka.routing.UntypedLoadBalancer;
+
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * First part in Akka tutorial for Java.
+ * 

+ * Calculates Pi. + *

+ * Run on command line: + *

+ *   $ cd akka-1.1
+ *   $ export AKKA_HOME=`pwd`
+ *   $ javac -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar akka/tutorial/java/first/Pi.java
+ *   $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.java.first.Pi
+ *   $ ...
+ * 
+ *

+ * Run it in Maven: + *

+ *   $ mvn
+ *   > scala:console
+ *   > val pi = new akka.tutorial.java.first.Pi
+ *   > pi.calculate(4, 10000, 10000)
+ *   > ...
+ * 
+ * + * @author Jonas Bonér + */ +public class Pi { + + public static void main(String[] args) throws Exception { + Pi pi = new Pi(); + pi.calculate(4, 10000, 10000); + } + + // ==================== + // ===== Messages ===== + // ==================== + static class Calculate {} + + static class Work { + private final int arg; + private final int nrOfElements; + + public Work(int arg, int nrOfElements) { + this.arg = arg; + this.nrOfElements = nrOfElements; + } + + public int getArg() { return arg; } + public int getNrOfElements() { return nrOfElements; } + } + + static class Result { + private final double value; + + public Result(double value) { + this.value = value; + } + + public double getValue() { return value; } + } + + // ================== + // ===== Worker ===== + // ================== + static class Worker extends UntypedActor { + + // define the work + private double calculatePiFor(int arg, int nrOfElements) { + double acc = 0.0; + for (int i = arg * nrOfElements; i <= ((arg + 1) * nrOfElements - 1); i++) { + acc += 4 * Math.pow(-1, i) / (2 * i + 1); + } + return acc; + } + + // message handler + public void onReceive(Object message) { + if (message instanceof Work) { + Work work = (Work) message; + getContext().replyUnsafe(new Result(calculatePiFor(work.getArg(), work.getNrOfElements()))); // perform the work + } else throw new IllegalArgumentException("Unknown message [" + message + "]"); + } + } + + // ================== + // ===== Master ===== + // ================== + static class Master extends UntypedActor { + private final int nrOfMessages; + private final int nrOfElements; + private final CountDownLatch latch; + + private double pi; + private int nrOfResults; + private long start; + + private ActorRef router; + + static class PiRouter extends UntypedLoadBalancer { + private final InfiniteIterator workers; + + public PiRouter(ActorRef[] workers) { + this.workers = new CyclicIterator(asList(workers)); + } + + public InfiniteIterator seq() { + 
return workers; + } + } + + public Master(int nrOfWorkers, int nrOfMessages, int nrOfElements, CountDownLatch latch) { + this.nrOfMessages = nrOfMessages; + this.nrOfElements = nrOfElements; + this.latch = latch; + + // create the workers + final ActorRef[] workers = new ActorRef[nrOfWorkers]; + for (int i = 0; i < nrOfWorkers; i++) { + workers[i] = actorOf(Worker.class).start(); + } + + // wrap them with a load-balancing router + router = actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new PiRouter(workers); + } + }).start(); + } + + // message handler + public void onReceive(Object message) { + + if (message instanceof Calculate) { + // schedule work + for (int arg = 0; arg < nrOfMessages; arg++) { + router.sendOneWay(new Work(arg, nrOfElements), getContext()); + } + + // send a PoisonPill to all workers telling them to shut down themselves + router.sendOneWay(new Broadcast(poisonPill())); + + // send a PoisonPill to the router, telling him to shut himself down + router.sendOneWay(poisonPill()); + + } else if (message instanceof Result) { + + // handle result from the worker + Result result = (Result) message; + pi += result.getValue(); + nrOfResults += 1; + if (nrOfResults == nrOfMessages) getContext().stop(); + + } else throw new IllegalArgumentException("Unknown message [" + message + "]"); + } + + @Override + public void preStart() { + start = System.currentTimeMillis(); + } + + @Override + public void postStop() { + // tell the world that the calculation is complete + System.out.println(String.format("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis", pi, (System.currentTimeMillis() - start))); + latch.countDown(); + } + } + + // ================== + // ===== Run it ===== + // ================== + public void calculate(final int nrOfWorkers, final int nrOfElements, final int nrOfMessages) throws Exception { + + // this latch is only plumbing to kSystem.currentTimeMillis(); when the calculation is completed + final 
CountDownLatch latch = new CountDownLatch(1); + + // create the master + ActorRef master = actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch); + } + }).start(); + + // start the calculation + master.sendOneWay(new Calculate()); + + // wait for master to shut down + latch.await(); + } +} diff --git a/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala b/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala new file mode 100644 index 0000000000..c31f8ee2f6 --- /dev/null +++ b/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala @@ -0,0 +1,136 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.tutorial.scala.first + +import akka.actor.{Actor, PoisonPill} +import Actor._ +import akka.routing.{Routing, CyclicIterator} +import Routing._ + +import System.{currentTimeMillis => now} +import java.util.concurrent.CountDownLatch + +/** + * First part in Akka tutorial. + *

+ * Calculates Pi. + *

+ * Run on command line: + *

+ *   $ cd akka-1.1
+ *   $ export AKKA_HOME=`pwd`
+ *   $ scalac -cp dist/akka-actor-1.1-SNAPSHOT.jar Pi.scala
+ *   $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.scala.first.Pi
+ *   $ ...
+ * 
+ *

+ * Run it in SBT: + *

+ *   $ sbt
+ *   > update
+ *   > console
+ *   > akka.tutorial.scala.first.Pi.calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000)
+ *   > ...
+ *   > :quit
+ * 
+ * + * @author Jonas Bonér + */ +object Pi extends App { + + calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000) + + // ==================== + // ===== Messages ===== + // ==================== + sealed trait PiMessage + case object Calculate extends PiMessage + case class Work(start: Int, nrOfElements: Int) extends PiMessage + case class Result(value: Double) extends PiMessage + + // ================== + // ===== Worker ===== + // ================== + class Worker extends Actor { + + // define the work + def calculatePiFor(start: Int, nrOfElements: Int): Double = { + var acc = 0.0 + for (i <- start until (start + nrOfElements)) + acc += 4 * math.pow(-1, i) / (2 * i + 1) + acc + } + + def receive = { + case Work(start, nrOfElements) => + self reply Result(calculatePiFor(start, nrOfElements)) // perform the work + } + } + + // ================== + // ===== Master ===== + // ================== + class Master(nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch) + extends Actor { + + var pi: Double = _ + var nrOfResults: Int = _ + var start: Long = _ + + // create the workers + val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start()) + + // wrap them with a load-balancing router + val router = Routing.loadBalancerActor(CyclicIterator(workers)).start() + + // message handler + def receive = { + case Calculate => + // schedule work + //for (arg <- 0 until nrOfMessages) router ! Work(arg, nrOfElements) + for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements) + + // send a PoisonPill to all workers telling them to shut down themselves + router ! Broadcast(PoisonPill) + + // send a PoisonPill to the router, telling him to shut himself down + router ! 
PoisonPill + + case Result(value) => + // handle result from the worker + pi += value + nrOfResults += 1 + if (nrOfResults == nrOfMessages) self.stop() + } + + override def preStart { + start = now + } + + override def postStop { + // tell the world that the calculation is complete + println("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis".format(pi, (now - start))) + latch.countDown() + } + } + + // ================== + // ===== Run it ===== + // ================== + def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) { + + // this latch is only plumbing to know when the calculation is completed + val latch = new CountDownLatch(1) + + // create the master + val master = actorOf(new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start() + + // start the calculation + master ! Calculate + + // wait for master to shut down + latch.await() + } +} diff --git a/akka-tutorials/akka-tutorial-second/src/main/java/akka/tutorial/java/second/Pi.java b/akka-tutorials/akka-tutorial-second/src/main/java/akka/tutorial/java/second/Pi.java new file mode 100644 index 0000000000..6397c09148 --- /dev/null +++ b/akka-tutorials/akka-tutorial-second/src/main/java/akka/tutorial/java/second/Pi.java @@ -0,0 +1,226 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.tutorial.java.second; + +import static akka.actor.Actors.actorOf; +import static akka.actor.Actors.poisonPill; +import static java.lang.System.currentTimeMillis; +import static java.util.Arrays.asList; +import scala.Option; +import akka.actor.ActorRef; +import akka.actor.Channel; +import akka.actor.UntypedActor; +import akka.actor.UntypedActorFactory; +import akka.dispatch.Future; +import akka.japi.Procedure; +import akka.routing.CyclicIterator; +import akka.routing.InfiniteIterator; +import akka.routing.Routing.Broadcast; +import akka.routing.UntypedLoadBalancer; + +/** + * Second part in Akka tutorial for Java. + *

+ * Calculates Pi. + *

+ * Run on command line: + *

+ *   $ cd akka-1.1
+ *   $ export AKKA_HOME=`pwd`
+ *   $ javac -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar akka/tutorial/java/second/Pi.java
+ *   $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.java.second.Pi
+ *   $ ...
+ * 
+ *

+ * Run it in Maven: + *

+ *   $ mvn
+ *   > scala:console
+ *   > val pi = new akka.tutorial.java.second.Pi
+ *   > pi.calculate(4, 10000, 10000)
+ *   > ...
+ * 
+ * + * @author Jonas Bonér + */ +public class Pi { + + public static void main(String[] args) throws Exception { + Pi pi = new Pi(); + pi.calculate(4, 10000, 10000); + } + + // ==================== + // ===== Messages ===== + // ==================== + static class Calculate {} + + static class Work { + private final int arg; + private final int nrOfElements; + + public Work(int arg, int nrOfElements) { + this.arg = arg; + this.nrOfElements = nrOfElements; + } + + public int getArg() { return arg; } + public int getNrOfElements() { return nrOfElements; } + } + + static class Result { + private final double value; + + public Result(double value) { + this.value = value; + } + + public double getValue() { return value; } + } + + // ================== + // ===== Worker ===== + // ================== + static class Worker extends UntypedActor { + + // define the work + private double calculatePiFor(int arg, int nrOfElements) { + double acc = 0.0; + for (int i = arg * nrOfElements; i <= ((arg + 1) * nrOfElements - 1); i++) { + acc += 4 * Math.pow(-1, i) / (2 * i + 1); + } + return acc; + } + + // message handler + public void onReceive(Object message) { + if (message instanceof Work) { + Work work = (Work) message; + getContext().replyUnsafe(new Result(calculatePiFor(work.getArg(), work.getNrOfElements()))); // perform the work + } else throw new IllegalArgumentException("Unknown message [" + message + "]"); + } + } + + // ================== + // ===== Master ===== + // ================== + static class Master extends UntypedActor { + private final int nrOfMessages; + private final int nrOfElements; + + private double pi; + private int nrOfResults; + + private ActorRef router; + + static class PiRouter extends UntypedLoadBalancer { + private final InfiniteIterator workers; + + public PiRouter(ActorRef[] workers) { + this.workers = new CyclicIterator(asList(workers)); + } + + public InfiniteIterator seq() { + return workers; + } + } + + public Master(int nrOfWorkers, int 
nrOfMessages, int nrOfElements) { + this.nrOfMessages = nrOfMessages; + this.nrOfElements = nrOfElements; + + // create the workers + final ActorRef[] workers = new ActorRef[nrOfWorkers]; + for (int i = 0; i < nrOfWorkers; i++) { + workers[i] = actorOf(Worker.class).start(); + } + + // wrap them with a load-balancing router + router = actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new PiRouter(workers); + } + }).start(); + } + + @Override + public void preStart() { + become(scatter); + } + + // message handler + public void onReceive(Object message) { + throw new IllegalStateException("Should be gather or scatter"); + } + + private final Procedure scatter = new Procedure() { + public void apply(Object msg) { + // schedule work + for (int arg = 0; arg < nrOfMessages; arg++) { + router.sendOneWay(new Work(arg, nrOfElements), getContext()); + } + // Assume the gathering behavior + become(gather(getContext().getChannel())); + } + }; + + private Procedure gather(final Channel recipient) { + return new Procedure() { + public void apply(Object msg) { + // handle result from the worker + Result result = (Result) msg; + pi += result.getValue(); + nrOfResults += 1; + if (nrOfResults == nrOfMessages) { + // send the pi result back to the guy who started the calculation + recipient.sendOneWay(pi); + // shut ourselves down, we're done + getContext().stop(); + } + } + }; + } + + @Override + public void postStop() { + // send a PoisonPill to all workers telling them to shut down themselves + router.sendOneWay(new Broadcast(poisonPill())); + // send a PoisonPill to the router, telling him to shut himself down + router.sendOneWay(poisonPill()); + } + } + + // ================== + // ===== Run it ===== + // ================== + public void calculate(final int nrOfWorkers, final int nrOfElements, final int nrOfMessages) throws Exception { + + // create the master + ActorRef master = actorOf(new UntypedActorFactory() { + public UntypedActor create() { + 
return new Master(nrOfWorkers, nrOfMessages, nrOfElements); + } + }).start(); + + // start the calculation + long start = currentTimeMillis(); + + // send calculate message + long timeout = 60000; + Future replyFuture = master.sendRequestReplyFuture(new Calculate(), timeout, null); + Option result = replyFuture.await().resultOrException(); + if (result.isDefined()) { + double pi = result.get(); + // TODO java api for EventHandler? +// EventHandler.info(this, String.format("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis", pi, (currentTimeMillis() - start))); + System.out.println(String.format("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis", pi, (currentTimeMillis() - start))); + } else { + // TODO java api for EventHandler? +// EventHandler.error(this, "Pi calculation did not complete within the timeout."); + System.out.println("Pi calculation did not complete within the timeout."); + } + + } +} diff --git a/akka-tutorials/akka-tutorial-second/src/main/scala/Pi.scala b/akka-tutorials/akka-tutorial-second/src/main/scala/Pi.scala new file mode 100644 index 0000000000..e7e10f56ef --- /dev/null +++ b/akka-tutorials/akka-tutorial-second/src/main/scala/Pi.scala @@ -0,0 +1,141 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.tutorial.second + +import akka.actor.Actor._ +import akka.routing.{Routing, CyclicIterator} +import Routing._ +import akka.event.EventHandler +import akka.actor.{Channel, Actor, PoisonPill} +import akka.dispatch.Future + +import System.{currentTimeMillis => now} + +/** + * Second part in Akka tutorial. + *

+ * Calculates Pi. + *

+ * Run on command line: + *

+ *   $ cd akka-1.1
+ *   $ export AKKA_HOME=`pwd`
+ *   $ scalac -cp dist/akka-actor-1.1-SNAPSHOT.jar Pi.scala
+ *   $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.second.Pi
+ *   $ ...
+ * 
+ *

+ * Run it in SBT: + *

+ *   $ sbt
+ *   > update
+ *   > console
+ *   > akka.tutorial.second.Pi.calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000)
+ *   > ...
+ *   > :quit
+ * 
+ * + * @author Jonas Bonér + */ +object Pi extends App { + + calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000) + + // ==================== + // ===== Messages ===== + // ==================== + sealed trait PiMessage + case object Calculate extends PiMessage + case class Work(arg: Int, nrOfElements: Int) extends PiMessage + case class Result(value: Double) extends PiMessage + + // ================== + // ===== Worker ===== + // ================== + class Worker() extends Actor { + // define the work + val calculatePiFor = (arg: Int, nrOfElements: Int) => { + val range = (arg * nrOfElements) to ((arg + 1) * nrOfElements - 1) + var acc = 0.0D + range foreach (i => acc += 4 * math.pow(-1, i) / (2 * i + 1)) + acc + //range map (j => 4 * math.pow(-1, j) / (2 * j + 1)) sum + } + + def receive = { + case Work(arg, nrOfElements) => + self reply Result(calculatePiFor(arg, nrOfElements)) // perform the work + } + } + + // ================== + // ===== Master ===== + // ================== + case class Master(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) extends Actor { + var pi: Double = _ + var nrOfResults: Int = _ + + // create the workers + val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start()) + + // wrap them with a load-balancing router + val router = Routing.loadBalancerActor(CyclicIterator(workers)).start() + + // phase 1, can accept a Calculate message + def scatter: Receive = { + case Calculate => + // schedule work + for (arg <- 0 until nrOfMessages) router ! Work(arg, nrOfElements) + + //Assume the gathering behavior + this become gather(self.channel) + } + + // phase 2, aggregate the results of the Calculation + def gather(recipient: Channel[Any]): Receive = { + case Result(value) => + // handle result from the worker + pi += value + nrOfResults += 1 + if (nrOfResults == nrOfMessages) { + // send the pi result back to the guy who started the calculation + recipient ! 
pi + // shut ourselves down, we're done + self.stop() + } + } + + // message handler starts at the scattering behavior + def receive = scatter + + // when we are stopped, stop our team of workers and our router + override def postStop { + // send a PoisonPill to all workers telling them to shut down themselves + router ! Broadcast(PoisonPill) + // send a PoisonPill to the router, telling him to shut himself down + router ! PoisonPill + } + } + + // ================== + // ===== Run it ===== + // ================== + def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) { + // create the master + val master = actorOf(new Master(nrOfWorkers, nrOfElements, nrOfMessages)).start() + + //start the calculation + val start = now + + //send calculate message + master.!!![Double](Calculate, timeout = 60000). + await.resultOrException match {//wait for the result, with a 60 seconds timeout + case Some(pi) => + EventHandler.info(this, "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis".format(pi, (now - start))) + case None => + EventHandler.error(this, "Pi calculation did not complete within the timeout.") + } + } +} diff --git a/akka-typed-actor/src/main/java/akka/config/TypedActorGuiceModule.java b/akka-typed-actor/src/main/java/akka/config/TypedActorGuiceModule.java index 8c370a8218..2452eeb706 100644 --- a/akka-typed-actor/src/main/java/akka/config/TypedActorGuiceModule.java +++ b/akka-typed-actor/src/main/java/akka/config/TypedActorGuiceModule.java @@ -26,7 +26,10 @@ public class TypedActorGuiceModule extends AbstractModule { final DependencyBinding db = bindings.get(i); //if (db.getInterface() ne null) bind((Class) db.getInterface()).to((Class) db.getTarget()).in(Singleton.class); //else - this.bind(db.getInterface()).toInstance(db.getTarget()); + + @SuppressWarnings("unchecked") + Class intf = db.getInterface(); + this.bind(intf).toInstance(db.getTarget()); } } } diff --git a/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala 
b/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala index 3fcf0789bc..800385545b 100644 --- a/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala @@ -17,10 +17,10 @@ import org.codehaus.aspectwerkz.proxy.Proxy import org.codehaus.aspectwerkz.annotation.{Aspect, Around} import java.net.InetSocketAddress -import java.util.concurrent.atomic.AtomicBoolean -import scala.reflect.BeanProperty import java.lang.reflect.{Method, Field, InvocationHandler, Proxy => JProxy} +import scala.reflect.BeanProperty + /** * TypedActor is a type-safe actor made out of a POJO with interface. * Void methods are turned into fire-forget messages. @@ -36,7 +36,7 @@ import java.lang.reflect.{Method, Field, InvocationHandler, Proxy => JProxy} * class TestActorImpl extends TypedActor implements TestActor { * * public void hit(int count) { - * Pong pong = (Pong) getContext().getSender(); + * Pong pong = (Pong) context().sender(); * pong.hit(count++); * } * @@ -124,15 +124,15 @@ abstract class TypedActor extends Actor with Proxyable { * This class does not contain static information but is updated by the runtime system * at runtime. *

- * You can get a hold of the context using either the 'getContext()' or 'context' - * methods from the 'TypedActor' base class. + * You can get a hold of the context using the 'context()' + * method from the 'TypedActor' base class. *

* * Here is an example of usage (in Java): *

    * class PingImpl extends TypedActor implements Ping {
    *   public void hit(int count) {
-   *     Pong pong = (Pong) getContext().getSender();
+   *     Pong pong = (Pong) context().sender();
    *     pong.hit(count++);
    *   }
    * }
@@ -148,7 +148,40 @@ abstract class TypedActor extends Actor with Proxyable {
    * }
    * 
*/ - @BeanProperty val context: TypedActorContext = new TypedActorContext(self) + val context: TypedActorContext = new TypedActorContext(self) + + /** + * @deprecated 'getContext()' is deprecated use 'context()' + */ + def getContext: TypedActorContext = context + + /** + * User overridable callback. + *

+ * Is called when an Actor is started by invoking 'actor.start()'. + */ + override def preStart {} + + /** + * User overridable callback. + *

+ * Is called when 'actor.stop()' is invoked. + */ + override def postStop {} + + /** + * User overridable callback. + *

+ * Is called on a crashed Actor right BEFORE it is restarted to allow clean up of resources before Actor is terminated. + */ + override def preRestart(reason: Throwable) {} + + /** + * User overridable callback. + *

+ * Is called right AFTER restart on the newly created Actor to allow reinitialization after an Actor crash. + */ + override def postRestart(reason: Throwable) {} /** * This method is used to resolve the Future for TypedActor methods that are defined to return a @@ -178,17 +211,22 @@ abstract class TypedActor extends Actor with Proxyable { def receive = { case joinPoint: JoinPoint => - SenderContextInfo.senderActorRef.value = self - SenderContextInfo.senderProxy.value = proxy + SenderContextInfo.senderActorRef.withValue(self) { + SenderContextInfo.senderProxy.withValue(proxy) { + if (Actor.SERIALIZE_MESSAGES) serializeArguments(joinPoint) + if (TypedActor.isOneWay(joinPoint)) joinPoint.proceed + else self.reply(joinPoint.proceed) + } + } - if (Actor.SERIALIZE_MESSAGES) serializeArguments(joinPoint) - if (TypedActor.isOneWay(joinPoint)) joinPoint.proceed - else self.reply(joinPoint.proceed) case coordinated @ Coordinated(joinPoint: JoinPoint) => - SenderContextInfo.senderActorRef.value = self - SenderContextInfo.senderProxy.value = proxy - if (Actor.SERIALIZE_MESSAGES) serializeArguments(joinPoint) - coordinated atomic { joinPoint.proceed } + SenderContextInfo.senderActorRef.withValue(self) { + SenderContextInfo.senderProxy.withValue(proxy) { + if (Actor.SERIALIZE_MESSAGES) serializeArguments(joinPoint) + coordinated atomic { joinPoint.proceed } + } + } + case Link(proxy) => self.link(proxy) case Unlink(proxy) => self.unlink(proxy) case unexpected => throw new IllegalActorStateException( @@ -255,7 +293,7 @@ abstract class TypedActor extends Actor with Proxyable { *

  * class PingImpl extends TypedActor implements Ping {
  *   public void hit(int count) {
- *     Pong pong = (Pong) getContext().getSender();
+ *     Pong pong = (Pong) context().sender();
  *     pong.hit(count++);
  *   }
  * }
@@ -277,7 +315,8 @@ final class TypedActorContext(private[akka] val actorRef: ActorRef) {
   private[akka] var _sender: AnyRef = _
 
   /**
-5  * Returns the uuid for the actor.
+   * Returns the uuid for the actor.
+   * @deprecated use 'uuid()'
    */
   def getUuid() = actorRef.uuid
 
@@ -287,31 +326,42 @@ final class TypedActorContext(private[akka] val actorRef: ActorRef) {
   def uuid = actorRef.uuid
 
   def timeout = actorRef.timeout
+
+  /**
+   * @deprecated use 'timeout()'
+   */
   def getTimout = timeout
   def setTimout(timeout: Long) = actorRef.timeout = timeout
 
   def id =  actorRef.id
+
+  /**
+   * @deprecated use 'id()'
+   */
   def getId = id
   def setId(id: String) = actorRef.id = id
 
   def receiveTimeout = actorRef.receiveTimeout
+
+  /**
+   * @deprecated use 'receiveTimeout()'
+   */
   def getReceiveTimeout = receiveTimeout
   def setReceiveTimeout(timeout: Long) = actorRef.setReceiveTimeout(timeout)
 
-  /**
-   * Is the actor running?
-   */
+  def mailboxSize = actorRef.mailboxSize
+
+  def dispatcher = actorRef.getDispatcher
+
+  def lifeCycle = actorRef.getLifeCycle
+
   def isRunning: Boolean = actorRef.isRunning
-
-  /**
-   * Is the actor shut down?
-   */
   def isShutdown: Boolean = actorRef.isShutdown
-
-  /**
-   * Is the actor ever started?
-   */
   def isUnstarted: Boolean = actorRef.isUnstarted
+  def isBeingRestarted: Boolean = actorRef.isBeingRestarted
+
+  def getSelfAs[T <: AnyRef](): T = TypedActor.proxyFor(actorRef).get.asInstanceOf[T]
+  def getSelf(): AnyRef = getSelfAs[AnyRef]
 
   /**
    * Returns the current sender reference.
@@ -349,7 +399,7 @@ final class TypedActorContext(private[akka] val actorRef: ActorRef) {
   /**
     * Returns the home address and port for this actor.
     */
-  def homeAddress: InetSocketAddress = actorRef.homeAddress.getOrElse(null)//TODO: REVISIT: Sensible to return null?
+  def homeAddress: InetSocketAddress = actorRef.homeAddress.getOrElse(null)
 }
 
 object TypedActorConfiguration {
@@ -449,7 +499,7 @@ object TypedActor {
    * @param intfClass interface the typed actor implements
    * @param targetClass implementation class of the typed actor
    */
-  def newInstance[T](intfClass: Class[T], targetClass: Class[_]): T = 
+  def newInstance[T](intfClass: Class[T], targetClass: Class[_]): T =
     newInstance(intfClass, targetClass, TypedActorConfiguration())
 
   /**
@@ -468,6 +518,7 @@ object TypedActor {
    * @param host hostanme of the remote server
    * @param port port of the remote server
    */
+  @deprecated("Will be removed after 1.1")
   def newRemoteInstance[T](intfClass: Class[T], targetClass: Class[_], hostname: String, port: Int): T = {
     newInstance(intfClass, targetClass, TypedActorConfiguration(hostname, port))
   }
@@ -479,6 +530,7 @@ object TypedActor {
    * @param host hostanme of the remote server
    * @param port port of the remote server
    */
+  @deprecated("Will be removed after 1.1")
   def newRemoteInstance[T](intfClass: Class[T], factory: => AnyRef, hostname: String, port: Int): T = {
     newInstance(intfClass, factory, TypedActorConfiguration(hostname, port))
   }
@@ -589,7 +641,7 @@ object TypedActor {
     }
 
     AspectInitRegistry.register(proxy, AspectInit(intfClass, typedActor, actorRef, remoteAddress, actorRef.timeout))
-    actorRef.start
+    actorRef.start()
     proxy.asInstanceOf[T]
   }
 
@@ -677,7 +729,7 @@ object TypedActor {
       actorRef.timeout = timeout
       if (remoteAddress.isDefined) actorRef.makeRemote(remoteAddress.get)
       AspectInitRegistry.register(proxy, AspectInit(targetClass, proxy, actorRef, remoteAddress, timeout))
-      actorRef.start
+      actorRef.start()
       proxy.asInstanceOf[T]
     }
   */
@@ -869,8 +921,8 @@ private[akka] abstract class ActorAspect {
   protected def localDispatch(joinPoint: JoinPoint): AnyRef = {
     val methodRtti = joinPoint.getRtti.asInstanceOf[MethodRtti]
     val isOneWay = TypedActor.isOneWay(methodRtti)
-    val senderActorRef = Some(SenderContextInfo.senderActorRef.value)
-    val senderProxy = Some(SenderContextInfo.senderProxy.value)
+    val senderActorRef = Option(SenderContextInfo.senderActorRef.value)
+    val senderProxy = Option(SenderContextInfo.senderProxy.value)
     val isCoordinated = TypedActor.isCoordinated(methodRtti)
 
     typedActor.context._sender = senderProxy
@@ -920,6 +972,7 @@ private[akka] abstract class ActorAspect {
   protected def remoteDispatch(joinPoint: JoinPoint): AnyRef = {
     val methodRtti = joinPoint.getRtti.asInstanceOf[MethodRtti]
     val isOneWay = TypedActor.isOneWay(methodRtti)
+    val senderActorRef = Option(SenderContextInfo.senderActorRef.value)
 
     def extractOwnerTypeHint(s: String) =
       s.indexOf(TypedActor.AW_PROXY_PREFIX) match {
@@ -932,8 +985,11 @@ private[akka] abstract class ActorAspect {
         methodRtti.getParameterTypes,
         methodRtti.getParameterValues))
 
+    //FIXME send the interface name of the senderProxy in the TypedActorContext and assemble a context.sender with that interface on the server
+    //val senderProxy = Option(SenderContextInfo.senderProxy.value)
+
     val future = Actor.remote.send[AnyRef](
-      message, None, None, remoteAddress.get,
+      message, senderActorRef, None, remoteAddress.get,
       timeout, isOneWay, actorRef,
       Some((interfaceClass.getName, methodRtti.getMethod.getName)),
       ActorType.TypedActor,
@@ -997,7 +1053,7 @@ private[akka] object AspectInitRegistry extends ListenerManagement {
     val init = if (proxy ne null) initializations.remove(proxy) else null
     if (init ne null) {
       notifyListeners(AspectInitUnregistered(proxy, init))
-      init.actorRef.stop
+      init.actorRef.stop()
     }
     init
   }
diff --git a/akka-typed-actor/src/main/scala/akka/config/TypedActorGuiceConfigurator.scala b/akka-typed-actor/src/main/scala/akka/config/TypedActorGuiceConfigurator.scala
index 315467f2ee..ae19601351 100644
--- a/akka-typed-actor/src/main/scala/akka/config/TypedActorGuiceConfigurator.scala
+++ b/akka-typed-actor/src/main/scala/akka/config/TypedActorGuiceConfigurator.scala
@@ -31,7 +31,6 @@ private[akka] class TypedActorGuiceConfigurator extends TypedActorConfiguratorBa
   private var components: List[SuperviseTypedActor] = _
   private var supervised: List[Supervise] = Nil
   private var bindings: List[DependencyBinding] = Nil
-  private var configRegistry = new HashMap[Class[_], SuperviseTypedActor] // TODO is configRegistry needed?
   private var typedActorRegistry = new HashMap[Class[_], Tuple3[AnyRef, AnyRef, SuperviseTypedActor]]
   private var modules = new java.util.ArrayList[Module]
   private var methodToUriRegistry = new HashMap[Method, String]
@@ -126,7 +125,7 @@ private[akka] class TypedActorGuiceConfigurator extends TypedActorConfiguratorBa
       proxy,
       AspectInit(interfaceClass, typedActor, actorRef, remoteAddress, timeout))
     typedActor.initialize(proxy)
-    actorRef.start
+    actorRef.start()
 
     supervised ::= Supervise(actorRef, component.lifeCycle)
 
@@ -167,7 +166,6 @@ private[akka] class TypedActorGuiceConfigurator extends TypedActorConfiguratorBa
 
   def reset = synchronized {
     modules = new java.util.ArrayList[Module]
-    configRegistry = new HashMap[Class[_], SuperviseTypedActor]
     typedActorRegistry = new HashMap[Class[_], Tuple3[AnyRef, AnyRef, SuperviseTypedActor]]
     methodToUriRegistry = new HashMap[Method, String]
     injector = null
diff --git a/akka-typed-actor/src/test/scala/actor/typed-actor/Issue675Spec.scala b/akka-typed-actor/src/test/scala/actor/typed-actor/Issue675Spec.scala
index e978b61c45..a30bef028d 100644
--- a/akka-typed-actor/src/test/scala/actor/typed-actor/Issue675Spec.scala
+++ b/akka-typed-actor/src/test/scala/actor/typed-actor/Issue675Spec.scala
@@ -38,7 +38,7 @@ class Issue675Spec extends
   BeforeAndAfterEach {
 
   override def afterEach() {
-    Actor.registry.shutdownAll
+    Actor.registry.shutdownAll()
   }
 
   describe("TypedActor preStart method") {
diff --git a/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorLifecycleSpec.scala b/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorLifecycleSpec.scala
index 0946aa26c0..78cd4535c9 100644
--- a/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorLifecycleSpec.scala
+++ b/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorLifecycleSpec.scala
@@ -12,6 +12,10 @@ import akka.config.Supervision._
 import java.util.concurrent.CountDownLatch
 import akka.config.TypedActorConfigurator
 
+import akka.testing._
+import akka.util.duration._
+
+
 /**
  * @author Martin Krasser
  */
@@ -95,7 +99,7 @@ class TypedActorLifecycleSpec extends Spec with ShouldMatchers with BeforeAndAft
     }
 
     it("should be stopped when supervision cannot handle the problem in") {
-      val actorSupervision = new SuperviseTypedActor(classOf[TypedActorFailer],classOf[TypedActorFailerImpl],permanent(),30000)
+      val actorSupervision = new SuperviseTypedActor(classOf[TypedActorFailer], classOf[TypedActorFailerImpl], permanent(), 30000)
       val conf = new TypedActorConfigurator().configure(OneForOneStrategy(Nil, 3, 500000), Array(actorSupervision)).inject.supervise
       try {
         val first = conf.getInstance(classOf[TypedActorFailer])
@@ -105,15 +109,18 @@ class TypedActorLifecycleSpec extends Spec with ShouldMatchers with BeforeAndAft
         } catch {
           case r: RuntimeException if r.getMessage == "expected" => //expected
         }
-        val second = conf.getInstance(classOf[TypedActorFailer])
 
+        // allow some time for the actor to be stopped
+        Testing.sleepFor(3 seconds)
+
+        val second = conf.getInstance(classOf[TypedActorFailer])
         first should be (second)
 
         try {
           second.fail
           fail("shouldn't get here")
         } catch {
-          case r: ActorInitializationException if r.getMessage == "Actor has not been started, you need to invoke 'actor.start' before using it" => //expected
+          case r: ActorInitializationException if r.getMessage == "Actor has not been started, you need to invoke 'actor.start()' before using it" => //expected
         }
       } finally {
         conf.stop
@@ -121,7 +128,7 @@ class TypedActorLifecycleSpec extends Spec with ShouldMatchers with BeforeAndAft
     }
 
     it("should be restarted when supervision handles the problem in") {
-     val actorSupervision = new SuperviseTypedActor(classOf[TypedActorFailer],classOf[TypedActorFailerImpl],permanent(),30000)
+     val actorSupervision = new SuperviseTypedActor(classOf[TypedActorFailer],classOf[TypedActorFailerImpl],permanent(), 30000)
      val conf = new TypedActorConfigurator().configure(OneForOneStrategy(classOf[Throwable] :: Nil, 3, 500000), Array(actorSupervision)).inject.supervise
      try {
        val first = conf.getInstance(classOf[TypedActorFailer])
@@ -146,4 +153,4 @@ class TypedActorLifecycleSpec extends Spec with ShouldMatchers with BeforeAndAft
      }
    }
  }
-}
\ No newline at end of file
+}
diff --git a/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorRegistrySpec.scala b/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorRegistrySpec.scala
index 0a031026ef..a83dccc18d 100644
--- a/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorRegistrySpec.scala
+++ b/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorRegistrySpec.scala
@@ -17,34 +17,34 @@ class TypedActorRegistrySpec extends WordSpec with MustMatchers {
   "Typed Actor" should {
 
     "be able to be retreived from the registry by class" in {
-      Actor.registry.shutdownAll
+      Actor.registry.shutdownAll()
       val my = TypedActor.newInstance[My](classOf[My], classOf[MyImpl], 3000)
       val actors = Actor.registry.typedActorsFor(classOf[My])
       actors.length must be (1)
-      Actor.registry.shutdownAll
+      Actor.registry.shutdownAll()
     }
 
     "be able to be retreived from the registry by manifest" in {
-      Actor.registry.shutdownAll
+      Actor.registry.shutdownAll()
       val my = TypedActor.newInstance[My](classOf[My], classOf[MyImpl], 3000)
       val option = Actor.registry.typedActorFor[My]
       option must not be (null)
       option.isDefined must be (true)
-      Actor.registry.shutdownAll
+      Actor.registry.shutdownAll()
     }
 
     "be able to be retreived from the registry by class two times" in {
-      Actor.registry.shutdownAll
+      Actor.registry.shutdownAll()
       val my = TypedActor.newInstance[My](classOf[My], classOf[MyImpl], 3000)
       val actors1 = Actor.registry.typedActorsFor(classOf[My])
       actors1.length must be (1)
       val actors2 = Actor.registry.typedActorsFor(classOf[My])
       actors2.length must be (1)
-      Actor.registry.shutdownAll
+      Actor.registry.shutdownAll()
     }
 
     "be able to be retreived from the registry by manifest two times" in {
-      Actor.registry.shutdownAll
+      Actor.registry.shutdownAll()
       val my = TypedActor.newInstance[My](classOf[My], classOf[MyImpl], 3000)
       val option1 = Actor.registry.typedActorFor[My]
       option1 must not be (null)
@@ -52,11 +52,11 @@ class TypedActorRegistrySpec extends WordSpec with MustMatchers {
       val option2 = Actor.registry.typedActorFor[My]
       option2 must not be (null)
       option2.isDefined must be (true)
-      Actor.registry.shutdownAll
+      Actor.registry.shutdownAll()
     }
 
     "be able to be retreived from the registry by manifest two times (even when created in supervisor)" in {
-      Actor.registry.shutdownAll
+      Actor.registry.shutdownAll()
       val manager = new TypedActorConfigurator
       manager.configure(
         OneForOneStrategy(classOf[Exception] :: Nil, 3, 1000),
@@ -69,7 +69,7 @@ class TypedActorRegistrySpec extends WordSpec with MustMatchers {
       val option2 = Actor.registry.typedActorFor[My]
       option2 must not be (null)
       option2.isDefined must be (true)
-      Actor.registry.shutdownAll
+      Actor.registry.shutdownAll()
     }
   }
 }
diff --git a/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorSpec.scala b/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorSpec.scala
index f871f98841..6391b2bc51 100644
--- a/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorSpec.scala
+++ b/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorSpec.scala
@@ -68,7 +68,7 @@ class TypedActorSpec extends
   }
 
   override def afterEach() {
-    Actor.registry.shutdownAll
+    Actor.registry.shutdownAll()
   }
 
   describe("TypedActor") {
@@ -131,11 +131,11 @@ class TypedActorSpec extends
       assert(typedActors.contains(pojo))
 
       // creating untyped actor with same custom id
-      val actorRef = Actor.actorOf[MyActor].start
+      val actorRef = Actor.actorOf[MyActor].start()
       val typedActors2 = Actor.registry.typedActorsFor("my-custom-id")
       assert(typedActors2.length === 1)
       assert(typedActors2.contains(pojo))
-      actorRef.stop
+      actorRef.stop()
     }
 
     it("should support to filter typed actors") {
@@ -166,7 +166,7 @@ class TypedActorSpec extends
     }
 
     it("should support foreach for typed actors") {
-      val actorRef = Actor.actorOf[MyActor].start
+      val actorRef = Actor.actorOf[MyActor].start()
       assert(Actor.registry.actors.size === 3)
       assert(Actor.registry.typedActors.size === 2)
       Actor.registry.foreachTypedActor(TypedActor.stop(_))
@@ -175,7 +175,7 @@ class TypedActorSpec extends
     }
 
     it("should shutdown all typed and untyped actors") {
-      val actorRef = Actor.actorOf[MyActor].start
+      val actorRef = Actor.actorOf[MyActor].start()
       assert(Actor.registry.actors.size === 3)
       assert(Actor.registry.typedActors.size === 2)
       Actor.registry.shutdownAll()
diff --git a/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorUtilFunctionsSpec.scala b/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorUtilFunctionsSpec.scala
index d2243e92e6..87e2078389 100644
--- a/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorUtilFunctionsSpec.scala
+++ b/akka-typed-actor/src/test/scala/actor/typed-actor/TypedActorUtilFunctionsSpec.scala
@@ -14,7 +14,7 @@ class ActorObjectUtilFunctionsSpec extends junit.framework.TestCase with Suite w
     val latch = new CountDownLatch(1)
 
     spawn {
-      latch.countDown
+      latch.countDown()
     }
 
     val done = latch.await(10,TimeUnit.SECONDS)
diff --git a/config/akka-reference.conf b/config/akka-reference.conf
index 342f8e6316..df2c2c3e0d 100644
--- a/config/akka-reference.conf
+++ b/config/akka-reference.conf
@@ -11,9 +11,9 @@ akka {
   enabled-modules = []       # Comma separated list of the enabled modules. Options: ["remote", "camel", "http"]
 
   time-unit = "seconds"      # Time unit for all timeout properties throughout the config
-  
+
   event-handlers = ["akka.event.EventHandler$DefaultListener"] # event handlers to register at boot time (EventHandler$DefaultListener logs to STDOUT)
-  event-handler-level = "DEBUG" # Options: ERROR, WARNING, INFO, DEBUG
+  event-handler-level = "INFO" # Options: ERROR, WARNING, INFO, DEBUG
 
   # These boot classes are loaded (and created) automatically when the Akka Microkernel boots up
   #     Can be used to bootstrap your application(s)
@@ -146,6 +146,11 @@ akka {
     }
 
     client {
+      buffering {
+        retry-message-send-on-failure = on
+        capacity = -1                      # If negative (or zero) then an unbounded mailbox is used (default)
+                                           # If positive then a bounded mailbox is used and the capacity is set using the property
+      }
       reconnect-delay = 5
       read-timeout = 10
       message-frame-size = 1048576
@@ -153,8 +158,4 @@ akka {
       reconnection-time-window = 600 # Maximum time window that a client should try to reconnect for
     }
   }
-
-  storage {
-    max-retries = 10
-  }
 }
diff --git a/project/build.properties b/project/build.properties
index c5b83d798f..8275b16e18 100644
--- a/project/build.properties
+++ b/project/build.properties
@@ -1,8 +1,5 @@
-#Project properties
-#Tue Nov 23 12:37:45 CET 2010
 project.organization=se.scalablesolutions.akka
 project.name=akka
-sbt.version=0.7.5.RC0
 project.version=1.1-SNAPSHOT
-def.scala.version=2.7.7
-build.scala.versions=2.8.1
+build.scala.versions=2.9.0.RC1
+sbt.version=0.7.6.RC0
diff --git a/project/build/AkkaProject.scala b/project/build/AkkaProject.scala
index 127994aa30..72598969ba 100644
--- a/project/build/AkkaProject.scala
+++ b/project/build/AkkaProject.scala
@@ -18,10 +18,8 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
 
   val scalaCompileSettings =
     Seq("-deprecation",
-        "-Xmigration",
-        //"-Xcheckinit",
-        //"-optimise",
-        "-Xwarninit",
+        //"-Xmigration",
+        "-optimise",
         "-encoding", "utf8")
 
   val javaCompileSettings = Seq("-Xlint:unchecked")
@@ -98,14 +96,14 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
   lazy val jerseyModuleConfig      = ModuleConfiguration("com.sun.jersey", JavaNetRepo)
   lazy val multiverseModuleConfig  = ModuleConfiguration("org.multiverse", CodehausRepo)
   lazy val nettyModuleConfig       = ModuleConfiguration("org.jboss.netty", JBossRepo)
-  lazy val scalaTestModuleConfig   = ModuleConfiguration("org.scalatest", ScalaToolsRelRepo)
+  lazy val scalaTestModuleConfig   = ModuleConfiguration("org.scalatest", ScalaToolsSnapshotRepo)
   lazy val spdeModuleConfig        = ModuleConfiguration("us.technically.spde", DatabinderRepo)
   lazy val processingModuleConfig  = ModuleConfiguration("org.processing", DatabinderRepo)
-  lazy val scalaModuleConfig       = ModuleConfiguration("org.scala-lang", ScalaToolsSnapshotRepo)
   lazy val sjsonModuleConfig       = ModuleConfiguration("net.debasishg", ScalaToolsRelRepo)
   lazy val lzfModuleConfig         = ModuleConfiguration("voldemort.store.compress", "h2-lzf", AkkaRepo)
   lazy val vscaladocModuleConfig   = ModuleConfiguration("org.scala-tools", "vscaladoc", "1.1-md-3", AkkaRepo)
   lazy val aspectWerkzModuleConfig = ModuleConfiguration("org.codehaus.aspectwerkz", "aspectwerkz", "2.2.3", AkkaRepo)
+  lazy val objenesisModuleConfig   = ModuleConfiguration("org.objenesis", sbt.DefaultMavenRepository)
   lazy val localMavenRepo          = LocalMavenRepo // Second exception, also fast! ;-)
 
   // -------------------------------------------------------------------------------------------------------------------
@@ -115,7 +113,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
   lazy val JACKSON_VERSION       = "1.7.1"
   lazy val JERSEY_VERSION        = "1.3"
   lazy val MULTIVERSE_VERSION    = "0.6.2"
-  lazy val SCALATEST_VERSION     = "1.3"
+  lazy val SCALATEST_VERSION     = "1.4-SNAPSHOT"
   lazy val JETTY_VERSION         = "7.2.2.v20101205"
   lazy val JAVAX_SERVLET_VERSION = "3.0"
   lazy val SLF4J_VERSION         = "1.6.0"
@@ -129,7 +127,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
     // Compile
     lazy val aopalliance = "aopalliance" % "aopalliance" % "1.0" % "compile" //Public domain
 
-    lazy val aspectwerkz = "org.codehaus.aspectwerkz" % "aspectwerkz" % "2.2.3" % "compile" //LGPL 2.1
+    lazy val aspectwerkz = "org.codehaus.aspectwerkz" % "aspectwerkz" % "2.2.3" % "compile" //ApacheV2
 
     lazy val commons_codec = "commons-codec" % "commons-codec" % "1.4" % "compile" //ApacheV2
 
@@ -158,8 +156,8 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
 
     lazy val protobuf = "com.google.protobuf" % "protobuf-java" % "2.3.0" % "compile" //New BSD
 
-    lazy val sjson      = "net.debasishg" % "sjson_2.8.1" % "0.10" % "compile" //ApacheV2
-    lazy val sjson_test = "net.debasishg" % "sjson_2.8.1" % "0.10" % "test" //ApacheV2
+    lazy val sjson      = "net.debasishg" % "sjson_2.9.0.RC1" % "0.11" % "compile" //ApacheV2
+    lazy val sjson_test = "net.debasishg" % "sjson_2.9.0.RC1" % "0.11" % "test" //ApacheV2
 
     lazy val slf4j   = "org.slf4j"      % "slf4j-api"       % "1.6.0"
     lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.24"
@@ -181,18 +179,20 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
   // -------------------------------------------------------------------------------------------------------------------
 
   lazy val akka_actor       = project("akka-actor",       "akka-actor",       new AkkaActorProject(_))
+  lazy val akka_testkit     = project("akka-testkit",     "akka-testkit",     new AkkaTestkitProject(_),    akka_actor)
+  lazy val akka_actor_tests = project("akka-actor-tests", "akka-actor-tests", new AkkaActorTestsProject(_), akka_testkit)
   lazy val akka_stm         = project("akka-stm",         "akka-stm",         new AkkaStmProject(_),        akka_actor)
-  lazy val akka_typed_actor = project("akka-typed-actor", "akka-typed-actor", new AkkaTypedActorProject(_), akka_stm)
+  lazy val akka_typed_actor = project("akka-typed-actor", "akka-typed-actor", new AkkaTypedActorProject(_), akka_stm, akka_actor_tests)
   lazy val akka_remote      = project("akka-remote",      "akka-remote",      new AkkaRemoteProject(_),     akka_typed_actor)
   lazy val akka_http        = project("akka-http",        "akka-http",        new AkkaHttpProject(_),       akka_actor)
   lazy val akka_samples     = project("akka-samples",     "akka-samples",     new AkkaSamplesParentProject(_))
-  lazy val akka_sbt_plugin  = project("akka-sbt-plugin",  "akka-sbt-plugin",  new AkkaSbtPluginProject(_))
-  lazy val akka_testkit     = project("akka-testkit",     "akka-testkit",     new AkkaTestkitProject(_),    akka_actor)
   lazy val akka_slf4j       = project("akka-slf4j",       "akka-slf4j",       new AkkaSlf4jProject(_),      akka_actor)
+  lazy val akka_tutorials   = project("akka-tutorials",   "akka-tutorials",   new AkkaTutorialsParentProject(_),      akka_actor)
 
   // -------------------------------------------------------------------------------------------------------------------
   // Miscellaneous
   // -------------------------------------------------------------------------------------------------------------------
+
   override def disableCrossPaths = true
 
   override def packageOptions =
@@ -209,31 +209,26 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
     (super.fullClasspath(config) ** "slf4j*1.5.11.jar")
   }
 
-//  override def runClasspath = super.runClasspath +++ "config"
-
   // ------------------------------------------------------------
-  // publishing
+  // Publishing
+  // ------------------------------------------------------------
+
   override def managedStyle = ManagedStyle.Maven
 
-  //override def defaultPublishRepository = Some(Resolver.file("maven-local", Path.userHome / ".m2" / "repository" asFile))
-  val publishTo = Resolver.file("maven-local", Path.userHome / ".m2" / "repository" asFile)
+  lazy val akkaPublishRepository = systemOptional[String]("akka.publish.repository", "default")
+  lazy val akkaPublishCredentials = systemOptional[String]("akka.publish.credentials", "none")
 
-  override def artifacts = Set(Artifact(artifactID, "pom", "pom"))
+  if (akkaPublishCredentials.value != "none") Credentials(akkaPublishCredentials.value, log)
 
-  override def deliverProjectDependencies =
-    super.deliverProjectDependencies.toList - akka_samples.projectID - akka_sbt_plugin.projectID
+  def publishToRepository = {
+    val repoUrl = akkaPublishRepository.value
+    if (repoUrl != "default") Resolver.url("Akka Publish Repository", new java.net.URL(repoUrl))
+    else Resolver.file("Local Maven Repository", Path.userHome / ".m2" / "repository" asFile)
+  }
 
-  // val sourceArtifact = Artifact(artifactID, "src", "jar", Some("sources"), Nil, None)
-  // val docsArtifact   = Artifact(artifactID, "doc", "jar", Some("docs"), Nil, None)
+  val publishTo = publishToRepository
 
-  // Credentials(Path.userHome / ".akka_publish_credentials", log)
-
-  // override def documentOptions = encodingUtf8.map(SimpleDocOption(_))
-  // override def packageDocsJar = defaultJarPath("-docs.jar")
-  // override def packageSrcJar= defaultJarPath("-sources.jar")
-  // override def packageToPublishActions = super.packageToPublishActions ++ Seq(packageDocs, packageSrc)
-
-  override def pomExtra =
+  override def pomExtra = {
     2009
     http://akka.io
     
@@ -247,27 +242,15 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
         repo
       
     
+  }
 
-  // publish to local mvn
-  import Process._
-  lazy val publishLocalMvn = runMvnInstall
-  def runMvnInstall = task {
-    for (absPath <- akkaArtifacts.getPaths) {
-      val artifactRE = """(.*)/dist/(.*)-(\d.*)\.jar""".r
-      val artifactRE(path, artifactId, artifactVersion) = absPath
-      val command = "mvn install:install-file" +
-                    " -Dfile=" + absPath +
-                    " -DgroupId=se.scalablesolutions.akka" +
-                    " -DartifactId=" + artifactId +
-                    " -Dversion=" + version +
-                    " -Dpackaging=jar -DgeneratePom=true"
-      command ! log
-    }
-    None
-  } dependsOn(dist) describedAs("Run mvn install for artifacts in dist.")
+  override def artifacts = Set(Artifact(artifactID, "pom", "pom"))
 
+  override def deliverProjectDependencies = super.deliverProjectDependencies.toList - akka_samples.projectID - akka_tutorials.projectID
 
+  // ------------------------------------------------------------  
   // Build release
+  // ------------------------------------------------------------
 
   val localReleasePath = outputPath / "release" / version.toString
   val localReleaseRepository = Resolver.file("Local Release", localReleasePath / "repository" asFile)
@@ -289,16 +272,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
   // -------------------------------------------------------------------------------------------------------------------
 
   class AkkaActorProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) {
-    // testing
-    val junit           = Dependencies.junit
-    val scalatest       = Dependencies.scalatest
-    val multiverse_test = Dependencies.multiverse_test // StandardLatch
-
     override def bndExportPackage = super.bndExportPackage ++ Seq("com.eaio.*;version=3.2")
-
-    // some tests depend on testkit, so include that and make sure it's compiled
-    override def testClasspath = super.testClasspath +++ akka_testkit.path("target") / "classes"
-    override def testCompileAction = super.testCompileAction dependsOn (akka_testkit.compile)
   }
 
   // -------------------------------------------------------------------------------------------------------------------
@@ -325,6 +299,9 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
     // testing
     val junit     = Dependencies.junit
     val scalatest = Dependencies.scalatest
+
+    override def deliverProjectDependencies =
+      super.deliverProjectDependencies.toList - akka_actor_tests.projectID ++ Seq(akka_actor_tests.projectID % "test")
   }
 
   // -------------------------------------------------------------------------------------------------------------------
@@ -346,6 +323,13 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
     val junit     = Dependencies.junit
     val scalatest = Dependencies.scalatest
 
+    lazy val networkTestsEnabled = systemOptional[Boolean]("akka.test.network", false)
+
+    override def testOptions = super.testOptions ++ {
+      if (!networkTestsEnabled.value) Seq(TestFilter(test => !test.endsWith("NetworkTest")))
+      else Seq.empty
+    }
+
     override def bndImportPackage = "javax.transaction;version=1.1" :: super.bndImportPackage.toList
   }
 
@@ -371,7 +355,6 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
   // Examples
   // -------------------------------------------------------------------------------------------------------------------
 
-  /** FIXME SPDE doesn't exist for 2.9.0-SNAPSHOT
   class AkkaSampleAntsProject(info: ProjectInfo) extends DefaultSpdeProject(info) {
     override def disableCrossPaths = true
     override def spdeSourcePath = mainSourcePath / "spde"
@@ -386,22 +369,25 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
       val releaseConfiguration = new DefaultPublishConfiguration(localReleaseRepository, "release", false)
       publishTask(publishIvyModule, releaseConfiguration) dependsOn (deliver, publishLocal, makePom)
     }
-  }*/
+  }
 
   class AkkaSampleRemoteProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath)
 
+  class AkkaSampleChatProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath)
+
   class AkkaSampleFSMProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath)
 
   class AkkaSamplesParentProject(info: ProjectInfo) extends ParentProject(info) {
     override def disableCrossPaths = true
 
-    //FIXME ANts is disabled due to unavailable for 2.9.0-SNAPSHOT
-   // lazy val akka_sample_ants = project("akka-sample-ants", "akka-sample-ants",
-    //  new AkkaSampleAntsProject(_), akka_stm)
+    lazy val akka_sample_ants = project("akka-sample-ants", "akka-sample-ants",
+      new AkkaSampleAntsProject(_), akka_stm)
     lazy val akka_sample_fsm = project("akka-sample-fsm", "akka-sample-fsm",
       new AkkaSampleFSMProject(_), akka_actor)
     lazy val akka_sample_remote = project("akka-sample-remote", "akka-sample-remote",
       new AkkaSampleRemoteProject(_), akka_remote)
+    lazy val akka_sample_chat = project("akka-sample-chat", "akka-sample-chat",
+      new AkkaSampleChatProject(_), akka_remote)
 
     lazy val publishRelease = {
       val releaseConfiguration = new DefaultPublishConfiguration(localReleaseRepository, "release", false)
@@ -410,10 +396,22 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
   }
 
   // -------------------------------------------------------------------------------------------------------------------
-  // akka-sbt-plugin subproject
+  // Tutorials
   // -------------------------------------------------------------------------------------------------------------------
 
-  class AkkaSbtPluginProject(info: ProjectInfo) extends PluginProject(info) {
+  class AkkaTutorialFirstProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath)
+
+  class AkkaTutorialSecondProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath)
+
+  class AkkaTutorialsParentProject(info: ProjectInfo) extends ParentProject(info) {
+    override def disableCrossPaths = true
+
+    lazy val akka_tutorial_first = project("akka-tutorial-first", "akka-tutorial-first",
+      new AkkaTutorialFirstProject(_), akka_actor)
+
+    lazy val akka_tutorial_second = project("akka-tutorial-second", "akka-tutorial-second",
+      new AkkaTutorialSecondProject(_), akka_actor)
+
     lazy val publishRelease = {
       val releaseConfiguration = new DefaultPublishConfiguration(localReleaseRepository, "release", false)
       publishTask(publishIvyModule, releaseConfiguration) dependsOn (deliver, publishLocal, makePom)
@@ -426,6 +424,17 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
 
   class AkkaTestkitProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath)
 
+  // -------------------------------------------------------------------------------------------------------------------
+  // akka-actor-tests subproject
+  // -------------------------------------------------------------------------------------------------------------------
+
+  class AkkaActorTestsProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) {
+    // testing
+    val junit           = Dependencies.junit
+    val scalatest       = Dependencies.scalatest
+    val multiverse_test = Dependencies.multiverse_test // StandardLatch
+  }
+  
   // -------------------------------------------------------------------------------------------------------------------
   // akka-slf4j subproject
   // -------------------------------------------------------------------------------------------------------------------
@@ -445,8 +454,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
 
   def allArtifacts = {
     Path.fromFile(buildScalaInstance.libraryJar) +++
-    (removeDupEntries(runClasspath filter ClasspathUtilities.isArchive) ---
-    (akka_sbt_plugin.runClasspath filter ClasspathUtilities.isArchive) +++
+    (removeDupEntries(runClasspath filter ClasspathUtilities.isArchive) +++
     ((outputPath ##) / defaultJarName) +++
     mainResources +++
     mainDependencies.scalaJars +++
@@ -463,11 +471,9 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
   }
 
   def akkaArtifacts = descendents(info.projectPath / "dist", "*-" + version + ".jar")
-  lazy val integrationTestsEnabled = systemOptional[Boolean]("integration.tests",false)
-  lazy val stressTestsEnabled = systemOptional[Boolean]("stress.tests",false)
 
   // ------------------------------------------------------------
-  class AkkaDefaultProject(info: ProjectInfo, val deployPath: Path) extends DefaultProject(info) 
+  class AkkaDefaultProject(info: ProjectInfo, val deployPath: Path) extends DefaultProject(info)
     with DeployProject with OSGiProject with McPom {
     override def disableCrossPaths = true
 
@@ -483,21 +489,15 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
     override def packageToPublishActions = super.packageToPublishActions ++ Seq(this.packageDocs, this.packageSrc)
     override def pomPostProcess(node: scala.xml.Node): scala.xml.Node = mcPom(AkkaParentProject.this.moduleConfigurations)(super.pomPostProcess(node))
 
-    /**
-     * Used for testOptions, possibility to enable the running of integration and or stresstests
-     *
-     * To enable set true and disable set false
-     * set integration.tests true
-     * set stress.tests true
-     */
-    def createTestFilter(defaultTests: (String) => Boolean) = { TestFilter({
-        case s: String if defaultTests(s) => true
-        case s: String if integrationTestsEnabled.value => s.endsWith("TestIntegration")
-        case s: String if stressTestsEnabled.value      => s.endsWith("TestStress")
-        case _ => false
-      }) :: Nil
+    lazy val excludeTestsProperty = systemOptional[String]("akka.test.exclude", "")
+
+    def excludeTests = {
+      val exclude = excludeTestsProperty.value
+      if (exclude.isEmpty) Seq.empty else exclude.split(",").toSeq
     }
 
+    override def testOptions = super.testOptions ++ excludeTests.map(exclude => TestFilter(test => !test.contains(exclude)))
+
     lazy val publishRelease = {
       val releaseConfiguration = new DefaultPublishConfiguration(localReleaseRepository, "release", false)
       publishTask(publishIvyModule, releaseConfiguration) dependsOn (deliver, publishLocal, makePom)
@@ -542,12 +542,12 @@ trait McPom { self: DefaultProject =>
       case u                   => u + "/"
     }
 
-    val oldRepos = 
-      (node \\ "project" \ "repositories" \ "repository").map { n => 
+    val oldRepos =
+      (node \\ "project" \ "repositories" \ "repository").map { n =>
         cleanUrl((n \ "url").text) -> (n \ "name").text
       }.toList
 
-    val newRepos = 
+    val newRepos =
       mcs.filter(_.resolver.isInstanceOf[MavenRepository]).map { m =>
         val r = m.resolver.asInstanceOf[MavenRepository]
         cleanUrl(r.root) -> r.name
diff --git a/project/plugins/Plugins.scala b/project/plugins/Plugins.scala
index 0a2e64a2a8..ce3e609964 100644
--- a/project/plugins/Plugins.scala
+++ b/project/plugins/Plugins.scala
@@ -8,7 +8,6 @@ class Plugins(info: ProjectInfo) extends PluginDefinition(info) {
   object Repositories {
     lazy val AquteRepo      = "aQute Maven Repository" at "http://www.aqute.biz/repo"
     lazy val DatabinderRepo = "Databinder Repository" at "http://databinder.net/repo"
-    lazy val EmbeddedRepo   = "Embedded Repo" at (info.projectPath / "embedded-repo").asURL.toString
   }
 
   // -------------------------------------------------------------------------------------------------------------------
@@ -24,6 +23,6 @@ class Plugins(info: ProjectInfo) extends PluginDefinition(info) {
   // -------------------------------------------------------------------------------------------------------------------
   // Dependencies
   // -------------------------------------------------------------------------------------------------------------------
-  lazy val bnd4sbt    = "com.weiglewilczek.bnd4sbt" % "bnd4sbt"           % "1.0.0.RC4"
+  lazy val bnd4sbt    = "com.weiglewilczek.bnd4sbt" % "bnd4sbt"           % "1.0.1"
   lazy val spdeSbt    = "us.technically.spde"       % "spde-sbt-plugin"   % "0.4.2"
 }
diff --git a/scripts/git-remove-history.sh b/scripts/git-remove-history.sh
new file mode 100755
index 0000000000..5daf35f5c5
--- /dev/null
+++ b/scripts/git-remove-history.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+cat <<'EOT'
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@ This command rewrites GIT history like git-rebase. Beware never to rewrite   @
+@ trees which are already published, as that would deeply upset all cloning    @
+@ repos. For more details see 'git help rebase'. Tread carefully!              @
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+EOT
+read -p "I know what I am doing: " answer
+test "$answer" = yes || exit 1
+
+set -o errexit
+ 
+# Author: David Underhill
+# Script to permanently delete files/folders from your git repository.  To use 
+# it, cd to your repository's root and then run the script with a list of paths
+# you want to delete, e.g., git-delete-history path1 path2
+ 
+if [ $# -eq 0 ]; then
+    exit 0
+fi
+ 
+# make sure we're at the root of git repo
+if [ ! -d .git ]; then
+    echo "Error: must run this script from the root of a git repository"
+    exit 1
+fi
+ 
+# remove all paths passed as arguments from the history of the repo
+files=$@
+git filter-branch --index-filter "git rm -rf --cached --ignore-unmatch $files" HEAD
+ 
+# remove the temporary history that git-filter-branch otherwise leaves behind for a long time
+rm -rf .git/refs/original/ && git reflog expire --all &&  git gc --aggressive --prune
diff --git a/scripts/ip-mod.sh b/scripts/ip-mod.sh
new file mode 100755
index 0000000000..e9b509ae59
--- /dev/null
+++ b/scripts/ip-mod.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+# flush rules
+ipfw del pipe 1
+ipfw del pipe 2
+ipfw -q -f flush
+ipfw -q -f pipe flush
+
+if [ "$1" == "" ]; then
+    echo "Options: ip-mod.sh slow"
+    echo "         ip-mod.sh block"
+    echo "         ip-mod.sh reset"
+    echo "         ip-mod.sh restore"
+    exit
+elif [ "$1" == "restore" ]; then
+    echo "restoring normal network"
+    exit
+elif [ "$1" == "slow" ]; then
+    # simulate slow connection 
+    echo "enabling slow connection"
+    ipfw add pipe 1 ip from any to any
+    ipfw add pipe 2 ip from any to any
+    ipfw pipe 1 config bw 60KByte/s delay 350ms
+    ipfw pipe 2 config bw 60KByte/s delay 350ms
+elif [ "$1" == "block" ]; then
+    echo "enabling blocked connections"
+    ipfw add 1 deny tcp from any to any 1024-65535
+elif [ "$1" == "reset" ]; then
+    echo "enabling reset connections"
+    ipfw add 1 reset tcp from any to any 1024-65535
+fi