diff --git a/.gitignore b/.gitignore index 28bd0c884d..91c3a65819 100755 --- a/.gitignore +++ b/.gitignore @@ -46,5 +46,6 @@ multiverse.log .eprj .*.swp akka-docs/_build/ +*.pyc akka-tutorials/akka-tutorial-first/project/boot/ akka-tutorials/akka-tutorial-first/project/plugins/project/ \ No newline at end of file diff --git a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java index d35946ce60..cdec7f5631 100644 --- a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java +++ b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java @@ -3,28 +3,50 @@ package akka.dispatch; import org.junit.Test; import static org.junit.Assert.*; import java.util.concurrent.Callable; +import java.util.LinkedList; import akka.japi.Function; import akka.japi.Procedure; import scala.Some; import scala.Right; import static akka.dispatch.Futures.future; +import static akka.dispatch.Futures.traverse; +import static akka.dispatch.Futures.sequence; -@SuppressWarnings("unchecked") public class JavaFutureTests { +public class JavaFutureTests { @Test public void mustBeAbleToMapAFuture() { - Future f1 = future(new Callable() { + Future<String> f1 = future(new Callable<String>() { public String call() { return "Hello"; } }); - Future f2 = f1.map(new Function() { + Future<String> f2 = f1.map(new Function<String, String>() { public String apply(String s) { return s + " World"; } }); - assertEquals(new Some(new Right("Hello World")), f2.await().value()); + assertEquals("Hello World", f2.get()); + } + + // TODO: Improve this test, perhaps with an Actor + @Test public void mustSequenceAFutureList() { + LinkedList<Future<String>> listFutures = new LinkedList<Future<String>>(); + LinkedList<String> listExpected = new LinkedList<String>(); + + for (int i = 0; i < 10; i++) { + listExpected.add("test"); + listFutures.add(future(new Callable<String>() { + public String call() { + return "test"; + } + })); + } + + Future<Iterable<String>> futureList = sequence(listFutures); + + assertEquals(futureList.get(), 
listExpected); } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/actor/FSMTimingSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/FSMTimingSpec.scala index 606ac280b7..158bd3f0ee 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/actor/FSMTimingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/actor/FSMTimingSpec.scala @@ -4,6 +4,7 @@ import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers import akka.testkit.TestKit +import akka.util.Duration import akka.util.duration._ @@ -29,6 +30,18 @@ class FSMTimingSpec extends WordSpec with MustMatchers with TestKit { } } + "allow StateTimeout override" in { + within (500 millis) { + fsm ! TestStateTimeoutOverride + expectNoMsg + } + within (50 millis) { + fsm ! Cancel + expectMsg(Cancel) + expectMsg(Transition(fsm, TestStateTimeout, Initial)) + } + } + "receive single-shot timer" in { within (50 millis, 150 millis) { fsm ! TestSingleTimer @@ -81,6 +94,7 @@ object FSMTimingSpec { trait State case object Initial extends State case object TestStateTimeout extends State + case object TestStateTimeoutOverride extends State case object TestSingleTimer extends State case object TestRepeatedTimer extends State case object TestUnhandled extends State @@ -102,10 +116,13 @@ object FSMTimingSpec { case Ev(TestRepeatedTimer) => setTimer("tester", Tick, 100 millis, true) goto(TestRepeatedTimer) using 4 + case Ev(TestStateTimeoutOverride) => + goto(TestStateTimeout) forMax (Duration.Inf) case Ev(x : FSMTimingSpec.State) => goto(x) } when(TestStateTimeout, stateTimeout = 100 millis) { case Ev(StateTimeout) => goto(Initial) + case Ev(Cancel) => goto(Initial) replying (Cancel) } when(TestSingleTimer) { case Ev(Tick) => diff --git a/akka-actor-tests/src/test/scala/akka/actor/actor/FSMTransitionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/FSMTransitionSpec.scala new file mode 100644 index 0000000000..fd0485a116 --- /dev/null +++ 
b/akka-actor-tests/src/test/scala/akka/actor/actor/FSMTransitionSpec.scala @@ -0,0 +1,90 @@ +package akka.actor + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers + +import akka.testing._ +import akka.testkit._ +import akka.util.duration._ +import akka.config.Supervision._ + +import FSM._ + +object FSMTransitionSpec { + + class Supervisor extends Actor { + self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), None, None) + def receive = { case _ => } + } + + class MyFSM(target : ActorRef) extends Actor with FSM[Int, Unit] { + startWith(0, Unit) + when(0) { + case Ev("tick") => goto(1) + } + when(1) { + case Ev("tick") => goto(0) + } + whenUnhandled { + case Ev("reply") => stay replying "reply" + } + initialize + override def preRestart(reason : Throwable) { target ! "restarted" } + } + + class Forwarder(target : ActorRef) extends Actor { + def receive = { case x => target ! x } + } + +} + +class FSMTransitionSpec extends WordSpec with MustMatchers with TestKit { + + import FSMTransitionSpec._ + + "A FSM transition notifier" must { + + "notify listeners" in { + val fsm = Actor.actorOf(new MyFSM(testActor)).start() + within(1 second) { + fsm ! SubscribeTransitionCallBack(testActor) + expectMsg(CurrentState(fsm, 0)) + fsm ! "tick" + expectMsg(Transition(fsm, 0, 1)) + fsm ! "tick" + expectMsg(Transition(fsm, 1, 0)) + } + } + + "not fail when listener goes away" in { + val forward = Actor.actorOf(new Forwarder(testActor)).start() + val fsm = Actor.actorOf(new MyFSM(testActor)).start() + val sup = Actor.actorOf[Supervisor].start() + sup link fsm + within(300 millis) { + fsm ! SubscribeTransitionCallBack(forward) + expectMsg(CurrentState(fsm, 0)) + forward.stop() + fsm ! 
"tick" + expectNoMsg + } + } + + "not fail when listener is invalid" in { + val forward = Actor.actorOf(new Forwarder(testActor)) + val fsm = Actor.actorOf(new MyFSM(testActor)).start() + val sup = Actor.actorOf[Supervisor].start() + sup link fsm + within(300 millis) { + fsm ! SubscribeTransitionCallBack(forward) + fsm ! "reply" + expectMsg("reply") + forward.start() + fsm ! SubscribeTransitionCallBack(forward) + expectMsg(CurrentState(fsm, 0)) + } + } + + } + +} diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index 6fc96bb6d2..e12294a70d 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -316,11 +316,11 @@ class FutureSpec extends JUnitSuite { }).start() val oddFutures: List[Future[Int]] = List.fill(100)(oddActor !!! 'GetNext) - assert(Futures.sequence(oddFutures).get.sum === 10000) + assert(Future.sequence(oddFutures).get.sum === 10000) oddActor.stop() val list = (1 to 100).toList - assert(Futures.traverse(list)(x => Future(x * 2 - 1)).get.sum === 10000) + assert(Future.traverse(list)(x => Future(x * 2 - 1)).get.sum === 10000) } @Test def shouldHandleThrowables { diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index d79bd0651e..51b805f69e 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -1,537 +1,532 @@ package akka.actor.routing +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers + +import akka.testing._ +import akka.testing.Testing.{sleepFor, testMillis} +import akka.util.duration._ + import akka.actor.Actor import akka.actor.Actor._ - -import org.scalatest.Suite -import org.junit.runner.RunWith -import org.scalatest.junit.JUnitRunner -import 
org.scalatest.matchers.MustMatchers -import org.junit.Test - -import java.util.concurrent.atomic.AtomicInteger -import java.util.concurrent.{CountDownLatch, TimeUnit} import akka.routing._ -@RunWith(classOf[JUnitRunner]) -class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers { +import java.util.concurrent.atomic.AtomicInteger + + +class RoutingSpec extends WordSpec with MustMatchers { import Routing._ - @Test def testDispatcher = { - val (testMsg1,testMsg2,testMsg3,testMsg4) = ("test1","test2","test3","test4") - val targetOk = new AtomicInteger(0) - val t1 = actorOf( new Actor() { - def receive = { - case `testMsg1` => self.reply(3) - case `testMsg2` => self.reply(7) - } - } ).start() + "Routing" must { - val t2 = actorOf( new Actor() { - def receive = { - case `testMsg3` => self.reply(11) - } - }).start() + "dispatch" in { + val Test1 = "test1" + val Test2 = "test2" + val Test3 = "test3" - val d = dispatcherActor { - case `testMsg1`|`testMsg2` => t1 - case `testMsg3` => t2 - }.start() - - val result = for { - a <- (d !! (testMsg1, 5000)).as[Int] - b <- (d !! (testMsg2, 5000)).as[Int] - c <- (d !! (testMsg3, 5000)).as[Int] - } yield a + b + c - - result.isDefined must be (true) - result.get must be(21) - - for(a <- List(t1,t2,d)) a.stop() - } - - @Test def testLogger = { - val msgs = new java.util.concurrent.ConcurrentSkipListSet[Any] - val latch = new CountDownLatch(2) - val t1 = actorOf(new Actor { def receive = { case _ => } }).start() - val l = loggerActor(t1,(x) => { msgs.add(x); latch.countDown() }).start() - val foo : Any = "foo" - val bar : Any = "bar" - l ! foo - l ! 
bar - val done = latch.await(5,TimeUnit.SECONDS) - done must be (true) - msgs must ( have size (2) and contain (foo) and contain (bar) ) - t1.stop() - l.stop() - } - - @Test def testSmallestMailboxFirstDispatcher = { - val t1ProcessedCount = new AtomicInteger(0) - val latch = new CountDownLatch(500) - val t1 = actorOf(new Actor { - def receive = { - case x => - Thread.sleep(50) // slow actor - t1ProcessedCount.incrementAndGet - latch.countDown() - } - }).start() - - val t2ProcessedCount = new AtomicInteger(0) - val t2 = actorOf(new Actor { - def receive = { - case x => t2ProcessedCount.incrementAndGet - latch.countDown() - } - }).start() - val d = loadBalancerActor(new SmallestMailboxFirstIterator(t1 :: t2 :: Nil)) - for (i <- 1 to 500) d ! i - val done = latch.await(10,TimeUnit.SECONDS) - done must be (true) - t1ProcessedCount.get must be < (t2ProcessedCount.get) // because t1 is much slower and thus has a bigger mailbox all the time - for(a <- List(t1,t2,d)) a.stop() - } - - @Test def testListener = { - val latch = new CountDownLatch(2) - val foreachListener = new CountDownLatch(2) - val num = new AtomicInteger(0) - val i = actorOf(new Actor with Listeners { - def receive = listenerManagement orElse { - case "foo" => gossip("bar") - } - }) - i.start() - - def newListener = actorOf(new Actor { - def receive = { - case "bar" => - num.incrementAndGet - latch.countDown() - case "foo" => foreachListener.countDown() - } - }).start() - - val a1 = newListener - val a2 = newListener - val a3 = newListener - - i ! Listen(a1) - i ! Listen(a2) - i ! Listen(a3) - i ! Deafen(a3) - i ! WithListeners(_ ! "foo") - i ! 
"foo" - - val done = latch.await(5,TimeUnit.SECONDS) - done must be (true) - num.get must be (2) - val withListeners = foreachListener.await(5,TimeUnit.SECONDS) - withListeners must be (true) - for(a <- List(i,a1,a2,a3)) a.stop() - } - - @Test def testIsDefinedAt = { - import akka.actor.ActorRef - - val (testMsg1,testMsg2,testMsg3,testMsg4) = ("test1","test2","test3","test4") - - val t1 = actorOf( new Actor() { - def receive = { - case `testMsg1` => self.reply(3) - case `testMsg2` => self.reply(7) - } - } ).start() - - val t2 = actorOf( new Actor() { - def receive = { - case `testMsg1` => self.reply(3) - case `testMsg2` => self.reply(7) - } - } ).start() - - val t3 = actorOf( new Actor() { - def receive = { - case `testMsg1` => self.reply(3) - case `testMsg2` => self.reply(7) - } - } ).start() - - val t4 = actorOf( new Actor() { - def receive = { - case `testMsg1` => self.reply(3) - case `testMsg2` => self.reply(7) - } - } ).start() - - val d1 = loadBalancerActor(new SmallestMailboxFirstIterator(t1 :: t2 :: Nil)) - val d2 = loadBalancerActor(new CyclicIterator[ActorRef](t3 :: t4 :: Nil)) - - t1.isDefinedAt(testMsg1) must be (true) - t1.isDefinedAt(testMsg3) must be (false) - t2.isDefinedAt(testMsg1) must be (true) - t2.isDefinedAt(testMsg3) must be (false) - d1.isDefinedAt(testMsg1) must be (true) - d1.isDefinedAt(testMsg3) must be (false) - d2.isDefinedAt(testMsg1) must be (true) - d2.isDefinedAt(testMsg3) must be (false) - - for(a <- List(t1,t2,d1,d2)) a.stop() - } - - // Actor Pool Capacity Tests - - // - // make sure the pool is of the fixed, expected capacity - // - @Test def testFixedCapacityActorPool = { - val latch = new CountDownLatch(2) - val counter = new AtomicInteger(0) - class TestPool extends Actor with DefaultActorPool - with FixedCapacityStrategy - with SmallestMailboxSelector - { - def factory = actorOf(new Actor { + val t1 = actorOf(new Actor { def receive = { - case _ => - counter.incrementAndGet - latch.countDown() - self reply_? 
"success" + case Test1 => self.reply(3) + case Test2 => self.reply(7) } - }) + }).start() - def limit = 2 - def selectionCount = 1 - def partialFill = true - def instance = factory - def receive = _route - } - - val successes = new CountDownLatch(2) - implicit val successCounterActor = Some(actorOf(new Actor { - def receive = { - case "success" => successes.countDown() - } - }).start()) - - val pool = actorOf(new TestPool).start() - pool ! "a" - pool ! "b" - - latch.await(1,TimeUnit.SECONDS) must be (true) - successes.await(1,TimeUnit.SECONDS) must be (true) - counter.get must be (2) - (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be (2) - - pool stop - } - - @Test def testTicket705 = { - - val actorPool = actorOf(new Actor with DefaultActorPool - with BoundedCapacityStrategy - with MailboxPressureCapacitor - with SmallestMailboxSelector - with BasicFilter { - //with BasicNoBackoffFilter { - def lowerBound = 2 - def upperBound = 20 - def rampupRate = 0.1 - def backoffRate = 0.1 - def backoffThreshold = 0.5 - def partialFill = true - def selectionCount = 1 - def instance = factory - def receive = _route - def pressureThreshold = 1 - def factory = actorOf(new Actor { - def receive = { - case req: String => { - Thread.sleep(10L) - self.reply_?("Response") - } - } - }) - }).start() - - try { - (for(count <- 1 to 500) yield actorPool.!!![String]("Test", 20000)) foreach { - _.await.resultOrException.get must be ("Response") - } - } finally { - actorPool.stop() - } - } - - // - // make sure the pool starts at the expected lower limit and grows to the upper as needed - // as influenced by the backlog of blocking pooled actors - // - @Test def testBoundedCapacityActorPoolWithActiveFuturesPressure = { - - var latch = new CountDownLatch(3) - val counter = new AtomicInteger(0) - class TestPool extends Actor with DefaultActorPool - with BoundedCapacityStrategy - with ActiveFuturesPressureCapacitor - with SmallestMailboxSelector - with 
BasicNoBackoffFilter - { - def factory = actorOf(new Actor { + val t2 = actorOf( new Actor() { def receive = { - case n:Int => - Thread.sleep(n) - counter.incrementAndGet + case Test3 => self.reply(11) + } + }).start() + + val d = dispatcherActor { + case Test1 | Test2 => t1 + case Test3 => t2 + }.start() + + val result = for { + a <- (d !! (Test1, testMillis(5 seconds))).as[Int] + b <- (d !! (Test2, testMillis(5 seconds))).as[Int] + c <- (d !! (Test3, testMillis(5 seconds))).as[Int] + } yield a + b + c + + result.isDefined must be (true) + result.get must be (21) + + for(a <- List(t1, t2, d)) a.stop() + } + + "have messages logged" in { + val msgs = new java.util.concurrent.ConcurrentSkipListSet[Any] + val latch = TestLatch(2) + + val actor = actorOf(new Actor { + def receive = { case _ => } + }).start() + + val logger = loggerActor(actor, x => { msgs.add(x); latch.countDown() }).start() + + val foo: Any = "foo" + val bar: Any = "bar" + + logger ! foo + logger ! bar + + latch.await + + msgs must ( have size (2) and contain (foo) and contain (bar) ) + + actor.stop() + logger.stop() + } + + "dispatch to smallest mailbox" in { + val t1Count = new AtomicInteger(0) + val t2Count = new AtomicInteger(0) + val latch = TestLatch(500) + + val t1 = actorOf(new Actor { + def receive = { + case x => + sleepFor(50 millis) // slow actor + t1Count.incrementAndGet latch.countDown() } - }) + }).start() - def lowerBound = 2 - def upperBound = 4 - def rampupRate = 0.1 - def partialFill = true - def selectionCount = 1 - def instance = factory - def receive = _route - } - - // - // first message should create the minimum number of delgates - // - val pool = actorOf(new TestPool).start() - pool ! 1 - (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be (2) - - var loops = 0 - def loop(t:Int) = { - latch = new CountDownLatch(loops) - counter.set(0) - for (m <- 0 until loops) { - pool !!! 
t - Thread.sleep(50) - } - } - - // - // 2 more should go thru w/out triggering more - // - loops = 2 - loop(500) - var done = latch.await(5,TimeUnit.SECONDS) - done must be (true) - counter.get must be (loops) - (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be (2) - - // - // a whole bunch should max it out - // - loops = 10 - loop(500) - - done = latch.await(5,TimeUnit.SECONDS) - done must be (true) - counter.get must be (loops) - (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be (4) - - pool stop - } - - // - // make sure the pool starts at the expected lower limit and grows to the upper as needed - // as influenced by the backlog of messages in the delegate mailboxes - // - @Test def testBoundedCapacityActorPoolWithMailboxPressure = { - - var latch = new CountDownLatch(3) - val counter = new AtomicInteger(0) - class TestPool extends Actor with DefaultActorPool - with BoundedCapacityStrategy - with MailboxPressureCapacitor - with SmallestMailboxSelector - with BasicNoBackoffFilter - { - def factory = actorOf(new Actor { + val t2 = actorOf(new Actor { def receive = { - case n:Int => - Thread.sleep(n) - counter.incrementAndGet + case x => + t2Count.incrementAndGet latch.countDown() } - }) + }).start() - def lowerBound = 2 - def upperBound = 4 - def pressureThreshold = 3 - def rampupRate = 0.1 - def partialFill = true - def selectionCount = 1 - def instance = factory - def receive = _route + val d = loadBalancerActor(new SmallestMailboxFirstIterator(t1 :: t2 :: Nil)) + + for (i <- 1 to 500) d ! i + + latch.await(10 seconds) + + // because t1 is much slower and thus has a bigger mailbox all the time + t1Count.get must be < (t2Count.get) + + for(a <- List(t1, t2, d)) a.stop() } - val pool = actorOf(new TestPool).start() - var loops = 0 - def loop(t:Int) = { - latch = new CountDownLatch(loops) - counter.set(0) - for (m <- 0 until loops) { - pool ! 
t + "listen" in { + val fooLatch = TestLatch(2) + val barLatch = TestLatch(2) + val barCount = new AtomicInteger(0) + + val broadcast = actorOf(new Actor with Listeners { + def receive = listenerManagement orElse { + case "foo" => gossip("bar") } - } + }).start() + + def newListener = actorOf(new Actor { + def receive = { + case "bar" => + barCount.incrementAndGet + barLatch.countDown() + case "foo" => + fooLatch.countDown() + } + }).start() + + val a1 = newListener + val a2 = newListener + val a3 = newListener + + broadcast ! Listen(a1) + broadcast ! Listen(a2) + broadcast ! Listen(a3) + + broadcast ! Deafen(a3) + + broadcast ! WithListeners(_ ! "foo") + broadcast ! "foo" + + barLatch.await + barCount.get must be (2) + + fooLatch.await + + for(a <- List(broadcast, a1 ,a2 ,a3)) a.stop() + } + + "be defined at" in { + import akka.actor.ActorRef + + val Yes = "yes" + val No = "no" + + def testActor() = actorOf( new Actor() { + def receive = { + case Yes => "yes" + } + }).start() + + val t1 = testActor() + val t2 = testActor() + val t3 = testActor() + val t4 = testActor() + + val d1 = loadBalancerActor(new SmallestMailboxFirstIterator(t1 :: t2 :: Nil)) + val d2 = loadBalancerActor(new CyclicIterator[ActorRef](t3 :: t4 :: Nil)) + + t1.isDefinedAt(Yes) must be (true) + t1.isDefinedAt(No) must be (false) + t2.isDefinedAt(Yes) must be (true) + t2.isDefinedAt(No) must be (false) + d1.isDefinedAt(Yes) must be (true) + d1.isDefinedAt(No) must be (false) + d2.isDefinedAt(Yes) must be (true) + d2.isDefinedAt(No) must be (false) + + for(a <- List(t1, t2, d1, d2)) a.stop() + } + } + + "Actor Pool" must { + + "have expected capacity" in { + val latch = TestLatch(2) + val count = new AtomicInteger(0) + + val pool = actorOf( + new Actor with DefaultActorPool + with FixedCapacityStrategy + with SmallestMailboxSelector + { + def factory = actorOf(new Actor { + def receive = { + case _ => + count.incrementAndGet + latch.countDown() + self reply_? 
"success" + } + }).start() + + def limit = 2 + def selectionCount = 1 + def partialFill = true + def instance = factory + def receive = _route + }).start() + + val successes = TestLatch(2) + val successCounter = Some(actorOf(new Actor { + def receive = { + case "success" => successes.countDown() + } + }).start()) + + implicit val replyTo = successCounter + pool ! "a" + pool ! "b" + + latch.await + successes.await + + count.get must be (2) + + (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be (2) + + pool.stop() + } + + + "pass ticket #705" in { + val pool = actorOf( + new Actor with DefaultActorPool + with BoundedCapacityStrategy + with MailboxPressureCapacitor + with SmallestMailboxSelector + with BasicFilter + { + def lowerBound = 2 + def upperBound = 20 + def rampupRate = 0.1 + def backoffRate = 0.1 + def backoffThreshold = 0.5 + def partialFill = true + def selectionCount = 1 + def instance = factory + def receive = _route + def pressureThreshold = 1 + def factory = actorOf(new Actor { + def receive = { + case req: String => { + sleepFor(10 millis) + self.reply_?("Response") + } + } + }) + }).start() + + try { + (for (count <- 1 to 500) yield pool.!!![String]("Test", 20000)) foreach { + _.await.resultOrException.get must be ("Response") + } + } finally { + pool.stop() + } + } + + "grow as needed under pressure" in { + // make sure the pool starts at the expected lower limit and grows to the upper as needed + // as influenced by the backlog of blocking pooled actors + + var latch = TestLatch(3) + val count = new AtomicInteger(0) + + val pool = actorOf( + new Actor with DefaultActorPool + with BoundedCapacityStrategy + with ActiveFuturesPressureCapacitor + with SmallestMailboxSelector + with BasicNoBackoffFilter + { + def factory = actorOf(new Actor { + def receive = { + case n: Int => + sleepFor(n millis) + count.incrementAndGet + latch.countDown() + } + }) + + def lowerBound = 2 + def upperBound = 4 + def rampupRate = 0.1 + def 
partialFill = true + def selectionCount = 1 + def instance = factory + def receive = _route + }).start() + + // first message should create the minimum number of delgates + + pool ! 1 + + (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be (2) + + var loops = 0 + def loop(t: Int) = { + latch = TestLatch(loops) + count.set(0) + for (m <- 0 until loops) { + pool !!! t + sleepFor(50 millis) + } + } + + // 2 more should go thru without triggering more + + loops = 2 + + loop(500) + latch.await + count.get must be (loops) + + (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be (2) + + // a whole bunch should max it out + + loops = 10 + loop(500) + latch.await + count.get must be (loops) + + (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be (4) + + pool.stop() + } + + "grow as needed under mailbox pressure" in { + // make sure the pool starts at the expected lower limit and grows to the upper as needed + // as influenced by the backlog of messages in the delegate mailboxes + + var latch = TestLatch(3) + val count = new AtomicInteger(0) + + val pool = actorOf( + new Actor with DefaultActorPool + with BoundedCapacityStrategy + with MailboxPressureCapacitor + with SmallestMailboxSelector + with BasicNoBackoffFilter + { + def factory = actorOf(new Actor { + def receive = { + case n: Int => + sleepFor(n millis) + count.incrementAndGet + latch.countDown() + } + }) + + def lowerBound = 2 + def upperBound = 4 + def pressureThreshold = 3 + def rampupRate = 0.1 + def partialFill = true + def selectionCount = 1 + def instance = factory + def receive = _route + }).start() + + var loops = 0 + def loop(t: Int) = { + latch = TestLatch(loops) + count.set(0) + for (m <- 0 until loops) { + pool ! 
t + } + } + + // send a few messages and observe pool at its lower bound + loops = 3 + loop(500) + latch.await + count.get must be (loops) - // - // send a few messages and observe pool at its lower bound - // - loops = 3 - loop(500) - var done = latch.await(5,TimeUnit.SECONDS) - done must be (true) - counter.get must be (loops) (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be (2) - // // send a bunch over the theshold and observe an increment - // loops = 15 loop(500) - done = latch.await(10,TimeUnit.SECONDS) - done must be (true) - counter.get must be (loops) + latch.await(10 seconds) + count.get must be (loops) + (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be >= (3) - pool stop - } - - // Actor Pool Selector Tests - - @Test def testRoundRobinSelector = { - - var latch = new CountDownLatch(2) - val delegates = new java.util.concurrent.ConcurrentHashMap[String, String] - - class TestPool1 extends Actor with DefaultActorPool - with FixedCapacityStrategy - with RoundRobinSelector - with BasicNoBackoffFilter - { - def factory = actorOf(new Actor { - def receive = { - case _ => - delegates put(self.uuid.toString, "") - latch.countDown() - } - }) - - def limit = 1 - def selectionCount = 2 - def rampupRate = 0.1 - def partialFill = true - def instance = factory - def receive = _route + pool.stop() } - val pool1 = actorOf(new TestPool1).start() - pool1 ! "a" - pool1 ! 
"b" - var done = latch.await(1,TimeUnit.SECONDS) - done must be (true) - delegates.size must be (1) - pool1 stop + "round robin" in { + val latch1 = TestLatch(2) + val delegates = new java.util.concurrent.ConcurrentHashMap[String, String] - class TestPool2 extends Actor with DefaultActorPool - with FixedCapacityStrategy - with RoundRobinSelector - with BasicNoBackoffFilter - { - def factory = actorOf(new Actor { - def receive = { - case _ => - delegates put(self.uuid.toString, "") - latch.countDown() - } - }) + val pool1 = actorOf( + new Actor with DefaultActorPool + with FixedCapacityStrategy + with RoundRobinSelector + with BasicNoBackoffFilter + { + def factory = actorOf(new Actor { + def receive = { + case _ => + delegates put(self.uuid.toString, "") + latch1.countDown() + } + }) - def limit = 2 - def selectionCount = 2 - def rampupRate = 0.1 - def partialFill = false - def instance = factory - def receive = _route + def limit = 1 + def selectionCount = 1 + def rampupRate = 0.1 + def partialFill = true + def instance = factory + def receive = _route + }).start() + + pool1 ! "a" + pool1 ! "b" + + latch1.await + delegates.size must be (1) + + pool1.stop() + + val latch2 = TestLatch(2) + delegates.clear() + + val pool2 = actorOf( + new Actor with DefaultActorPool + with FixedCapacityStrategy + with RoundRobinSelector + with BasicNoBackoffFilter + { + def factory = actorOf(new Actor { + def receive = { + case _ => + delegates put(self.uuid.toString, "") + latch2.countDown() + } + }) + + def limit = 2 + def selectionCount = 1 + def rampupRate = 0.1 + def partialFill = false + def instance = factory + def receive = _route + }).start() + + pool2 ! "a" + pool2 ! "b" + + latch2.await + delegates.size must be (2) + + pool2.stop() } - latch = new CountDownLatch(2) - delegates clear + "backoff" in { + val latch = TestLatch(10) - val pool2 = actorOf(new TestPool2).start() - pool2 ! "a" - pool2 ! 
"b" - done = latch.await(1, TimeUnit.SECONDS) - done must be (true) - delegates.size must be (2) - pool2 stop - } - - // Actor Pool Filter Tests + val pool = actorOf( + new Actor with DefaultActorPool + with BoundedCapacityStrategy + with MailboxPressureCapacitor + with SmallestMailboxSelector + with Filter + with RunningMeanBackoff + with BasicRampup + { + def factory = actorOf(new Actor { + def receive = { + case n: Int => + sleepFor(n millis) + latch.countDown() + } + }) - // - // reuse previous test to max pool then observe filter reducing capacity over time - // - @Test def testBoundedCapacityActorPoolWithBackoffFilter = { + def lowerBound = 1 + def upperBound = 5 + def pressureThreshold = 1 + def partialFill = true + def selectionCount = 1 + def rampupRate = 0.1 + def backoffRate = 0.50 + def backoffThreshold = 0.50 + def instance = factory + def receive = _route + }).start() - var latch = new CountDownLatch(10) - class TestPool extends Actor with DefaultActorPool - with BoundedCapacityStrategy - with MailboxPressureCapacitor - with SmallestMailboxSelector - with Filter - with RunningMeanBackoff - with BasicRampup - { - def factory = actorOf(new Actor { - def receive = { - case n:Int => - Thread.sleep(n) - latch.countDown() - } - }) + // put some pressure on the pool - def lowerBound = 1 - def upperBound = 5 - def pressureThreshold = 1 - def partialFill = true - def selectionCount = 1 - def rampupRate = 0.1 - def backoffRate = 0.50 - def backoffThreshold = 0.50 - def instance = factory - def receive = _route + for (m <- 0 to 10) pool ! 250 + + sleepFor(5 millis) + + val z = (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size + + z must be >= (2) + + // let it cool down + + for (m <- 0 to 3) { + pool ! 1 + sleepFor(500 millis) + } + + (pool !! 
ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be <= (z) + + pool.stop() } - - - // - // put some pressure on the pool - // - val pool = actorOf(new TestPool).start() - for (m <- 0 to 10) pool ! 250 - Thread.sleep(5) - val z = (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size - z must be >= (2) - var done = latch.await(10,TimeUnit.SECONDS) - done must be (true) - - - // - // - // - for (m <- 0 to 3) { - pool ! 1 - Thread.sleep(500) - } - (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be <= (z) - - pool stop - } } } + diff --git a/akka-actor/src/main/java/akka/event/JavaEventHandler.java b/akka-actor/src/main/java/akka/event/JavaEventHandler.java new file mode 100644 index 0000000000..7e6e2d4143 --- /dev/null +++ b/akka-actor/src/main/java/akka/event/JavaEventHandler.java @@ -0,0 +1,35 @@ +package akka.event; + + +import akka.actor.ActorRef; + +/** + * Java API for Akka EventHandler + */ + +public class JavaEventHandler { + + + public static void notify(Object message){ + EventHandler$.MODULE$.notify(message); + } + + public static void debug(ActorRef instance, Object message){ + EventHandler$.MODULE$.debug(instance, message); + } + + public static void info(ActorRef instance, Object message){ + EventHandler$.MODULE$.info(instance,message); + } + + public static void warning(ActorRef instance, Object message){ + EventHandler$.MODULE$.warning(instance,message); + } + + public static void error(ActorRef instance, Object message){ + EventHandler$.MODULE$.error(instance,message); + } + +} + + diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 71f916b6e8..6fa44452e0 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -88,9 +88,6 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal protected[akka] var _uuid = newUuid @volatile 
protected[this] var _status: ActorRefInternals.StatusType = ActorRefInternals.UNSTARTED - @volatile - protected[akka] var _futureTimeout: Option[ScheduledFuture[AnyRef]] = None - protected[akka] val guard = new ReentrantGuard /** * User overridable callback/setting. @@ -112,6 +109,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal * Defines the default timeout for '!!' and '!!!' invocations, * e.g. the timeout for the future returned by the call to '!!' and '!!!'. */ + @deprecated("Will be replaced by implicit-scoped timeout on all methods that needs it, will default to timeout specified in config") @BeanProperty @volatile var timeout: Long = Actor.TIMEOUT @@ -186,11 +184,13 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal /** * Returns on which node this actor lives if None it lives in the local ActorRegistry */ + @deprecated("Remoting will become fully transparent in the future") def homeAddress: Option[InetSocketAddress] /** * Java API.

*/ + @deprecated("Remoting will become fully transparent in the future") def getHomeAddress(): InetSocketAddress = homeAddress getOrElse null /** @@ -256,6 +256,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal /** * Is the actor able to handle the message passed in as arguments? */ + @deprecated("Will be removed without replacement, it's just not reliable in the face of `become` and `unbecome`") def isDefinedAt(message: Any): Boolean = actor.isDefinedAt(message) /** @@ -381,23 +382,27 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal /** * Returns the class for the Actor instance that is managed by the ActorRef. */ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def actorClass: Class[_ <: Actor] /** * Akka Java API.

* Returns the class for the Actor instance that is managed by the ActorRef. */ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def getActorClass(): Class[_ <: Actor] = actorClass /** * Returns the class name for the Actor instance that is managed by the ActorRef. */ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def actorClassName: String /** * Akka Java API.

* Returns the class name for the Actor instance that is managed by the ActorRef. */ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def getActorClassName(): String = actorClassName /** @@ -572,20 +577,6 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal } override def toString = "Actor[" + id + ":" + uuid + "]" - - protected[akka] def checkReceiveTimeout = { - cancelReceiveTimeout - if (receiveTimeout.isDefined && dispatcher.mailboxSize(this) <= 0) { //Only reschedule if desired and there are currently no more messages to be processed - _futureTimeout = Some(Scheduler.scheduleOnce(this, ReceiveTimeout, receiveTimeout.get, TimeUnit.MILLISECONDS)) - } - } - - protected[akka] def cancelReceiveTimeout = { - if (_futureTimeout.isDefined) { - _futureTimeout.get.cancel(true) - _futureTimeout = None - } - } } /** @@ -598,7 +589,10 @@ class LocalActorRef private[akka] ( val homeAddress: Option[InetSocketAddress], val clientManaged: Boolean = false) extends ActorRef with ScalaActorRef { + protected[akka] val guard = new ReentrantGuard + @volatile + protected[akka] var _futureTimeout: Option[ScheduledFuture[AnyRef]] = None @volatile private[akka] lazy val _linkedActors = new ConcurrentHashMap[Uuid, ActorRef] @volatile @@ -650,11 +644,13 @@ class LocalActorRef private[akka] ( /** * Returns the class for the Actor instance that is managed by the ActorRef. */ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def actorClass: Class[_ <: Actor] = actor.getClass.asInstanceOf[Class[_ <: Actor]] /** * Returns the class name for the Actor instance that is managed by the ActorRef. 
*/ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def actorClassName: String = actorClass.getName /** @@ -1102,6 +1098,21 @@ class LocalActorRef private[akka] ( actor.preStart // run actor preStart Actor.registry.register(this) } + + + protected[akka] def checkReceiveTimeout = { + cancelReceiveTimeout + if (receiveTimeout.isDefined && dispatcher.mailboxSize(this) <= 0) { //Only reschedule if desired and there are currently no more messages to be processed + _futureTimeout = Some(Scheduler.scheduleOnce(this, ReceiveTimeout, receiveTimeout.get, TimeUnit.MILLISECONDS)) + } + } + + protected[akka] def cancelReceiveTimeout = { + if (_futureTimeout.isDefined) { + _futureTimeout.get.cancel(true) + _futureTimeout = None + } + } } /** @@ -1173,6 +1184,7 @@ private[akka] case class RemoteActorRef private[akka] ( protected[akka] def registerSupervisorAsRemoteActor: Option[Uuid] = None // ==== NOT SUPPORTED ==== + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def actorClass: Class[_ <: Actor] = unsupported def dispatcher_=(md: MessageDispatcher): Unit = unsupported def dispatcher: MessageDispatcher = unsupported diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 37752b1373..815ab1076c 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -4,6 +4,7 @@ package akka.actor import akka.util._ +import akka.event.EventHandler import scala.collection.mutable import java.util.concurrent.ScheduledFuture @@ -137,7 +138,7 @@ object FSM { * timerActive_? 
("tock") * */ -trait FSM[S, D] { +trait FSM[S, D] extends ListenerManagement { this: Actor => import FSM._ @@ -146,7 +147,9 @@ trait FSM[S, D] { type Timeout = Option[Duration] type TransitionHandler = PartialFunction[(S, S), Unit] - /* DSL */ + /****************************************** + * DSL + ******************************************/ /** * Insert a new StateFunction at the end of the processing chain for the @@ -323,15 +326,25 @@ trait FSM[S, D] { makeTransition(currentState) } - /**FSM State data and default handlers */ + /****************************************************************** + * PRIVATE IMPLEMENTATION DETAILS + ******************************************************************/ + + /* + * FSM State data and current timeout handling + */ private var currentState: State = _ private var timeoutFuture: Option[ScheduledFuture[AnyRef]] = None private var generation: Long = 0L - private var transitionCallBackList: List[ActorRef] = Nil - + /* + * Timer handling + */ private val timers = mutable.Map[String, Timer]() + /* + * State definitions + */ private val stateFunctions = mutable.Map[S, StateFunction]() private val stateTimeouts = mutable.Map[S, Timeout]() @@ -345,23 +358,38 @@ trait FSM[S, D] { } } + /* + * unhandled event handler + */ private val handleEventDefault: StateFunction = { case Event(value, stateData) => stay } private var handleEvent: StateFunction = handleEventDefault + /* + * termination handling + */ private var terminateEvent: PartialFunction[StopEvent[S,D], Unit] = { case StopEvent(Failure(cause), _, _) => case StopEvent(reason, _, _) => } + /* + * transition handling + */ private var transitionEvent: List[TransitionHandler] = Nil private def handleTransition(prev : S, next : S) { val tuple = (prev, next) for (te <- transitionEvent) { if (te.isDefinedAt(tuple)) te(tuple) } } + // ListenerManagement shall not start() or stop() listener actors + override protected val manageLifeCycleOfListeners = false + + 
/********************************************* + * Main actor receive() method + *********************************************/ override final protected def receive: Receive = { case TimeoutMarker(gen) => if (generation == gen) { @@ -375,11 +403,16 @@ trait FSM[S, D] { } } case SubscribeTransitionCallBack(actorRef) => - // send current state back as reference point - actorRef ! CurrentState(self, currentState.stateName) - transitionCallBackList ::= actorRef + addListener(actorRef) + // send current state back as reference point + try { + actorRef ! CurrentState(self, currentState.stateName) + } catch { + case e : ActorInitializationException => + EventHandler.warning(this, "trying to register not running listener") + } case UnsubscribeTransitionCallBack(actorRef) => - transitionCallBackList = transitionCallBackList.filterNot(_ == actorRef) + removeListener(actorRef) case value => { if (timeoutFuture.isDefined) { timeoutFuture.get.cancel(true) @@ -411,10 +444,7 @@ trait FSM[S, D] { } else { if (currentState.stateName != nextState.stateName) { handleTransition(currentState.stateName, nextState.stateName) - if (!transitionCallBackList.isEmpty) { - val transition = Transition(self, currentState.stateName, nextState.stateName) - transitionCallBackList.foreach(_ ! transition) - } + notifyListeners(Transition(self, currentState.stateName, nextState.stateName)) } applyState(nextState) } @@ -425,7 +455,7 @@ trait FSM[S, D] { val timeout = if (currentState.timeout.isDefined) currentState.timeout else stateTimeouts(currentState.stateName) if (timeout.isDefined) { val t = timeout.get - if (t.length >= 0) { + if (t.finite_? 
&& t.length >= 0) { timeoutFuture = Some(Scheduler.scheduleOnce(self, TimeoutMarker(generation), t.length, t.unit)) } } diff --git a/akka-actor/src/main/scala/akka/actor/Supervisor.scala b/akka-actor/src/main/scala/akka/actor/Supervisor.scala index 22abafaccc..e32b515ae5 100644 --- a/akka-actor/src/main/scala/akka/actor/Supervisor.scala +++ b/akka-actor/src/main/scala/akka/actor/Supervisor.scala @@ -4,7 +4,6 @@ package akka.actor -import akka.config.Supervision._ import akka.AkkaException import akka.util._ import ReflectiveAccess._ @@ -81,7 +80,7 @@ case class SupervisorFactory(val config: SupervisorConfig) { def newInstance: Supervisor = newInstanceFor(config) def newInstanceFor(config: SupervisorConfig): Supervisor = { - val supervisor = new Supervisor(config.restartStrategy) + val supervisor = new Supervisor(config.restartStrategy, config.maxRestartsHandler) supervisor.configure(config) supervisor.start supervisor @@ -100,13 +99,13 @@ case class SupervisorFactory(val config: SupervisorConfig) { * * @author Jonas Bonér */ -sealed class Supervisor(handler: FaultHandlingStrategy) { +sealed class Supervisor(handler: FaultHandlingStrategy, maxRestartsHandler: (ActorRef, MaximumNumberOfRestartsWithinTimeRangeReached) => Unit) { import Supervisor._ private val _childActors = new ConcurrentHashMap[String, List[ActorRef]] private val _childSupervisors = new CopyOnWriteArrayList[Supervisor] - private[akka] val supervisor = actorOf(new SupervisorActor(handler)).start() + private[akka] val supervisor = actorOf(new SupervisorActor(handler,maxRestartsHandler)).start() def uuid = supervisor.uuid @@ -127,7 +126,8 @@ sealed class Supervisor(handler: FaultHandlingStrategy) { _childActors.values.toArray.toList.asInstanceOf[List[Supervisor]] def configure(config: SupervisorConfig): Unit = config match { - case SupervisorConfig(_, servers) => + case SupervisorConfig(_, servers, _) => + servers.map(server => server match { case Supervise(actorRef, lifeCycle, registerAsRemoteService) 
=> @@ -143,7 +143,7 @@ sealed class Supervisor(handler: FaultHandlingStrategy) { supervisor.link(actorRef) if (registerAsRemoteService) Actor.remote.register(actorRef) - case supervisorConfig @ SupervisorConfig(_, _) => // recursive supervisor configuration + case supervisorConfig @ SupervisorConfig(_, _,_) => // recursive supervisor configuration val childSupervisor = Supervisor(supervisorConfig) supervisor.link(childSupervisor.supervisor) _childSupervisors.add(childSupervisor) @@ -156,9 +156,10 @@ sealed class Supervisor(handler: FaultHandlingStrategy) { * * @author Jonas Bonér */ -final class SupervisorActor private[akka] (handler: FaultHandlingStrategy) extends Actor { +final class SupervisorActor private[akka] (handler: FaultHandlingStrategy, maxRestartsHandler: (ActorRef,MaximumNumberOfRestartsWithinTimeRangeReached) => Unit) extends Actor { self.faultHandler = handler + override def postStop(): Unit = { val i = self.linkedActors.values.iterator while(i.hasNext) { @@ -169,11 +170,8 @@ final class SupervisorActor private[akka] (handler: FaultHandlingStrategy) exten } def receive = { - // FIXME add a way to respond to MaximumNumberOfRestartsWithinTimeRangeReached in declaratively configured Supervisor - case MaximumNumberOfRestartsWithinTimeRangeReached( - victim, maxNrOfRetries, withinTimeRange, lastExceptionCausingRestart) => + case max@MaximumNumberOfRestartsWithinTimeRangeReached(_,_,_,_) => maxRestartsHandler(self, max) case unknown => throw new SupervisorException( "SupervisorActor can not respond to messages.\n\tUnknown message [" + unknown + "]") } } - diff --git a/akka-actor/src/main/scala/akka/config/SupervisionConfig.scala b/akka-actor/src/main/scala/akka/config/SupervisionConfig.scala index 9f63c64bc1..6b66f3415d 100644 --- a/akka-actor/src/main/scala/akka/config/SupervisionConfig.scala +++ b/akka-actor/src/main/scala/akka/config/SupervisionConfig.scala @@ -4,8 +4,9 @@ package akka.config -import akka.actor.{ActorRef} import 
akka.dispatch.MessageDispatcher +import akka.actor.{MaximumNumberOfRestartsWithinTimeRangeReached, ActorRef} +import akka.japi.{Procedure2, Procedure} case class RemoteAddress(val hostname: String, val port: Int) @@ -21,9 +22,10 @@ object Supervision { sealed abstract class LifeCycle extends ConfigElement sealed abstract class FaultHandlingStrategy(val trapExit: List[Class[_ <: Throwable]]) extends ConfigElement - case class SupervisorConfig(restartStrategy: FaultHandlingStrategy, worker: List[Server]) extends Server { + case class SupervisorConfig(restartStrategy: FaultHandlingStrategy, worker: List[Server], maxRestartsHandler: (ActorRef,MaximumNumberOfRestartsWithinTimeRangeReached)=> Unit = {(aRef,max)=>()}) extends Server { //Java API def this(restartStrategy: FaultHandlingStrategy, worker: Array[Server]) = this(restartStrategy,worker.toList) + def this(restartStrategy: FaultHandlingStrategy, worker: Array[Server], restartHandler:Procedure2[ActorRef,MaximumNumberOfRestartsWithinTimeRangeReached]) = this(restartStrategy,worker.toList, {(aRef,max)=>restartHandler.apply(aRef,max)}) } class Supervise(val actorRef: ActorRef, val lifeCycle: LifeCycle, val registerAsRemoteService: Boolean = false) extends Server { diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 7c52e716f2..04ff6a9504 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -20,12 +20,12 @@ import java.util.concurrent.TimeUnit *

  *   val dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("name")
  *   dispatcher
- *     .withNewThreadPoolWithBoundedBlockingQueue(100)
+ *     .withNewThreadPoolWithLinkedBlockingQueueWithCapacity(100)
  *     .setCorePoolSize(16)
  *     .setMaxPoolSize(128)
  *     .setKeepAliveTimeInMillis(60000)
  *     .setRejectionPolicy(new CallerRunsPolicy)
- *     .buildThreadPool
+ *     .build
  * 
*

* Java API. Dispatcher factory. @@ -34,12 +34,12 @@ import java.util.concurrent.TimeUnit *

  *   MessageDispatcher dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("name");
  *   dispatcher
- *     .withNewThreadPoolWithBoundedBlockingQueue(100)
+ *     .withNewThreadPoolWithLinkedBlockingQueueWithCapacity(100)
  *     .setCorePoolSize(16)
  *     .setMaxPoolSize(128)
  *     .setKeepAliveTimeInMillis(60000)
- *     .setRejectionPolicy(new CallerRunsPolicy)
- *     .buildThreadPool();
+ *     .setRejectionPolicy(new CallerRunsPolicy())
+ *     .build();
  * 
*

* diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 315da653f0..3e2a7eb620 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -14,6 +14,8 @@ import java.util.concurrent.locks.ReentrantLock import java.util.concurrent. {ConcurrentLinkedQueue, TimeUnit, Callable} import java.util.concurrent.TimeUnit.{NANOSECONDS => NANOS, MILLISECONDS => MILLIS} import java.util.concurrent.atomic. {AtomicBoolean, AtomicInteger} +import java.lang.{Iterable => JIterable} +import java.util.{LinkedList => JLinkedList} import annotation.tailrec class FutureTimeoutException(message: String) extends AkkaException(message) @@ -152,29 +154,47 @@ object Futures { def reduce[T <: AnyRef, R >: T](futures: java.lang.Iterable[Future[T]], timeout: Long, fun: akka.japi.Function2[R, T, T]): Future[R] = reduce(scala.collection.JavaConversions.iterableAsScalaIterable(futures), timeout)(fun.apply _) - import scala.collection.mutable.Builder - import scala.collection.generic.CanBuildFrom - /** - * Simple version of Futures.traverse. Transforms a Traversable[Future[A]] into a Future[Traversable[A]]. + * Java API. + * Simple version of Futures.traverse. Transforms a java.lang.Iterable[Future[A]] into a Future[java.util.LinkedList[A]]. * Useful for reducing many Futures into a single Future. 
*/ - def sequence[A, M[_] <: Traversable[_]](in: M[Future[A]], timeout: Long = Actor.TIMEOUT)(implicit cbf: CanBuildFrom[M[Future[A]], A, M[A]]): Future[M[A]] = - in.foldLeft(new DefaultCompletableFuture[Builder[A, M[A]]](timeout).completeWithResult(cbf(in)): Future[Builder[A, M[A]]])((fr, fa) => for (r <- fr; a <- fa.asInstanceOf[Future[A]]) yield (r += a)).map(_.result) + def sequence[A](in: JIterable[Future[A]], timeout: Long): Future[JLinkedList[A]] = + scala.collection.JavaConversions.iterableAsScalaIterable(in).foldLeft(Future(new JLinkedList[A]()))((fr, fa) => + for (r <- fr; a <- fa) yield { + r add a + r + }) /** - * Transforms a Traversable[A] into a Future[Traversable[B]] using the provided Function A => Future[B]. - * This is useful for performing a parallel map. For example, to apply a function to all items of a list - * in parallel: - *

-   * val myFutureList = Futures.traverse(myList)(x => Future(myFunc(x)))
-   * 
+ * Java API. + * Simple version of Futures.traverse. Transforms a java.lang.Iterable[Future[A]] into a Future[java.util.LinkedList[A]]. + * Useful for reducing many Futures into a single Future. */ - def traverse[A, B, M[_] <: Traversable[_]](in: M[A], timeout: Long = Actor.TIMEOUT)(fn: A => Future[B])(implicit cbf: CanBuildFrom[M[A], B, M[B]]): Future[M[B]] = - in.foldLeft(new DefaultCompletableFuture[Builder[B, M[B]]](timeout).completeWithResult(cbf(in)): Future[Builder[B, M[B]]]) { (fr, a) => - val fb = fn(a.asInstanceOf[A]) - for (r <- fr; b <-fb) yield (r += b) - }.map(_.result) + def sequence[A](in: JIterable[Future[A]]): Future[JLinkedList[A]] = sequence(in, Actor.TIMEOUT) + + /** + * Java API. + * Transforms a java.lang.Iterable[A] into a Future[java.util.LinkedList[B]] using the provided Function A => Future[B]. + * This is useful for performing a parallel map. For example, to apply a function to all items of a list + * in parallel. + */ + def traverse[A, B](in: JIterable[A], timeout: Long, fn: JFunc[A,Future[B]]): Future[JLinkedList[B]] = + scala.collection.JavaConversions.iterableAsScalaIterable(in).foldLeft(Future(new JLinkedList[B]())){(fr, a) => + val fb = fn(a) + for (r <- fr; b <- fb) yield { + r add b + r + } + } + + /** + * Java API. + * Transforms a java.lang.Iterable[A] into a Future[java.util.LinkedList[B]] using the provided Function A => Future[B]. + * This is useful for performing a parallel map. For example, to apply a function to all items of a list + * in parallel. + */ + def traverse[A, B](in: JIterable[A], fn: JFunc[A,Future[B]]): Future[JLinkedList[B]] = traverse(in, Actor.TIMEOUT, fn) // ===================================== // Deprecations @@ -225,6 +245,30 @@ object Future { val future = new DefaultCompletableFuture[Any](timeout) def !(msg: Any) = future << msg } + + import scala.collection.mutable.Builder + import scala.collection.generic.CanBuildFrom + + /** + * Simple version of Futures.traverse. 
Transforms a Traversable[Future[A]] into a Future[Traversable[A]]. + * Useful for reducing many Futures into a single Future. + */ + def sequence[A, M[_] <: Traversable[_]](in: M[Future[A]], timeout: Long = Actor.TIMEOUT)(implicit cbf: CanBuildFrom[M[Future[A]], A, M[A]]): Future[M[A]] = + in.foldLeft(new DefaultCompletableFuture[Builder[A, M[A]]](timeout).completeWithResult(cbf(in)): Future[Builder[A, M[A]]])((fr, fa) => for (r <- fr; a <- fa.asInstanceOf[Future[A]]) yield (r += a)).map(_.result) + + /** + * Transforms a Traversable[A] into a Future[Traversable[B]] using the provided Function A => Future[B]. + * This is useful for performing a parallel map. For example, to apply a function to all items of a list + * in parallel: + *
+   * val myFutureList = Futures.traverse(myList)(x => Future(myFunc(x)))
+   * 
+ */ + def traverse[A, B, M[_] <: Traversable[_]](in: M[A], timeout: Long = Actor.TIMEOUT)(fn: A => Future[B])(implicit cbf: CanBuildFrom[M[A], B, M[B]]): Future[M[B]] = + in.foldLeft(new DefaultCompletableFuture[Builder[B, M[B]]](timeout).completeWithResult(cbf(in)): Future[Builder[B, M[B]]]) { (fr, a) => + val fb = fn(a.asInstanceOf[A]) + for (r <- fr; b <-fb) yield (r += b) + }.map(_.result) } sealed trait Future[+T] { diff --git a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala index 20cd33b311..5d7bbf0a94 100644 --- a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala +++ b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala @@ -20,6 +20,12 @@ trait Procedure[T] { def apply(param: T): Unit } +/** A Procedure is like a Function, but it doesn't produce a return value + */ +trait Procedure2[T1,T2] { + def apply(param: T1, param2:T2): Unit +} + /** * An executable piece of code that takes no parameters and doesn't return any value. */ diff --git a/akka-actor/src/main/scala/akka/routing/Pool.scala b/akka-actor/src/main/scala/akka/routing/Pool.scala index 8d431541f7..6ab6aa0c4d 100644 --- a/akka-actor/src/main/scala/akka/routing/Pool.scala +++ b/akka-actor/src/main/scala/akka/routing/Pool.scala @@ -104,7 +104,7 @@ trait DefaultActorPool extends ActorPool { this: Actor => /** * Selectors * These traits define how, when a message needs to be routed, delegate(s) are chosen from the pool - **/ + */ /** * Returns the set of delegates with the least amount of message backlog. 
@@ -141,7 +141,7 @@ trait RoundRobinSelector { else selectionCount val set = - for (i <- 0 to take) yield { + for (i <- 0 until take) yield { _last = (_last + 1) % length delegates(_last) } diff --git a/akka-actor/src/main/scala/akka/util/ListenerManagement.scala b/akka-actor/src/main/scala/akka/util/ListenerManagement.scala index 777c048d70..efeb482377 100644 --- a/akka-actor/src/main/scala/akka/util/ListenerManagement.scala +++ b/akka-actor/src/main/scala/akka/util/ListenerManagement.scala @@ -5,8 +5,7 @@ package akka.util import java.util.concurrent.ConcurrentSkipListSet - -import akka.actor.ActorRef +import akka.actor.{ActorInitializationException, ActorRef} /** * A manager for listener actors. Intended for mixin by observables. @@ -46,7 +45,8 @@ trait ListenerManagement { def hasListeners: Boolean = !listeners.isEmpty /** - * Checks if a specfic listener is registered. + * Checks if a specfic listener is registered. ActorInitializationException leads to removal of listener if that + * one isShutdown. */ def hasListener(listener: ActorRef): Boolean = listeners.contains(listener) @@ -56,13 +56,20 @@ trait ListenerManagement { val iterator = listeners.iterator while (iterator.hasNext) { val listener = iterator.next - if (listener.isRunning) listener ! msg + // Uncomment if those exceptions are so frequent as to bottleneck + // if (listener.isShutdown) iterator.remove() else + try { + listener ! msg + } catch { + case e : ActorInitializationException => + if (listener.isShutdown) iterator.remove() + } } } } /** - * Execute f with each listener as argument. + * Execute f with each listener as argument. ActorInitializationException is not handled. 
*/ protected[akka] def foreachListener(f: (ActorRef) => Unit) { val iterator = listeners.iterator diff --git a/akka-docs/Makefile b/akka-docs/Makefile index fedddbee17..49f649367f 100644 --- a/akka-docs/Makefile +++ b/akka-docs/Makefile @@ -6,16 +6,24 @@ SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build +EASYINSTALL = easy_install +LOCALPACKAGES = $(shell pwd)/$(BUILDDIR)/site-packages +PYGMENTSDIR = _sphinx/pygments # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -.PHONY: help clean html singlehtml latex pdf +# Set python path to include local packages for pygments styles. +PYTHONPATH += $(LOCALPACKAGES) +export PYTHONPATH + +.PHONY: help clean pygments html singlehtml latex pdf help: @echo "Please use \`make ' where is one of" + @echo " pygments to locally install the custom pygments styles" @echo " html to make standalone HTML files" @echo " singlehtml to make a single large HTML file" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @@ -24,7 +32,15 @@ help: clean: -rm -rf $(BUILDDIR)/* -html: +pygments: + mkdir -p $(LOCALPACKAGES) + $(EASYINSTALL) --install-dir $(LOCALPACKAGES) $(PYGMENTSDIR) + -rm -rf $(PYGMENTSDIR)/*.egg-info $(PYGMENTSDIR)/build $(PYGMENTSDIR)/temp + @echo + @echo "Custom pygments styles have been installed." + @echo + +html: pygments $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." @@ -41,9 +57,8 @@ latex: @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." -pdf: +pdf: pygments $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." make -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 
- diff --git a/akka-docs/_sphinx/exts/includecode.py b/akka-docs/_sphinx/exts/includecode.py new file mode 100644 index 0000000000..c12ddfa7f4 --- /dev/null +++ b/akka-docs/_sphinx/exts/includecode.py @@ -0,0 +1,138 @@ +import os +import codecs +from os import path + +from docutils import nodes +from docutils.parsers.rst import Directive, directives + +class IncludeCode(Directive): + """ + Include a code example from a file with sections delimited with special comments. + """ + + has_content = False + required_arguments = 1 + optional_arguments = 0 + final_argument_whitespace = False + option_spec = { + 'section': directives.unchanged_required, + 'comment': directives.unchanged_required, + 'marker': directives.unchanged_required, + 'include': directives.unchanged_required, + 'exclude': directives.unchanged_required, + 'hideexcludes': directives.flag, + 'linenos': directives.flag, + 'language': directives.unchanged_required, + 'encoding': directives.encoding, + 'prepend': directives.unchanged_required, + 'append': directives.unchanged_required, + } + + def run(self): + document = self.state.document + arg0 = self.arguments[0] + (filename, sep, section) = arg0.partition('#') + + if not document.settings.file_insertion_enabled: + return [document.reporter.warning('File insertion disabled', + line=self.lineno)] + env = document.settings.env + if filename.startswith('/') or filename.startswith(os.sep): + rel_fn = filename[1:] + else: + docdir = path.dirname(env.doc2path(env.docname, base=None)) + rel_fn = path.join(docdir, filename) + try: + fn = path.join(env.srcdir, rel_fn) + except UnicodeDecodeError: + # the source directory is a bytestring with non-ASCII characters; + # let's try to encode the rel_fn in the file system encoding + rel_fn = rel_fn.encode(sys.getfilesystemencoding()) + fn = path.join(env.srcdir, rel_fn) + + encoding = self.options.get('encoding', env.config.source_encoding) + codec_info = codecs.lookup(encoding) + try: + f = 
codecs.StreamReaderWriter(open(fn, 'U'), + codec_info[2], codec_info[3], 'strict') + lines = f.readlines() + f.close() + except (IOError, OSError): + return [document.reporter.warning( + 'Include file %r not found or reading it failed' % filename, + line=self.lineno)] + except UnicodeError: + return [document.reporter.warning( + 'Encoding %r used for reading included file %r seems to ' + 'be wrong, try giving an :encoding: option' % + (encoding, filename))] + + comment = self.options.get('comment', '//') + marker = self.options.get('marker', comment + '#') + lenm = len(marker) + if not section: + section = self.options.get('section') + include_sections = self.options.get('include', '') + exclude_sections = self.options.get('exclude', '') + include = set(include_sections.split(',')) if include_sections else set() + exclude = set(exclude_sections.split(',')) if exclude_sections else set() + hideexcludes = 'hideexcludes' in self.options + if section: + include |= set([section]) + within = set() + res = [] + excluding = False + for line in lines: + index = line.find(marker) + if index >= 0: + section_name = line[index+lenm:].strip() + if section_name in within: + within ^= set([section_name]) + if excluding and not (exclude & within): + excluding = False + else: + within |= set([section_name]) + if not excluding and (exclude & within): + excluding = True + if not hideexcludes: + res.append(' ' * index + comment + ' ' + section_name.replace('-', ' ') + ' ...\n') + elif not (exclude & within) and (not include or (include & within)): + res.append(line) + lines = res + + def countwhile(predicate, iterable): + count = 0 + for x in iterable: + if predicate(x): + count += 1 + else: + return count + + nonempty = filter(lambda l: l.strip(), lines) + tabcounts = map(lambda l: countwhile(lambda c: c == ' ', l), nonempty) + tabshift = min(tabcounts) if tabcounts else 0 + + if tabshift > 0: + lines = map(lambda l: l[tabshift:] if len(l) > tabshift else l, lines) + + prepend = 
self.options.get('prepend') + append = self.options.get('append') + if prepend: + lines.insert(0, prepend + '\n') + if append: + lines.append(append + '\n') + + text = ''.join(lines) + retnode = nodes.literal_block(text, text, source=fn) + retnode.line = 1 + retnode.attributes['line_number'] = self.lineno + if self.options.get('language', ''): + retnode['language'] = self.options['language'] + if 'linenos' in self.options: + retnode['linenos'] = True + document.settings.env.note_dependency(rel_fn) + return [retnode] + +def setup(app): + app.require_sphinx('1.0') + app.add_directive('includecode', IncludeCode) diff --git a/akka-docs/_sphinx/pygments/setup.py b/akka-docs/_sphinx/pygments/setup.py new file mode 100644 index 0000000000..7c86a6a681 --- /dev/null +++ b/akka-docs/_sphinx/pygments/setup.py @@ -0,0 +1,19 @@ +""" +Akka syntax styles for Pygments. +""" + +from setuptools import setup + +entry_points = """ +[pygments.styles] +simple = styles.simple:SimpleStyle +""" + +setup( + name = 'akkastyles', + version = '0.1', + description = __doc__, + author = "Akka", + packages = ['styles'], + entry_points = entry_points +) diff --git a/akka-docs/_sphinx/pygments/styles/__init__.py b/akka-docs/_sphinx/pygments/styles/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/akka-docs/themes/akka/pygments/akka.py b/akka-docs/_sphinx/pygments/styles/simple.py similarity index 93% rename from akka-docs/themes/akka/pygments/akka.py rename to akka-docs/_sphinx/pygments/styles/simple.py index af9fe61bf9..bdf3c7878e 100644 --- a/akka-docs/themes/akka/pygments/akka.py +++ b/akka-docs/_sphinx/pygments/styles/simple.py @@ -3,7 +3,7 @@ pygments.styles.akka ~~~~~~~~~~~~~~~~~~~~~~~~ - Akka style for Scala highlighting. + Simple style for Scala highlighting. 
""" from pygments.style import Style @@ -11,9 +11,9 @@ from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace -class AkkaStyle(Style): +class SimpleStyle(Style): """ - Akka style for Scala highlighting. + Simple style for Scala highlighting. """ background_color = "#f0f0f0" diff --git a/akka-docs/_static/akka.png b/akka-docs/_sphinx/static/akka.png similarity index 100% rename from akka-docs/_static/akka.png rename to akka-docs/_sphinx/static/akka.png diff --git a/akka-docs/_static/logo.png b/akka-docs/_sphinx/static/logo.png similarity index 100% rename from akka-docs/_static/logo.png rename to akka-docs/_sphinx/static/logo.png diff --git a/akka-docs/themes/akka/layout.html b/akka-docs/_sphinx/themes/akka/layout.html similarity index 100% rename from akka-docs/themes/akka/layout.html rename to akka-docs/_sphinx/themes/akka/layout.html diff --git a/akka-docs/themes/akka/static/akka.css_t b/akka-docs/_sphinx/themes/akka/static/akka.css_t similarity index 99% rename from akka-docs/themes/akka/static/akka.css_t rename to akka-docs/_sphinx/themes/akka/static/akka.css_t index 7c417e9917..f05e86bb6a 100644 --- a/akka-docs/themes/akka/static/akka.css_t +++ b/akka-docs/_sphinx/themes/akka/static/akka.css_t @@ -30,13 +30,11 @@ div.footer { /* link colors and text decoration */ a:link { - font-weight: bold; text-decoration: none; color: {{ theme_linkcolor }}; } a:visited { - font-weight: bold; text-decoration: none; color: {{ theme_visitedlinkcolor }}; } diff --git a/akka-docs/themes/akka/static/alert_info_32.png b/akka-docs/_sphinx/themes/akka/static/alert_info_32.png similarity index 100% rename from akka-docs/themes/akka/static/alert_info_32.png rename to akka-docs/_sphinx/themes/akka/static/alert_info_32.png diff --git a/akka-docs/themes/akka/static/alert_warning_32.png b/akka-docs/_sphinx/themes/akka/static/alert_warning_32.png similarity index 100% rename from akka-docs/themes/akka/static/alert_warning_32.png rename to 
akka-docs/_sphinx/themes/akka/static/alert_warning_32.png diff --git a/akka-docs/themes/akka/static/bg-page.png b/akka-docs/_sphinx/themes/akka/static/bg-page.png similarity index 100% rename from akka-docs/themes/akka/static/bg-page.png rename to akka-docs/_sphinx/themes/akka/static/bg-page.png diff --git a/akka-docs/themes/akka/static/bullet_orange.png b/akka-docs/_sphinx/themes/akka/static/bullet_orange.png similarity index 100% rename from akka-docs/themes/akka/static/bullet_orange.png rename to akka-docs/_sphinx/themes/akka/static/bullet_orange.png diff --git a/akka-docs/themes/akka/theme.conf b/akka-docs/_sphinx/themes/akka/theme.conf similarity index 66% rename from akka-docs/themes/akka/theme.conf rename to akka-docs/_sphinx/themes/akka/theme.conf index 7f45fd1718..620c88f2ae 100644 --- a/akka-docs/themes/akka/theme.conf +++ b/akka-docs/_sphinx/themes/akka/theme.conf @@ -7,6 +7,6 @@ pygments_style = friendly full_logo = false textcolor = #333333 headingcolor = #0c3762 -linkcolor = #dc3c01 -visitedlinkcolor = #892601 -hoverlinkcolor = #ff4500 +linkcolor = #0c3762 +visitedlinkcolor = #0c3762 +hoverlinkcolor = #0c3762 diff --git a/akka-docs/conf.py b/akka-docs/conf.py index 4ff27f40bb..209f747afc 100644 --- a/akka-docs/conf.py +++ b/akka-docs/conf.py @@ -7,7 +7,8 @@ import sys, os # -- General configuration ----------------------------------------------------- -extensions = ['sphinx.ext.todo'] +sys.path.append(os.path.abspath('_sphinx/exts')) +extensions = ['sphinx.ext.todo', 'includecode'] templates_path = ['_templates'] source_suffix = '.rst' @@ -19,8 +20,10 @@ copyright = u'2009-2011, Scalable Solutions AB' version = '1.1' release = '1.1' -pygments_style = 'akka' +pygments_style = 'simple' highlight_language = 'scala' +add_function_parentheses = False +show_authors = True # -- Options for HTML output --------------------------------------------------- @@ -28,13 +31,13 @@ html_theme = 'akka' html_theme_options = { 'full_logo': 'true' } -html_theme_path = 
['themes'] +html_theme_path = ['_sphinx/themes'] html_title = 'Akka Documentation' -html_logo = '_static/logo.png' +html_logo = '_sphinx/static/logo.png' #html_favicon = None -html_static_path = ['_static'] +html_static_path = ['_sphinx/static'] html_last_updated_fmt = '%b %d, %Y' #html_sidebars = {} @@ -62,4 +65,4 @@ latex_elements = { 'preamble': '\\definecolor{VerbatimColor}{rgb}{0.935,0.935,0.935}' } -# latex_logo = '_static/akka.png' +# latex_logo = '_sphinx/static/akka.png' diff --git a/akka-docs/dev/documentation.rst b/akka-docs/dev/documentation.rst new file mode 100644 index 0000000000..9e280220e6 --- /dev/null +++ b/akka-docs/dev/documentation.rst @@ -0,0 +1,69 @@ + +.. highlightlang:: rest + +.. _documentation: + +############### + Documentation +############### + +The Akka documentation uses `reStructuredText`_ as its markup language and is +built using `Sphinx`_. + +.. _reStructuredText: http://docutils.sourceforge.net/rst.html +.. _sphinx: http://sphinx.pocoo.org + + +Sphinx +====== + +More to come... + + +reStructuredText +================ + +More to come... + +Sections +-------- + +Section headings are very flexible in reST. We use the following convention in +the Akka documentation: + +* ``#`` (over and under) for module headings +* ``=`` for sections +* ``-`` for subsections +* ``^`` for subsubsections +* ``~`` for subsubsubsections + + +Cross-referencing +----------------- + +Sections that may be cross-referenced across the documentation should be marked +with a reference. To mark a section use ``.. _ref-name:`` before the section +heading. The section can then be linked with ``:ref:`ref-name```. These are +unique references across the entire documentation. + +For example:: + + .. _akka-module: + + ############# + Akka Module + ############# + + This is the module documentation. + + .. 
_akka-section: + + Akka Section + ============ + + Akka Subsection + --------------- + + Here is a reference to "akka section": :ref:`akka-section` which will have the + name "Akka Section". + diff --git a/akka-docs/dev/index.rst b/akka-docs/dev/index.rst new file mode 100644 index 0000000000..05ab53742d --- /dev/null +++ b/akka-docs/dev/index.rst @@ -0,0 +1,7 @@ +Information for Developers +========================== + +.. toctree:: + :maxdepth: 2 + + documentation diff --git a/akka-docs/general/index.rst b/akka-docs/general/index.rst new file mode 100644 index 0000000000..1d716ed63a --- /dev/null +++ b/akka-docs/general/index.rst @@ -0,0 +1,11 @@ +General +======= + +.. toctree:: + :maxdepth: 1 + + migration-guide-0.7.x-0.8.x + migration-guide-0.8.x-0.9.x + migration-guide-0.9.x-0.10.x + migration-guide-0.10.x-1.0.x + migration-guide-1.0.x-1.1.x diff --git a/akka-docs/general/migration-guide-0.7.x-0.8.x.rst b/akka-docs/general/migration-guide-0.7.x-0.8.x.rst new file mode 100644 index 0000000000..4bf866a765 --- /dev/null +++ b/akka-docs/general/migration-guide-0.7.x-0.8.x.rst @@ -0,0 +1,94 @@ +Migration Guide 0.7.x to 0.8.x +============================== + +This is a case-by-case migration guide from Akka 0.7.x (on Scala 2.7.7) to Akka 0.8.x (on Scala 2.8.x) +------------------------------------------------------------------------------------------------------ + +Cases: +------ + +Actor.send is removed and replaced in full with Actor.! +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + myActor send "test" + +becomes + +.. code-block:: scala + + myActor ! "test" + +Actor.! now has it's implicit sender defaulted to None +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + def !(message: Any)(implicit sender: Option[Actor] = None) + +"import Actor.Sender.Self" has been removed because it's not needed anymore +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Remove + +.. 
code-block:: scala + + import Actor.Sender.Self + +Actor.spawn now uses manifests instead of concrete class types +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + val someActor = spawn(classOf[MyActor]) + +becomes + +.. code-block:: scala + + val someActor = spawn[MyActor] + +Actor.spawnRemote now uses manifests instead of concrete class types +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + val someActor = spawnRemote(classOf[MyActor],"somehost",1337) + +becomes + +.. code-block:: scala + + val someActor = spawnRemote[MyActor]("somehost",1337) + +Actor.spawnLink now uses manifests instead of concrete class types +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + val someActor = spawnLink(classOf[MyActor]) + +becomes + +.. code-block:: scala + + val someActor = spawnLink[MyActor] + +Actor.spawnLinkRemote now uses manifests instead of concrete class types +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: scala + + val someActor = spawnLinkRemote(classOf[MyActor],"somehost",1337) + +becomes + +.. code-block:: scala + + val someActor = spawnLinkRemote[MyActor]("somehost",1337) + +**Transaction.atomic and friends are moved into Transaction.Local._ and Transaction.Global._** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We now make a difference between transaction management that are local within a thread and global across many threads (and actors). 
diff --git a/akka-docs/general/migration-guide-0.8.x-0.9.x.rst b/akka-docs/general/migration-guide-0.8.x-0.9.x.rst new file mode 100644 index 0000000000..ae4637c35a --- /dev/null +++ b/akka-docs/general/migration-guide-0.8.x-0.9.x.rst @@ -0,0 +1,172 @@ +Migration Guide 0.8.x to 0.9.x +============================== + +**This document describes between the 0.8.x and the 0.9 release.** + +Background for the new ActorRef +------------------------------- + +In the work towards 0.9 release we have now done a major change to how Actors are created. In short we have separated identity and value, created an 'ActorRef' that holds the actual Actor instance. This allows us to do many great things such as for example: + +* Create serializable, immutable, network-aware Actor references that can be freely shared across the network. They "remember" their origin and will always work as expected. +* Not only kill and restart the same supervised Actor instance when it has crashed (as we do now), but dereference it, throw it away and make it eligible for garbage collection. +* etc. much more + +These work very much like the 'PID' (process id) in Erlang. + +These changes means that there is no difference in defining Actors. You still use the old Actor trait, all methods are there etc. But you can't just new this Actor up and send messages to it since all its public API methods are gone. They now reside in a new class; 'ActorRef' and use need to use instances of this class to interact with the Actor (sending messages etc.). + +Here is a short migration guide with the things that you have to change. It is a big conceptual change but in practice you don't have to change much. + + + +Creating Actors with default constructor +---------------------------------------- + +From: + +.. code-block:: scala + + val a = new MyActor + a ! msg + +To: + +.. code-block:: scala + + import Actor._ + val a = actorOf[MyActor] + a ! msg + +You can also start it in the same statement: + +.. 
code-block:: scala + + val a = actorOf[MyActor].start + +Creating Actors with non-default constructor +-------------------------------------------- + +From: + +.. code-block:: scala + + val a = new MyActor(..) + a ! msg + +To: + +.. code-block:: scala + + import Actor._ + val a = actorOf(new MyActor(..)) + a ! msg + +Use of 'self' ActorRef API +-------------------------- + +Where you have used 'this' to refer to the Actor from within itself now use 'self': + +.. code-block:: scala + + self ! MessageToMe + +Now the Actor trait only has the callbacks you can implement: +* receive +* postRestart/preRestart +* init/shutdown + +It has no state at all. + +All API has been moved to ActorRef. The Actor is given its ActorRef through the 'self' member variable. +Here you find functions like: +* !, !!, !!! and forward +* link, unlink, startLink, spawnLink etc +* makeTransactional, makeRemote etc. +* start, stop +* etc. + +Here you also find fields like +* dispatcher = ... +* id = ... +* lifeCycle = ... +* faultHandler = ... +* trapExit = ... +* etc. + +This means that to use them you have to prefix them with 'self', like this: + +.. code-block:: scala + + self ! Message + +However, for convenience you can import these functions and fields like below, which will allow you do drop the 'self' prefix: + +.. code-block:: scala + + class MyActor extends Actor { + import self._ + id = ... + dispatcher = ... + spawnLink[OtherActor] + ... + } + +Serialization +------------- + +If you want to serialize it yourself, here is how to do it: + +.. code-block:: scala + + val actorRef1 = actorOf[MyActor] + + val bytes = actorRef1.toBinary + + val actorRef2 = ActorRef.fromBinary(bytes) + +If you are also using Protobuf then you can use the methods that work with Protobuf's Messages directly. + +.. 
code-block:: scala + + val actorRef1 = actorOf[MyActor] + + val protobufMessage = actorRef1.toProtocol + + val actorRef2 = ActorRef.fromProtocol(protobufMessage) + +Camel +----- + +Some methods of the se.scalablesolutions.akka.camel.Message class have been deprecated in 0.9. These are + +.. code-block:: scala + + package se.scalablesolutions.akka.camel + + case class Message(...) { + // ... + @deprecated def bodyAs[T](clazz: Class[T]): T + @deprecated def setBodyAs[T](clazz: Class[T]): Message + // ... + } + +They will be removed in 1.0. Instead use + +.. code-block:: scala + + package se.scalablesolutions.akka.camel + + case class Message(...) { + // ... + def bodyAs[T](implicit m: Manifest[T]): T = + def setBodyAs[T](implicit m: Manifest[T]): Message + // ... + } + +Usage example: +.. code-block:: scala + + val m = Message(1.4) + val b = m.bodyAs[String] + diff --git a/akka-docs/general/migration-guide-0.9.x-0.10.x.rst b/akka-docs/general/migration-guide-0.9.x-0.10.x.rst new file mode 100644 index 0000000000..85c2b54e93 --- /dev/null +++ b/akka-docs/general/migration-guide-0.9.x-0.10.x.rst @@ -0,0 +1,45 @@ +Migration Guide 0.9.x to 0.10.x +=============================== + +Module akka-camel +----------------- + +The following list summarizes the breaking changes since Akka 0.9.1. + +* CamelService moved from package se.scalablesolutions.akka.camel.service one level up to se.scalablesolutions.akka.camel. +* CamelService.newInstance removed. For starting and stopping a CamelService, applications should use +** CamelServiceManager.startCamelService and +** CamelServiceManager.stopCamelService. +* Existing def receive = produce method definitions from Producer implementations must be removed (resolves compile error: method receive needs override modifier). +* The Producer.async method and the related Sync trait have been removed. This is now fully covered by Camel's `asynchronous routing engine `_. +* @consume annotation can not placed any longer on actors (i.e. 
on type-level), only on typed actor methods. Consumer actors must mixin the Consumer trait. +* @consume annotation moved to package se.scalablesolutions.akka.camel. + +Logging +------- + +We've switched to Logback (SLF4J compatible) for the logging, if you're having trouble seeing your log output you'll need to make sure that there's a logback.xml available on the classpath or you'll need to specify the location of the logback.xml file via the system property, ex: -Dlogback.configurationFile=/path/to/logback.xml + +Configuration +------------- + +* The configuration is now JSON-style (see below). +* Now you can define the time-unit to be used throughout the config file: + +.. code-block:: ruby + + akka { + version = "0.10" + time-unit = "seconds" # default timeout time unit for all timeout properties throughout the config + + actor { + timeout = 5 # default timeout for future based invocations + throughput = 5 # default throughput for ExecutorBasedEventDrivenDispatcher + } + ... + } + +RemoteClient events +------------------- + +All events now has a reference to the RemoteClient instance instead of 'hostname' and 'port'. This is more flexible. Enables simpler reconnecting etc. 
diff --git a/akka-docs/general/migration-guide-1.0.x-1.1.x.rst b/akka-docs/general/migration-guide-1.0.x-1.1.x.rst new file mode 100644 index 0000000000..c473c44129 --- /dev/null +++ b/akka-docs/general/migration-guide-1.0.x-1.1.x.rst @@ -0,0 +1,40 @@ +Migration Guide 1.0.x to 1.1.x +=================================== + +**Akka has now moved to Scala 2.9.x** + + +Akka HTTP +--------- + +# akka.servlet.Initializer has been moved to ``akka-kernel`` to be able to have ``akka-http`` not depend on ``akka-remote``, if you don't want to use the class for kernel, just create your own version of ``akka.servlet.Initializer``, it's just a couple of lines of code and there is instructions here: `Akka Http Docs `_ +# akka.http.ListWriter has been removed in full, if you use it and want to keep using it, here's the code: `ListWriter `_ +# Jersey-server is now a "provided" dependency for ``akka-http``, so you'll need to add the dependency to your project, it's built against Jersey 1.3 + +Akka Actor +---------- + +# is now dependency free, with the exception of the dependency on the ``scala-library.jar`` +# does not bundle any logging anymore, but you can subscribe to events within Akka by registering an event handler on akka.aevent.EventHandler or by specifying the ``FQN`` of an Actor in the akka.conf under akka.event-handlers; there is an ``akka-slf4j`` module which still provides the Logging trait and a default ``SLF4J`` logger adapter. +Don't forget to add a SLF4J backend though, we recommend: + +.. 
code-block:: scala + lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28" + +# If you used HawtDispatcher and want to continue using it, you need to include akka-dispatcher-extras.jar from Akka Modules, in your akka.conf you need to specify: ``akka.dispatch.HawtDispatcherConfigurator`` instead of ``HawtDispatcher`` +# FSM: the onTransition method changed from Function1 to PartialFunction; there is an implicit conversion for the precise types in place, but it may be necessary to add an underscore if you are passing an eta-expansion (using a method as function value). + +Akka Typed Actor +---------------- + +All methods starting with 'get*' are deprecated and will be removed in post 1.1 release. + +Akka Remote +----------- + +# ``UnparsebleException`` has been renamed to ``CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(exception, classname, message)`` + +Akka Testkit +------------ + +The TestKit moved into the akka-testkit subproject and correspondingly into the ``akka.testkit` package. diff --git a/akka-docs/index.rst b/akka-docs/index.rst index c7b2486170..fbb2506fab 100644 --- a/akka-docs/index.rst +++ b/akka-docs/index.rst @@ -4,78 +4,12 @@ Contents .. 
toctree:: :maxdepth: 2 - manual/getting-started-first - pending/actor-registry-java - pending/actor-registry-scala - pending/actors-scala - pending/agents-scala - pending/articles - pending/benchmarks - pending/building-akka - pending/buildr - pending/cluster-membership - pending/companies-using-akka - pending/configuration - pending/dataflow-java - pending/dataflow-scala - pending/deployment-scenarios - pending/developer-guidelines - pending/dispatchers-java - pending/dispatchers-scala - pending/event-handler - pending/external-sample-projects - pending/fault-tolerance-java - pending/fault-tolerance-scala - pending/Feature Stability Matrix - pending/fsm-scala - pending/futures-scala - pending/getting-started - pending/guice-integration - pending/Home - pending/http - pending/issue-tracking - pending/language-bindings - pending/licenses - pending/logging - pending/Migration-1.0-1.1 - pending/migration-guide-0.10.x-1.0.x - pending/migration-guide-0.7.x-0.8.x - pending/migration-guide-0.8.x-0.9.x - pending/migration-guide-0.9.x-0.10.x - pending/migration-guides - pending/Recipes - pending/release-notes - pending/remote-actors-java - pending/remote-actors-scala - pending/routing-java - pending/routing-scala - pending/scheduler - pending/security - pending/serialization-java - pending/serialization-scala - pending/servlet - pending/slf4j - pending/sponsors - pending/stm - pending/stm-java - pending/stm-scala - pending/team - pending/test - pending/testkit - pending/testkit-example - pending/third-party-integrations - pending/transactors-java - pending/transactors-scala - pending/tutorial-chat-server-java - pending/tutorial-chat-server-scala - pending/typed-actors-java - pending/typed-actors-scala - pending/untyped-actors-java - pending/use-cases - pending/web + intro/index + general/index + scala/index + dev/index Links ===== -* `Akka Documentation `_ * `Support `_ diff --git a/akka-docs/intro/build-path.png b/akka-docs/intro/build-path.png new file mode 100644 index 
0000000000..60f469e6d2 Binary files /dev/null and b/akka-docs/intro/build-path.png differ diff --git a/akka-docs/pending/building-akka.rst b/akka-docs/intro/building-akka.rst similarity index 54% rename from akka-docs/pending/building-akka.rst rename to akka-docs/intro/building-akka.rst index 31af34c687..3d4f4ca1a0 100644 --- a/akka-docs/pending/building-akka.rst +++ b/akka-docs/intro/building-akka.rst @@ -3,171 +3,185 @@ Building Akka This page describes how to build and run Akka from the latest source code. +.. contents:: :local: + + Get the source code ------------------- -Akka uses `Git `_ and is hosted at `Github `_. +Akka uses `Git `_ and is hosted at `Github +`_. -You first need Git installed on your machine. You can then clone the source repositories: -* Akka repository from ``_ -* Akka Modules repository from ``_ +You first need Git installed on your machine. You can then clone the source +repositories: -For example: +- Akka repository from ``_ +- Akka Modules repository from ``_ -:: +For example:: - git clone git://github.com/jboner/akka.git - git clone git://github.com/jboner/akka-modules.git + git clone git://github.com/jboner/akka.git + git clone git://github.com/jboner/akka-modules.git -If you have already cloned the repositories previously then you can update the code with ``git pull``: +If you have already cloned the repositories previously then you can update the +code with ``git pull``:: -:: + git pull origin master - git pull origin master SBT - Simple Build Tool ----------------------- -Akka is using the excellent `SBT `_ build system. So the first thing you have to do is to download and install SBT. You can read more about how to do that `here `_ . +Akka is using the excellent `SBT `_ +build system. So the first thing you have to do is to download and install +SBT. You can read more about how to do that `here +`_ . -The SBT commands that you'll need to build Akka are all included below. 
If you want to find out more about SBT and using it for your own projects do read the `SBT documentation `_. +The SBT commands that you'll need to build Akka are all included below. If you +want to find out more about SBT and using it for your own projects do read the +`SBT documentation +`_. -The Akka SBT build file is ``project/build/AkkaProject.scala`` with some properties defined in ``project/build.properties``. +The Akka SBT build file is ``project/build/AkkaProject.scala`` with some +properties defined in ``project/build.properties``. ----- Building Akka ------------- -First make sure that you are in the akka code directory: +First make sure that you are in the akka code directory:: -:: + cd akka - cd akka Fetching dependencies ^^^^^^^^^^^^^^^^^^^^^ -SBT does not fetch dependencies automatically. You need to manually do this with the ``update`` command: +SBT does not fetch dependencies automatically. You need to manually do this with +the ``update`` command:: -:: + sbt update - sbt update +Once finished, all the dependencies for Akka will be in the ``lib_managed`` +directory under each module: akka-actor, akka-stm, and so on. -Once finished, all the dependencies for Akka will be in the ``lib_managed`` directory under each module: akka-actor, akka-stm, and so on. +*Note: you only need to run update the first time you are building the code, +or when the dependencies have changed.* -*Note: you only need to run {{update}} the first time you are building the code, or when the dependencies have changed.* Building ^^^^^^^^ -To compile all the Akka core modules use the ``compile`` command: +To compile all the Akka core modules use the ``compile`` command:: -:: + sbt compile - sbt compile +You can run all tests with the ``test`` command:: -You can run all tests with the ``test`` command: + sbt test -:: +If compiling and testing are successful then you have everything working for the +latest Akka development version. 
- sbt test - -If compiling and testing are successful then you have everything working for the latest Akka development version. Publish to local Ivy repository ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you want to deploy the artifacts to your local Ivy repository (for example, to use from an SBT project) use the ``publish-local`` command: +If you want to deploy the artifacts to your local Ivy repository (for example, +to use from an SBT project) use the ``publish-local`` command:: -:: + sbt publish-local - sbt publish-local Publish to local Maven repository ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you want to deploy the artifacts to your local Maven repository use: +If you want to deploy the artifacts to your local Maven repository use:: -:: + sbt publish-local publish - sbt publish-local publish SBT interactive mode ^^^^^^^^^^^^^^^^^^^^ -Note that in the examples above we are calling ``sbt compile`` and ``sbt test`` and so on. SBT also has an interactive mode. If you just run ``sbt`` you enter the interactive SBT prompt and can enter the commands directly. This saves starting up a new JVM instance for each command and can be much faster and more convenient. +Note that in the examples above we are calling ``sbt compile`` and ``sbt test`` +and so on. SBT also has an interactive mode. If you just run ``sbt`` you enter +the interactive SBT prompt and can enter the commands directly. This saves +starting up a new JVM instance for each command and can be much faster and more +convenient. For example, building Akka as above is more commonly done like this: -:: +.. code-block:: none + + % sbt + [info] Building project akka 1.1-SNAPSHOT against Scala 2.9.0.RC1 + [info] using AkkaParentProject with sbt 0.7.6.RC0 and Scala 2.7.7 + > update + [info] + [info] == akka-actor / update == + ... + [success] Successful. + [info] + [info] Total time ... + > compile + ... + > test + ... 
- % sbt - [info] Building project akka 1.1-SNAPSHOT against Scala 2.8.1 - [info] using AkkaParentProject with sbt 0.7.5.RC0 and Scala 2.7.7 - > update - [info] - [info] == akka-actor / update == - ... - [success] Successful. - [info] - [info] Total time ... - > compile - ... - > test - ... SBT batch mode ^^^^^^^^^^^^^^ -It's also possible to combine commands in a single call. For example, updating, testing, and publishing Akka to the local Ivy repository can be done with: +It's also possible to combine commands in a single call. For example, updating, +testing, and publishing Akka to the local Ivy repository can be done with:: -:: + sbt update test publish-local - sbt update test publish-local - ----- Building Akka Modules --------------------- -To build Akka Modules first build and publish Akka to your local Ivy repository as described above. Or using: +To build Akka Modules first build and publish Akka to your local Ivy repository +as described above. Or using:: -:: + cd akka + sbt update publish-local - cd akka - sbt update publish-local +Then you can build Akka Modules using the same steps as building Akka. First +update to get all dependencies (including the Akka core modules), then compile, +test, or publish-local as needed. For example:: -Then you can build Akka Modules using the same steps as building Akka. First update to get all dependencies (including the Akka core modules), then compile, test, or publish-local as needed. For example: + cd akka-modules + sbt update publish-local -:: - - cd akka-modules - sbt update publish-local Microkernel distribution ^^^^^^^^^^^^^^^^^^^^^^^^ -To build the Akka Modules microkernel (the same as the Akka Modules distribution download) use the ``dist`` command: +To build the Akka Modules microkernel (the same as the Akka Modules distribution +download) use the ``dist`` command:: -:: + sbt dist - sbt dist +The distribution zip can be found in the dist directory and is called +``akka-modules-{version}.zip``. 
-The distribution zip can be found in the dist directory and is called ``akka-modules-{version}.zip``. +To run the mircokernel, unzip the zip file, change into the unzipped directory, +set the ``AKKA_HOME`` environment variable, and run the main jar file. For +example: -To run the mircokernel, unzip the zip file, change into the unzipped directory, set the ``AKKA_HOME`` environment variable, and run the main jar file. For example: +.. code-block:: none -:: + unzip dist/akka-modules-1.1-SNAPSHOT.zip + cd akka-modules-1.1-SNAPSHOT + export AKKA_HOME=`pwd` + java -jar akka-modules-1.1-SNAPSHOT.jar - unzip dist/akka-modules-1.1-SNAPSHOT.zip - cd akka-modules-1.1-SNAPSHOT - export AKKA_HOME=`pwd` - java -jar akka-modules-1.1-SNAPSHOT.jar +The microkernel will boot up and install the sample applications that reside in +the distribution's ``deploy`` directory. You can deploy your own applications +into the ``deploy`` directory as well. -The microkernel will boot up and install the sample applications that reside in the distribution's ``deploy`` directory. You can deploy your own applications into the ``deploy`` directory as well. - ----- Scripts ------- @@ -177,32 +191,38 @@ Linux/Unix init script Here is a Linux/Unix init script that can be very useful: -``_ +http://github.com/jboner/akka/blob/master/scripts/akka-init-script.sh Copy and modify as needed. + Simple startup shell script ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -This little script might help a bit. Just make sure you have the Akka distribution in the '$AKKA_HOME/dist' directory and then invoke this script to start up the kernel. The distribution is created in the './dist' dir for you if you invoke 'sbt dist'. +This little script might help a bit. Just make sure you have the Akka +distribution in the '$AKKA_HOME/dist' directory and then invoke this script to +start up the kernel. The distribution is created in the './dist' dir for you if +you invoke 'sbt dist'. 
-``_ +http://github.com/jboner/akka/blob/master/scripts/run_akka.sh Copy and modify as needed. ----- Dependencies ------------ -If you are managing dependencies by hand you can find out what all the compile dependencies are for each module by looking in the ``lib_managed/compile`` directories. For example, you can run this to create a listing of dependencies (providing you have the source code and have run ``sbt update``): +If you are managing dependencies by hand you can find out what all the compile +dependencies are for each module by looking in the ``lib_managed/compile`` +directories. For example, you can run this to create a listing of dependencies +(providing you have the source code and have run ``sbt update``):: -:: + cd akka + ls -1 */lib_managed/compile - cd akka - ls -1 */lib_managed/compile -Here are the dependencies used by the Akka core modules. +Dependencies used by the Akka core modules +------------------------------------------ akka-actor ^^^^^^^^^^ @@ -247,8 +267,9 @@ akka-http * jsr250-api-1.0.jar * jsr311-api-1.1.jar ----- -Here are the dependencies used by the Akka modules. + +Dependencies used by the Akka modules +------------------------------------- akka-amqp ^^^^^^^^^ diff --git a/akka-docs/intro/configuration.rst b/akka-docs/intro/configuration.rst new file mode 100644 index 0000000000..fd19b71db4 --- /dev/null +++ b/akka-docs/intro/configuration.rst @@ -0,0 +1,31 @@ +Configuration +============= + +Specifying the configuration file +--------------------------------- + +If you don't specify a configuration file then Akka uses default values. If +you want to override these then you should edit the ``akka.conf`` file in the +``AKKA_HOME/config`` directory. This config inherits from the +``akka-reference.conf`` file that you see below. Use your ``akka.conf`` to override +any property in the reference config. 
+ +The config can be specified in various ways: + +* Define the ``-Dakka.config=...`` system property option + +* Put an ``akka.conf`` file on the classpath + +* Define the ``AKKA_HOME`` environment variable pointing to the root of the Akka + distribution. The config is taken from the ``AKKA_HOME/config`` directory. You + can also point to the AKKA_HOME by specifying the ``-Dakka.home=...`` system + property option. + + +Defining the configuration file +------------------------------- + +Here is the reference configuration file: + +.. literalinclude:: ../../config/akka-reference.conf + :language: none diff --git a/akka-docs/intro/diagnostics-window.png b/akka-docs/intro/diagnostics-window.png new file mode 100644 index 0000000000..7036fd96fb Binary files /dev/null and b/akka-docs/intro/diagnostics-window.png differ diff --git a/akka-docs/intro/example-code.png b/akka-docs/intro/example-code.png new file mode 100644 index 0000000000..cd7e09f880 Binary files /dev/null and b/akka-docs/intro/example-code.png differ diff --git a/akka-docs/intro/examples/Pi.scala b/akka-docs/intro/examples/Pi.scala new file mode 100644 index 0000000000..6bf1dea903 --- /dev/null +++ b/akka-docs/intro/examples/Pi.scala @@ -0,0 +1,129 @@ +//#imports +package akka.tutorial.scala.first + +import akka.actor.{Actor, PoisonPill} +import Actor._ +import akka.routing.{Routing, CyclicIterator} +import Routing._ + +import System.{currentTimeMillis => now} +import java.util.concurrent.CountDownLatch +//#imports + +//#app +object Pi extends App { + + calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000) + + //#actors-and-messages + // ==================== + // ===== Messages ===== + // ==================== + //#messages + sealed trait PiMessage + case object Calculate extends PiMessage + case class Work(start: Int, nrOfElements: Int) extends PiMessage + case class Result(value: Double) extends PiMessage + //#messages + + // ================== + // ===== Worker ===== + // 
================== + //#worker + class Worker extends Actor { + + //#calculate-pi + def calculatePiFor(start: Int, nrOfElements: Int): Double = { + var acc = 0.0 + for (i <- start until (start + nrOfElements)) + acc += 4 * math.pow(-1, i) / (2 * i + 1) + acc + } + //#calculate-pi + + def receive = { + case Work(start, nrOfElements) => + self reply Result(calculatePiFor(start, nrOfElements)) // perform the work + } + } + //#worker + + // ================== + // ===== Master ===== + // ================== + //#master + class Master( + nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch) + extends Actor { + + var pi: Double = _ + var nrOfResults: Int = _ + var start: Long = _ + + //#create-workers + // create the workers + val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start()) + + // wrap them with a load-balancing router + val router = Routing.loadBalancerActor(CyclicIterator(workers)).start() + //#create-workers + + //#master-receive + // message handler + def receive = { + //#message-handling + case Calculate => + // schedule work + for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements) + + // send a PoisonPill to all workers telling them to shut down themselves + router ! Broadcast(PoisonPill) + + // send a PoisonPill to the router, telling him to shut himself down + router ! 
PoisonPill + + case Result(value) => + // handle result from the worker + pi += value + nrOfResults += 1 + if (nrOfResults == nrOfMessages) self.stop() + //#message-handling + } + //#master-receive + + override def preStart { + start = now + } + + override def postStop { + // tell the world that the calculation is complete + println( + "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis" + .format(pi, (now - start))) + latch.countDown() + } + } + //#master + //#actors-and-messages + + // ================== + // ===== Run it ===== + // ================== + def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) { + + // this latch is only plumbing to know when the calculation is completed + val latch = new CountDownLatch(1) + + // create the master + val master = actorOf( + new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start() + + // start the calculation + master ! Calculate + + // wait for master to shut down + latch.await() + } +} +//#app + diff --git a/akka-docs/intro/getting-started-first-java.rst b/akka-docs/intro/getting-started-first-java.rst new file mode 100644 index 0000000000..df032a8970 --- /dev/null +++ b/akka-docs/intro/getting-started-first-java.rst @@ -0,0 +1,742 @@ +Getting Started Tutorial (Java): First Chapter +============================================== + +Introduction +------------ + +Welcome to the first tutorial on how to get started with Akka and Java. We assume that you already know what Akka and Java are and will now focus on the steps necessary to start your first project. + +There are two variations of this first tutorial: + +- creating a standalone project and run it from the command line +- creating a Maven project and running it from within Maven + +Since they are so similar we will present them both. + +The sample application that we will create is using actors to calculate the value of Pi. 
Calculating Pi is a CPU intensive operation and we will utilize Akka Actors to write a concurrent solution that scales out to multi-core processors. This sample will be extended in future tutorials to use Akka Remote Actors to scale out on multiple machines in a cluster. + +We will be using an algorithm that is called "embarrassingly parallel" which just means that each job is completely isolated and not coupled with any other job. Since this algorithm is so parallelizable it suits the actor model very well. + +Here is the formula for the algorithm we will use: + +.. image:: pi-formula.png + +In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed. When each worker has processed its chunk it sends a result back to the master which aggregates the total result. + +Tutorial source code +-------------------- + +If you want don't want to type in the code and/or set up a Maven project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here `_, with the actual source code `here `_. + +Prerequisites +------------- + +This tutorial assumes that you have Java 1.6 or later installed on you machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and a decent text editor or IDE to type in the Java code. + +You need to make sure that ``$JAVA_HOME`` environment variable is set to the root of the Java distribution. You also need to make sure that the ``$JAVA_HOME/bin`` is on your ``PATH``:: + + $ export JAVA_HOME=..root of java distribution.. 
+ $ export PATH=$PATH:$JAVA_HOME/bin + +You can test your installation by invoking ``java``:: + + $ java -version + java version "1.6.0_24" + Java(TM) SE Runtime Environment (build 1.6.0_24-b07-334-10M3326) + Java HotSpot(TM) 64-Bit Server VM (build 19.1-b02-334, mixed mode) + +Downloading and installing Akka +------------------------------- + +To build and run the tutorial sample from the command line, you have to download Akka. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. + +Let's get the ``akka-1.1`` distribution of Akka core (not Akka Modules) from `http://akka.io/downloads `_. Once you have downloaded the distribution unzip it in the folder you would like to have Akka installed in, in my case I choose to install it in ``/Users/jboner/tools/``, simply by unzipping it to this directory. + +You need to do one more thing in order to install Akka properly: set the ``AKKA_HOME`` environment variable to the root of the distribution. In my case I'm opening up a shell, navigating down to the distribution, and setting the ``AKKA_HOME`` variable:: + + $ cd /Users/jboner/tools/akka-1.1 + $ export AKKA_HOME=`pwd` + $ echo $AKKA_HOME + /Users/jboner/tools/akka-1.1 + +The distribution looks like this:: + + $ ls -l + total 16944 + drwxr-xr-x 7 jboner staff 238 Apr 6 11:15 . + drwxr-xr-x 28 jboner staff 952 Apr 6 11:16 .. + drwxr-xr-x 17 jboner staff 578 Apr 6 11:16 deploy + drwxr-xr-x 26 jboner staff 884 Apr 6 11:16 dist + drwxr-xr-x 3 jboner staff 102 Apr 6 11:15 lib_managed + -rwxr-xr-x 1 jboner staff 8674105 Apr 6 11:15 scala-library.jar + drwxr-xr-x 4 jboner staff 136 Apr 6 11:16 scripts + +- In the ``dist`` directory we have the Akka JARs, including sources and docs. +- In the ``lib_managed/compile`` directory we have Akka's dependency JARs. +- In the ``deploy`` directory we have the sample JARs. +- In the ``scripts`` directory we have scripts for running Akka. 
+- Finally ``scala-library.jar`` is the JAR for the latest Scala distribution that Akka depends on. + +The only JAR we will need for this tutorial (apart from the ``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``dist`` directory. This is a self-contained JAR with zero dependencies and contains everything we need to write a system using Actors. + +Akka is very modular and has many JARs for containing different features. The core distribution has seven modules: + +- ``akka-actor-1.1.jar`` -- Standard Actors +- ``akka-typed-actor-1.1.jar`` -- Typed Actors +- ``akka-remote-1.1.jar`` -- Remote Actors +- ``akka-stm-1.1.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures +- ``akka-http-1.1.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration +- ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener for logging with SLF4J +- ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors + +We also have Akka Modules containing add-on modules outside the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. We will not be needing any modules there today, but for your information the module JARs are these: + +- ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.) +- ``akka-amqp-1.1.jar`` -- AMQP integration +- ``akka-camel-1.1.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world) +- ``akka-camel-typed-1.1.jar`` -- Apache Camel Typed Actors integration +- ``akka-scalaz-1.1.jar`` -- Support for the Scalaz library +- ``akka-spring-1.1.jar`` -- Spring framework integration +- ``akka-osgi-dependencies-bundle-1.1.jar`` -- OSGi support + +Downloading and installing Maven +-------------------------------- + +Maven is an excellent build system that can be used to build both Java and Scala projects. 
If you want to use Maven for this tutorial then follow the following instructions, if not you can skip this section and the next. + +First browse to `http://maven.apache.org/download.html `_ and download the ``3.0.3`` distribution. + +To install Maven it is easiest to follow the instructions on `http://maven.apache.org/download.html#Installation `_. + +Creating an Akka Maven project +------------------------------ + +If you have not already done so, now is the time to create a Maven project for our tutorial. You do that by stepping into the directory you want to create your project in and invoking the ``mvn`` command:: + + $ mvn archetype:generate \ + -DgroupId=akka.tutorial.first.java \ + -DartifactId=akka-tutorial-first-java \ + -DarchetypeArtifactId=maven-archetype-quickstart \ + -DinteractiveMode=false + +Now we have the basis for our Maven-based Akka project. Let's step into the project directory:: + + $ cd akka-tutorial-first-java + +Here is the layout that Maven created:: + + akka-tutorial-first-jboner + |-- pom.xml + `-- src + |-- main + | `-- java + | `-- akka + | `-- tutorial + | `-- first + | `-- java + | `-- App.java + +As you can see we already have a Java source file called ``App.java``, let's now rename it to ``Pi.java``. + +We also need to edit the ``pom.xml`` build file. Let's add the dependency we need as well as the Maven repository it should download it from. It should now look something like this:: + + + + 4.0.0 + + akka-tutorial-first-java + akka.tutorial.first.java + akka-tutorial-first-java + jar + 1.0-SNAPSHOT + http://akka.io + + + + se.scalablesolutions.akka + akka-actor + 1.1 + + + + + + Akka + Akka Maven2 Repository + http://www.scalablesolutions.se/akka/repository/ + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 2.3.2 + + 1.6 + 1.6 + + + + + + +Start writing the code +---------------------- + +Now it's about time to start hacking. 
+ +We start by creating a ``Pi.java`` file and adding these import statements at the top of the file:: + + package akka.tutorial.first.java; + + import static akka.actor.Actors.actorOf; + import static akka.actor.Actors.poisonPill; + import static java.util.Arrays.asList; + + import akka.actor.ActorRef; + import akka.actor.UntypedActor; + import akka.actor.UntypedActorFactory; + import akka.routing.CyclicIterator; + import akka.routing.InfiniteIterator; + import akka.routing.Routing.Broadcast; + import akka.routing.UntypedLoadBalancer; + + import java.util.concurrent.CountDownLatch; + +If you are using Maven in this tutorial then create the file in the ``src/main/java/akka/tutorial/first/java`` directory. + +If you are using the command line tools then create the file wherever you want. I will create it in a directory called ``tutorial`` at the root of the Akka distribution, e.g. in ``$AKKA_HOME/tutorial/akka/tutorial/first/java/Pi.java``. + +Creating the messages +--------------------- + +The design we are aiming for is to have one ``Master`` actor initiating the computation, creating a set of ``Worker`` actors. Then it splits up the work into discrete chunks, and sends these chunks to the different workers in a round-robin fashion. The master waits until all the workers have completed their work and sent back results for aggregation. When computation is completed the master prints out the result, shuts down all workers and then itself. + +With this in mind, let's now create the messages that we want to have flowing in the system. We need three different messages: + +- ``Calculate`` -- sent to the ``Master`` actor to start the calculation +- ``Work`` -- sent from the ``Master`` actor to the ``Worker`` actors containing the work assignment +- ``Result`` -- sent from the ``Worker`` actors to the ``Master`` actor containing the result from the worker's calculation + +Messages sent to actors should always be immutable to avoid sharing mutable state. 
So let's start by creating three messages as immutable POJOs. We also create a wrapper ``Pi`` class to hold our implementation:: + + public class Pi { + + static class Calculate {} + + static class Work { + private final int start; + private final int nrOfElements; + + public Work(int start, int nrOfElements) { + this.start = start; + this.nrOfElements = nrOfElements; + } + + public int getStart() { return start; } + public int getNrOfElements() { return nrOfElements; } + } + + static class Result { + private final double value; + + public Result(double value) { + this.value = value; + } + + public double getValue() { return value; } + } + } + +Creating the worker +------------------- + +Now we can create the worker actor. This is done by extending in the ``UntypedActor`` base class and defining the ``onReceive`` method. The ``onReceive`` method defines our message handler. We expect it to be able to handle the ``Work`` message so we need to add a handler for this message:: + + static class Worker extends UntypedActor { + + // message handler + public void onReceive(Object message) { + if (message instanceof Work) { + Work work = (Work) message; + + // perform the work + double result = calculatePiFor(work.getStart(), work.getNrOfElements()) + + // reply with the result + getContext().replyUnsafe(new Result(result)); + + } else throw new IllegalArgumentException("Unknown message [" + message + "]"); + } + } + +As you can see we have now created an ``UntypedActor`` with a ``onReceive`` method as a handler for the ``Work`` message. In this handler we invoke the ``calculatePiFor(..)`` method, wrap the result in a ``Result`` message and send it back to the original sender using ``getContext().replyUnsafe(..)``. In Akka the sender reference is implicitly passed along with the message so that the receiver can always reply or store away the sender reference for future use. 
+ +The only thing missing in our ``Worker`` actor is the implementation on the ``calculatePiFor(..)`` method:: + + // define the work + private double calculatePiFor(int start, int nrOfElements) { + double acc = 0.0; + for (int i = start * nrOfElements; i <= ((start + 1) * nrOfElements - 1); i++) { + acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1); + } + return acc; + } + +Creating the master +------------------- + +The master actor is a little bit more involved. In its constructor we need to create the workers (the ``Worker`` actors) and start them. We will also wrap them in a load-balancing router to make it easier to spread out the work evenly between the workers. Let's do that first:: + + static class Master extends UntypedActor { + ... + + static class PiRouter extends UntypedLoadBalancer { + private final InfiniteIterator workers; + + public PiRouter(ActorRef[] workers) { + this.workers = new CyclicIterator(asList(workers)); + } + + public InfiniteIterator seq() { + return workers; + } + } + + public Master(...) { + ... + + // create the workers + final ActorRef[] workers = new ActorRef[nrOfWorkers]; + for (int i = 0; i < nrOfWorkers; i++) { + workers[i] = actorOf(Worker.class).start(); + } + + // wrap them with a load-balancing router + ActorRef router = actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new PiRouter(workers); + } + }).start(); + } + } + +As you can see we are using the ``actorOf`` factory method to create actors, this method returns as an ``ActorRef`` which is a reference to our newly created actor. This method is available in the ``Actors`` object but is usually imported:: + + import static akka.actor.Actors.actorOf; + +One thing to note is that we used two different versions of the ``actorOf`` method. 
For creating the ``Worker`` actor we just pass in the class but to create the ``PiRouter`` actor we can't do that since the constructor in the ``PiRouter`` class takes arguments, instead we need to use the ``UntypedActorFactory`` which unfortunately is a bit more verbose. + +``actorOf`` is the only way to create an instance of an Actor, this is enforced by Akka runtime. The ``actorOf`` method instantiates the actor and returns, not an instance to the actor, but an instance to an ``ActorRef``. This reference is the handle through which you communicate with the actor. It is immutable, serializable and location-aware meaning that it "remembers" its original actor even if it is sent to other nodes across the network and can be seen as the equivalent to the Erlang actor's PID. + +The actor's life-cycle is: + +- Created -- ``Actor.actorOf[MyActor]`` -- can **not** receive messages +- Started -- ``actorRef.start()`` -- can receive messages +- Stopped -- ``actorRef.stop()`` -- can **not** receive messages + +Once the actor has been stopped it is dead and can not be started again. + +Now we have a router that is representing all our workers in a single abstraction. If you paid attention to the code above, you saw that we were using the ``nrOfWorkers`` variable. This variable and others we have to pass to the ``Master`` actor in its constructor. So now let's create the master actor. 
We have to pass in three integer variables: + +- ``nrOfWorkers`` -- defining how many workers we should start up +- ``nrOfMessages`` -- defining how many number chunks to send out to the workers +- ``nrOfElements`` -- defining how big the number chunks sent to each worker should be + +Here is the master actor:: + + static class Master extends UntypedActor { + private final int nrOfMessages; + private final int nrOfElements; + private final CountDownLatch latch; + + private double pi; + private int nrOfResults; + private long start; + + private ActorRef router; + + static class PiRouter extends UntypedLoadBalancer { + private final InfiniteIterator workers; + + public PiRouter(ActorRef[] workers) { + this.workers = new CyclicIterator(asList(workers)); + } + + public InfiniteIterator seq() { + return workers; + } + } + + public Master( + int nrOfWorkers, int nrOfMessages, int nrOfElements, CountDownLatch latch) { + this.nrOfMessages = nrOfMessages; + this.nrOfElements = nrOfElements; + this.latch = latch; + + // create the workers + final ActorRef[] workers = new ActorRef[nrOfWorkers]; + for (int i = 0; i < nrOfWorkers; i++) { + workers[i] = actorOf(Worker.class).start(); + } + + // wrap them with a load-balancing router + router = actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new PiRouter(workers); + } + }).start(); + } + + // message handler + public void onReceive(Object message) { ... } + + @Override + public void preStart() { + start = System.currentTimeMillis(); + } + + @Override + public void postStop() { + // tell the world that the calculation is complete + System.out.println(String.format( + "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis", + pi, (System.currentTimeMillis() - start))); + latch.countDown(); + } + } + +A couple of things are worth explaining further. + +First, we are passing in a ``java.util.concurrent.CountDownLatch`` to the ``Master`` actor. 
This latch is only used for plumbing (in this specific tutorial), to have a simple way of letting the outside world knowing when the master can deliver the result and shut down. In more idiomatic Akka code, as we will see in part two of this tutorial series, we would not use a latch but other abstractions and functions like ``Channel``, ``Future`` and ``!!!`` to achieve the same thing in a non-blocking way. But for simplicity let's stick to a ``CountDownLatch`` for now. + +Second, we are adding a couple of life-cycle callback methods; ``preStart`` and ``postStop``. In the ``preStart`` callback we are recording the time when the actor is started and in the ``postStop`` callback we are printing out the result (the approximation of Pi) and the time it took to calculate it. In this call we also invoke ``latch.countDown()`` to tell the outside world that we are done. + +But we are not done yet. We are missing the message handler for the ``Master`` actor. This message handler needs to be able to react to two different messages: + +- ``Calculate`` -- which should start the calculation +- ``Result`` -- which should aggregate the different results + +The ``Calculate`` handler is sending out work to all the ``Worker`` actors and after doing that it also sends a ``new Broadcast(poisonPill())`` message to the router, which will send out the ``PoisonPill`` message to all the actors it is representing (in our case all the ``Worker`` actors). ``PoisonPill`` is a special kind of message that tells the receiver to shut itself down using the normal shutdown method; ``getContext().stop()``, and is created through the ``poisonPill()`` method. We also send a ``PoisonPill`` to the router itself (since it's also an actor that we want to shut down). + +The ``Result`` handler is simpler, here we get the value from the ``Result`` message and aggregate it to our ``pi`` member variable. 
We also keep track of how many results we have received back, and if that matches the number of tasks sent out, the ``Master`` actor considers itself done and shuts down. + +Let's capture this in code:: + + // message handler + public void onReceive(Object message) { + + if (message instanceof Calculate) { + // schedule work + for (int start = 0; start < nrOfMessages; start++) { + router.sendOneWay(new Work(start, nrOfElements), getContext()); + } + + // send a PoisonPill to all workers telling them to shut down themselves + router.sendOneWay(new Broadcast(poisonPill())); + + // send a PoisonPill to the router, telling him to shut himself down + router.sendOneWay(poisonPill()); + + } else if (message instanceof Result) { + + // handle result from the worker + Result result = (Result) message; + pi += result.getValue(); + nrOfResults += 1; + if (nrOfResults == nrOfMessages) getContext().stop(); + + } else throw new IllegalArgumentException("Unknown message [" + message + "]"); + } + +Bootstrap the calculation +------------------------- + +Now the only thing that is left to implement is the runner that should bootstrap and run the calculation for us. 
We do that by adding a ``main`` method to the enclosing ``Pi`` class in which we create a new instance of ``Pi`` and invoke method ``calculate`` in which we start up the ``Master`` actor and wait for it to finish:: + + public class Pi { + + public static void main(String[] args) throws Exception { + Pi pi = new Pi(); + pi.calculate(4, 10000, 10000); + } + + public void calculate(int nrOfWorkers, int nrOfElements, int nrOfMessages) + throws Exception { + + // this latch is only plumbing to know when the calculation is completed + final CountDownLatch latch = new CountDownLatch(1); + + // create the master + ActorRef master = actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch); + } + }).start(); + + // start the calculation + master.sendOneWay(new Calculate()); + + // wait for master to shut down + latch.await(); + } + } + +That's it. Now we are done. + +Before we package it up and run it, let's take a look at the full code now, with package declaration, imports and all:: + + package akka.tutorial.first.java; + + import static akka.actor.Actors.actorOf; + import static akka.actor.Actors.poisonPill; + import static java.util.Arrays.asList; + + import akka.actor.ActorRef; + import akka.actor.UntypedActor; + import akka.actor.UntypedActorFactory; + import akka.routing.CyclicIterator; + import akka.routing.InfiniteIterator; + import akka.routing.Routing.Broadcast; + import akka.routing.UntypedLoadBalancer; + + import java.util.concurrent.CountDownLatch; + + public class Pi { + + public static void main(String[] args) throws Exception { + Pi pi = new Pi(); + pi.calculate(4, 10000, 10000); + } + + // ==================== + // ===== Messages ===== + // ==================== + static class Calculate {} + + static class Work { + private final int start; + private final int nrOfElements; + + public Work(int start, int nrOfElements) { + this.start = start; + this.nrOfElements = nrOfElements; + } 
+ + public int getStart() { return start; } + public int getNrOfElements() { return nrOfElements; } + } + + static class Result { + private final double value; + + public Result(double value) { + this.value = value; + } + + public double getValue() { return value; } + } + + // ================== + // ===== Worker ===== + // ================== + static class Worker extends UntypedActor { + + // define the work + private double calculatePiFor(int start, int nrOfElements) { + double acc = 0.0; + for (int i = start * nrOfElements; i <= ((start + 1) * nrOfElements - 1); i++) { + acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1); + } + return acc; + } + + // message handler + public void onReceive(Object message) { + if (message instanceof Work) { + Work work = (Work) message; + + // perform the work + double result = calculatePiFor(work.getStart(), work.getNrOfElements()) + + // reply with the result + getContext().replyUnsafe(new Result(result)); + + } else throw new IllegalArgumentException("Unknown message [" + message + "]"); + } + } + + // ================== + // ===== Master ===== + // ================== + static class Master extends UntypedActor { + private final int nrOfMessages; + private final int nrOfElements; + private final CountDownLatch latch; + + private double pi; + private int nrOfResults; + private long start; + + private ActorRef router; + + static class PiRouter extends UntypedLoadBalancer { + private final InfiniteIterator workers; + + public PiRouter(ActorRef[] workers) { + this.workers = new CyclicIterator(asList(workers)); + } + + public InfiniteIterator seq() { + return workers; + } + } + + public Master( + int nrOfWorkers, int nrOfMessages, int nrOfElements, CountDownLatch latch) { + + this.nrOfMessages = nrOfMessages; + this.nrOfElements = nrOfElements; + this.latch = latch; + + // create the workers + final ActorRef[] workers = new ActorRef[nrOfWorkers]; + for (int i = 0; i < nrOfWorkers; i++) { + workers[i] = actorOf(Worker.class).start(); + } + + 
// wrap them with a load-balancing router + router = actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new PiRouter(workers); + } + }).start(); + } + + // message handler + public void onReceive(Object message) { + + if (message instanceof Calculate) { + // schedule work + for (int start = 0; start < nrOfMessages; start++) { + router.sendOneWay(new Work(start, nrOfElements), getContext()); + } + + // send a PoisonPill to all workers telling them to shut down themselves + router.sendOneWay(new Broadcast(poisonPill())); + + // send a PoisonPill to the router, telling him to shut himself down + router.sendOneWay(poisonPill()); + + } else if (message instanceof Result) { + + // handle result from the worker + Result result = (Result) message; + pi += result.getValue(); + nrOfResults += 1; + if (nrOfResults == nrOfMessages) getContext().stop(); + + } else throw new IllegalArgumentException("Unknown message [" + message + "]"); + } + + @Override + public void preStart() { + start = System.currentTimeMillis(); + } + + @Override + public void postStop() { + // tell the world that the calculation is complete + System.out.println(String.format( + "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis", + pi, (System.currentTimeMillis() - start))); + latch.countDown(); + } + } + + // ================== + // ===== Run it ===== + // ================== + public void calculate(int nrOfWorkers, int nrOfElements, int nrOfMessages) + throws Exception { + + // this latch is only plumbing to know when the calculation is completed + final CountDownLatch latch = new CountDownLatch(1); + + // create the master + ActorRef master = actorOf(new UntypedActorFactory() { + public UntypedActor create() { + return new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch); + } + }).start(); + + // start the calculation + master.sendOneWay(new Calculate()); + + // wait for master to shut down + latch.await(); + } + } + +Run it as a command line application 
+------------------------------------ + +To build and run the tutorial from the command line, you need to have the Scala library JAR on the classpath. + +Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0.RC1 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. + +The ``scala-library.jar`` resides in the ``scala-2.9.0.RC1/lib`` directory. Copy that to your project directory. + +If you have not typed in (or copied) the code for the tutorial as ``$AKKA_HOME/tutorial/akka/tutorial/first/java/Pi.java`` then now is the time. When that's done open up a shell and step in to the Akka distribution (``cd $AKKA_HOME``). + +First we need to compile the source file. That is done with Java's compiler ``javac``. Our application depends on the ``akka-actor-1.1.jar`` and the ``scala-library.jar`` JAR files, so let's add them to the compiler classpath when we compile the source:: + + $ javac -cp dist/akka-actor-1.1.jar:scala-library.jar tutorial/Pi.scala + +When we have compiled the source file we are ready to run the application. This is done with ``java`` but yet again we need to add the ``akka-actor-1.1.jar`` and the ``scala-library.jar`` JAR files to the classpath as well as the classes we compiled ourselves:: + + $ java \ + -cp dist/akka-actor-1.1.jar:scala-library.jar:tutorial \ + akka.tutorial.java.first.Pi + AKKA_HOME is defined as [/Users/jboner/src/akka-stuff/akka-core] + loading config from [/Users/jboner/src/akka-stuff/akka-core/config/akka.conf]. + + Pi estimate: 3.1435501812459323 + Calculation time: 822 millis + +Yippee! It is working. + +If you have not defined the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Can’t load akka.conf`` warning. This is ok since it will then just use the defaults. 
+ +Run it inside Maven +------------------- + +If you used Maven, then you can run the application directly inside Maven. First you need to compile the project:: + + $ mvn compile + +When this in done we can run our application directly inside SBT:: + + $ mvn exec:java -Dexec.mainClass="akka.tutorial.first.java.Pi" + ... + Pi estimate: 3.1435501812459323 + Calculation time: 939 millis + +Yippee! It is working. + +If you have not defined an the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Can’t load akka.conf`` warning. This is ok since it will then just use the defaults. + +Conclusion +---------- + +We have learned how to create our first Akka project using Akka's actors to speed up a computation-intensive problem by scaling out on multi-core processors (also known as scaling up). We have also learned to compile and run an Akka project using either the tools on the command line or the SBT build system. + +If you have a multi-core machine then I encourage you to try out different number of workers (number of working actors) by tweaking the ``nrOfWorkers`` variable to for example; 2, 4, 6, 8 etc. to see performance improvement by scaling up. + +Now we are ready to take on more advanced problems. In the next tutorial we will build on this one, refactor it into more idiomatic Akka and Scala code, and introduce a few new concepts and abstractions. Whenever you feel ready, join me in the `Getting Started Tutorial: Second Chapter `_. + +Happy hakking. 
diff --git a/akka-docs/intro/getting-started-first-scala-eclipse.rst b/akka-docs/intro/getting-started-first-scala-eclipse.rst new file mode 100644 index 0000000000..d34f242e7d --- /dev/null +++ b/akka-docs/intro/getting-started-first-scala-eclipse.rst @@ -0,0 +1,419 @@ +Getting Started Tutorial (Scala with Eclipse): First Chapter +============================================================ + +Introduction +------------ + +Welcome to the first tutorial on how to get started with Akka and Scala. We assume that you already know what Akka and Scala are and will now focus on the steps necessary to start your first project. We will be using `Eclipse `_, and the `Scala plugin for Eclipse `_. + +The sample application that we will create is using actors to calculate the value of Pi. Calculating Pi is a CPU intensive operation and we will utilize Akka Actors to write a concurrent solution that scales out to multi-core processors. This sample will be extended in future tutorials to use Akka Remote Actors to scale out on multiple machines in a cluster. + +We will be using an algorithm that is called "embarrassingly parallel" which just means that each job is completely isolated and not coupled with any other job. Since this algorithm is so parallelizable it suits the actor model very well. + +Here is the formula for the algorithm we will use: + +.. image:: pi-formula.png + +In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed. When each worker has processed its chunk it sends a result back to the master which aggregates the total result. + +Tutorial source code +-------------------- + +If you want don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here `_, with the actual source code `here `_. 
+ +Prerequisites +------------- + +This tutorial assumes that you have Java 1.6 or later installed on you machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and a recent version of Eclipse (at least `3.6 - Helios `_). + +If you want to run the example from the command line as well, you need to make sure that ``$JAVA_HOME`` environment variable is set to the root of the Java distribution. You also need to make sure that the ``$JAVA_HOME/bin`` is on your ``PATH``:: + + $ export JAVA_HOME=..root of java distribution.. + $ export PATH=$PATH:$JAVA_HOME/bin + +You can test your installation by invoking ``java``:: + + $ java -version + java version "1.6.0_24" + Java(TM) SE Runtime Environment (build 1.6.0_24-b07-334-10M3326) + Java HotSpot(TM) 64-Bit Server VM (build 19.1-b02-334, mixed mode) + +Downloading and installing Akka +------------------------------- + +To build and run the tutorial sample from the command line, you have to download Akka. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. + +Let's get the ``akka-1.1`` distribution of Akka core (not Akka Modules) from `http://akka.io/downloads `_. Once you have downloaded the distribution unzip it in the folder you would like to have Akka installed in, in my case I choose to install it in ``/Users/jboner/tools/``, simply by unzipping it to this directory. + +You need to do one more thing in order to install Akka properly: set the ``AKKA_HOME`` environment variable to the root of the distribution. In my case I'm opening up a shell, navigating down to the distribution, and setting the ``AKKA_HOME`` variable:: + + $ cd /Users/jboner/tools/akka-1.1 + $ export AKKA_HOME=`pwd` + $ echo $AKKA_HOME + /Users/jboner/tools/akka-1.1 + +The distribution looks like this:: + + $ ls -l + total 16944 + drwxr-xr-x 7 jboner staff 238 Apr 6 11:15 . + drwxr-xr-x 28 jboner staff 952 Apr 6 11:16 .. 
+ drwxr-xr-x 17 jboner staff 578 Apr 6 11:16 deploy + drwxr-xr-x 26 jboner staff 884 Apr 6 11:16 dist + drwxr-xr-x 3 jboner staff 102 Apr 6 11:15 lib_managed + -rwxr-xr-x 1 jboner staff 8674105 Apr 6 11:15 scala-library.jar + drwxr-xr-x 4 jboner staff 136 Apr 6 11:16 scripts + +- In the ``dist`` directory we have the Akka JARs, including sources and docs. +- In the ``lib_managed/compile`` directory we have Akka's dependency JARs. +- In the ``deploy`` directory we have the sample JARs. +- In the ``scripts`` directory we have scripts for running Akka. +- Finally ``scala-library.jar`` is the JAR for the latest Scala distribution that Akka depends on. + +The only JAR we will need for this tutorial (apart from the ``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``dist`` directory. This is a self-contained JAR with zero dependencies and contains everything we need to write a system using Actors. + +Akka is very modular and has many JARs for containing different features. The core distribution has seven modules: + +- ``akka-actor-1.1.jar`` -- Standard Actors +- ``akka-typed-actor-1.1.jar`` -- Typed Actors +- ``akka-remote-1.1.jar`` -- Remote Actors +- ``akka-stm-1.1.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures +- ``akka-http-1.1.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration +- ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener +- ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors + +We also have Akka Modules containing add-on modules outside the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. We will not be needing any modules there today, but for your information the module JARs are these: + +- ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.) 
+- ``akka-amqp-1.1.jar`` -- AMQP integration +- ``akka-camel-1.1.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world) +- ``akka-camel-typed-1.1.jar`` -- Apache Camel Typed Actors integration +- ``akka-scalaz-1.1.jar`` -- Support for the Scalaz library +- ``akka-spring-1.1.jar`` -- Spring framework integration +- ``akka-osgi-dependencies-bundle-1.1.jar`` -- OSGi support + +Downloading and installing the Scala IDE for Eclipse +---------------------------------------------------- + +If you want to use Eclipse for coding your Akka tutorial, you need to install the Scala plugin for Eclipse. This plugin comes with its own version of Scala, so if you don't plan to run the example from the command line, you don't need to download the Scala distribution (and you can skip the next section). + +You can install this plugin using the regular update mechanism. First choose a version of the IDE from `http://download.scala-ide.org `_. We recommend you choose 2.0.x, which comes with Scala 2.9. Copy the corresponding URL and then choose ``Help/Install New Software`` and paste the URL you just copied. You should see something similar to the following image. + +.. image:: install-beta2-updatesite.png + +Make sure you select both the ``JDT Weaving for Scala`` and the ``Scala IDE for Eclipse`` plugins. The other plugin is optional, and contains the source code of the plugin itself. + +Once the installation is finished, you need to restart Eclipse. The first time the plugin starts it will open a diagnostics window and offer to fix several settings, such as the delay for content assist (code-completion) or the shown completion proposal types. + +.. image:: diagnostics-window.png + +Accept the recommended settings, and follow the instructions if you need to increase the heap size of Eclipse. + +Check that the installation succeeded by creating a new Scala project (``File/New>Scala Project``), and typing some code. 
You should have content-assist, hyperlinking to definitions, instant error reporting, and so on. + +.. image:: example-code.png + +You are ready to code now! + +Downloading and installing Scala +-------------------------------- + +To build and run the tutorial sample from the command line, you have to install the Scala distribution. If you prefer to use Eclipse to build and run the sample then you can skip this section and jump to the next one. + +Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0.RC1 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. + +You also need to make sure that the ``scala-2.9.0.RC1/bin`` (if that is the directory where you installed Scala) is on your ``PATH``:: + + $ export PATH=$PATH:scala-2.9.0.RC1/bin + +You can test your installation by invoking scala:: + + $ scala -version + Scala code runner version 2.9.0.RC1 -- Copyright 2002-2011, LAMP/EPFL + +Looks like we are all good. Finally let's create a source file ``Pi.scala`` for the tutorial and put it in the root of the Akka distribution in the ``tutorial`` directory (you have to create it first). + +Some tools require you to set the ``SCALA_HOME`` environment variable to the root of the Scala distribution, however Akka does not require that. + +Creating an Akka project in Eclipse +--------------------------------------- + +If you have not already done so, now is the time to create an Eclipse project for our tutorial. Use the ``New Scala Project`` wizard and accept the default settings. Once the project is open, we need to add the akka libraries to the *build path*. Right click on the project and choose ``Properties``, then click on ``Java Build Path``. 
Go to ``Libraries`` and click on ``Add External Jars..``, then navigate to the location where you installed akka and choose ``akka-actor.jar``. You should see something similar to this: + +.. image:: build-path.png + +Using SBT in Eclipse +^^^^^^^^^^^^^^^^^^^^ + +If you are an `SBT `_ user, you can follow the :doc:`Akka Tutorial in Scala ` and additionally install the ``sbt-eclipse`` plugin. This adds support for generating Eclipse project files from your SBT project. You need to update your SBT plugins definition in ``project/plugins``:: + + import sbt._ + + class TutorialPlugins(info: ProjectInfo) extends PluginDefinition(info) { + // eclipsify plugin + lazy val eclipse = "de.element34" % "sbt-eclipsify" % "0.7.0" + + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.1" + } + +and then update your SBT project definition by mixing in ``Eclipsify`` in your project definition:: + + import sbt._ + import de.element34.sbteclipsify._ + + class MySbtProject(info: ProjectInfo) extends DefaultProject(info) + with Eclipsify with AkkaProject { + // the project definition here + // akka dependencies + } + +Then run the ``eclipse`` target to generate the Eclipse project:: + + dragos@dragos-imac pi $ sbt eclipse + [info] Building project AkkaPi 1.0 against Scala 2.9.0.RC1 + [info] using MySbtProject with sbt 0.7.4 and Scala 2.7.7 + [info] + [info] == eclipse == + [info] Creating eclipse project... + [info] == eclipse == + [success] Successful. + [info] + [info] Total time: 0 s, completed Apr 20, 2011 2:48:03 PM + [info] + [info] Total session time: 1 s, completed Apr 20, 2011 2:48:03 PM + [success] Build completed successfully. + +Next you need to import this project in Eclipse, by choosing ``Eclipse/Import.. Existing Projects into Workspace``. Navigate to the directory where you defined your SBT project and choose import: + +.. 
image:: import-project.png + +Now we have the basis for an Akka Eclipse application, so we can.. + +Start writing the code +---------------------- + +The design we are aiming for is to have one ``Master`` actor initiating the computation, creating a set of ``Worker`` actors. Then it splits up the work into discrete chunks, and sends these chunks to the different workers in a round-robin fashion. The master waits until all the workers have completed their work and sent back results for aggregation. When computation is completed the master prints out the result, shuts down all workers and then itself. + +With this in mind, let's now create the messages that we want to have flowing in the system. + +Creating the messages +--------------------- + +We start by creating a package for our application, let's call it ``akka.tutorial.first.scala``. We start by creating case classes for each type of message in our application, so we can place them in a hierarchy, call it ``PiMessage``. Right click on the package and choose ``New Scala Class``, and enter ``PiMessage`` for the name of the class. + +We need three different messages: + +- ``Calculate`` -- sent to the ``Master`` actor to start the calculation +- ``Work`` -- sent from the ``Master`` actor to the ``Worker`` actors containing the work assignment +- ``Result`` -- sent from the ``Worker`` actors to the ``Master`` actor containing the result from the worker's calculation + +Messages sent to actors should always be immutable to avoid sharing mutable state. In Scala we have 'case classes' which make excellent messages. So let's start by creating three messages as case classes. 
We also create a common base trait for our messages (that we define as being ``sealed`` in order to prevent creating messages outside our control):: + + package akka.tutorial.first.scala + + sealed trait PiMessage + + case object Calculate extends PiMessage + + case class Work(start: Int, nrOfElements: Int) extends PiMessage + + case class Result(value: Double) extends PiMessage + +Creating the worker +------------------- + +Now we can create the worker actor. Create a new class called ``Worker`` as before. We need to mix in the ``Actor`` trait and defining the ``receive`` method. The ``receive`` method defines our message handler. We expect it to be able to handle the ``Work`` message so we need to add a handler for this message:: + + class Worker extends Actor { + def receive = { + case Work(start, nrOfElements) => + self reply Result(calculatePiFor(start, nrOfElements)) // perform the work + } + } + +The ``Actor`` trait is defined in ``akka.actor`` and you can either import it explicitly, or let Eclipse do it for you when it cannot resolve the ``Actor`` trait. The quick fix option (``Ctrl-F1``) will offer two options: + +.. image:: quickfix.png + +Choose the Akka Actor and move on. + +As you can see we have now created an ``Actor`` with a ``receive`` method as a handler for the ``Work`` message. In this handler we invoke the ``calculatePiFor(..)`` method, wrap the result in a ``Result`` message and send it back to the original sender using ``self.reply``. In Akka the sender reference is implicitly passed along with the message so that the receiver can always reply or store away the sender reference for future use. + +The only thing missing in our ``Worker`` actor is the implementation on the ``calculatePiFor(..)`` method. 
While there are many ways we can implement this algorithm in Scala, in this introductory tutorial we have chosen an imperative style using a for comprehension and an accumulator:: + + def calculatePiFor(start: Int, nrOfElements: Int): Double = { + var acc = 0.0 + for (i <- start until (start + nrOfElements)) + acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1) + acc + } + +Creating the master +------------------- + +Now create a new class for the master actor. The master actor is a little bit more involved. In its constructor we need to create the workers (the ``Worker`` actors) and start them. We will also wrap them in a load-balancing router to make it easier to spread out the work evenly between the workers. First we need to add some imports:: + + import akka.actor.{Actor, PoisonPill} + import akka.routing.{Routing, CyclicIterator} + import Routing._ + import akka.dispatch.Dispatchers + + import java.util.concurrent.CountDownLatch + +and then we can create the workers:: + + // create the workers + val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start()) + + // wrap them with a load-balancing router + val router = Routing.loadBalancerActor(CyclicIterator(workers)).start() + +As you can see we are using the ``actorOf`` factory method to create actors, this method returns as an ``ActorRef`` which is a reference to our newly created actor. This method is available in the ``Actor`` object but is usually imported:: + + import akka.actor.Actor.actorOf + +There are two versions of ``actorOf``; one of them taking a actor type and the other one an instance of an actor. The former one (``actorOf[MyActor]``) is used when the actor class has a no-argument constructor while the second one (``actorOf(new MyActor(..))``) is used when the actor class has a constructor that takes arguments. This is the only way to create an instance of an Actor and the ``actorOf`` method ensures this. 
The latter version is using call-by-name and lazily creates the actor within the scope of the ``actorOf`` method. The ``actorOf`` method instantiates the actor and returns, not an instance to the actor, but an instance to an ``ActorRef``. This reference is the handle through which you communicate with the actor. It is immutable, serializable and location-aware meaning that it "remembers" its original actor even if it is sent to other nodes across the network and can be seen as the equivalent to the Erlang actor's PID. + +The actor's life-cycle is: + +- Created -- ``Actor.actorOf[MyActor]`` -- can **not** receive messages +- Started -- ``actorRef.start()`` -- can receive messages +- Stopped -- ``actorRef.stop()`` -- can **not** receive messages + +Once the actor has been stopped it is dead and can not be started again. + +Now we have a router that is representing all our workers in a single abstraction. If you paid attention to the code above, you saw that we were using the ``nrOfWorkers`` variable. This variable and others we have to pass to the ``Master`` actor in its constructor. So now let's create the master actor. We have to pass in three integer variables: + +- ``nrOfWorkers`` -- defining how many workers we should start up +- ``nrOfMessages`` -- defining how many number chunks to send out to the workers +- ``nrOfElements`` -- defining how big the number chunks sent to each worker should be + +Here is the master actor:: + + class Master( + nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch) + extends Actor { + + var pi: Double = _ + var nrOfResults: Int = _ + var start: Long = _ + + // create the workers + val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start()) + + // wrap them with a load-balancing router + val router = Routing.loadBalancerActor(CyclicIterator(workers)).start() + + def receive = { ... 
} + + override def preStart { + start = System.currentTimeMillis + } + + override def postStop { + // tell the world that the calculation is complete + println( + "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis" + .format(pi, (System.currentTimeMillis - start))) + latch.countDown() + } + } + +A couple of things are worth explaining further. + +First, we are passing in a ``java.util.concurrent.CountDownLatch`` to the ``Master`` actor. This latch is only used for plumbing (in this specific tutorial), to have a simple way of letting the outside world knowing when the master can deliver the result and shut down. In more idiomatic Akka code, as we will see in part two of this tutorial series, we would not use a latch but other abstractions and functions like ``Channel``, ``Future`` and ``!!!`` to achieve the same thing in a non-blocking way. But for simplicity let's stick to a ``CountDownLatch`` for now. + +Second, we are adding a couple of life-cycle callback methods; ``preStart`` and ``postStop``. In the ``preStart`` callback we are recording the time when the actor is started and in the ``postStop`` callback we are printing out the result (the approximation of Pi) and the time it took to calculate it. In this call we also invoke ``latch.countDown`` to tell the outside world that we are done. + +But we are not done yet. We are missing the message handler for the ``Master`` actor. This message handler needs to be able to react to two different messages: + +- ``Calculate`` -- which should start the calculation +- ``Result`` -- which should aggregate the different results + +The ``Calculate`` handler is sending out work to all the ``Worker`` actors and after doing that it also sends a ``Broadcast(PoisonPill)`` message to the router, which will send out the ``PoisonPill`` message to all the actors it is representing (in our case all the ``Worker`` actors). 
``PoisonPill`` is a special kind of message that tells the receiver to shut itself down using the normal shutdown method; ``self.stop``. We also send a ``PoisonPill`` to the router itself (since it's also an actor that we want to shut down). + +The ``Result`` handler is simpler, here we get the value from the ``Result`` message and aggregate it to our ``pi`` member variable. We also keep track of how many results we have received back, and if that matches the number of tasks sent out, the ``Master`` actor considers itself done and shuts down. + +Let's capture this in code:: + + // message handler + def receive = { + case Calculate => + // schedule work + for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements) + + // send a PoisonPill to all workers telling them to shut down themselves + router ! Broadcast(PoisonPill) + + // send a PoisonPill to the router, telling him to shut himself down + router ! PoisonPill + + case Result(value) => + // handle result from the worker + pi += value + nrOfResults += 1 + if (nrOfResults == nrOfMessages) self.stop() + } + +Bootstrap the calculation +------------------------- + +Now the only thing that is left to implement is the runner that should bootstrap and run the calculation for us. We do that by creating an object that we call ``Pi``, here we can extend the ``App`` trait in Scala, which means that we will be able to run this as an application directly from the command line or using the Eclipse Runner. + +The ``Pi`` object is a perfect container module for our actors and messages, so let's put them all there. We also create a method ``calculate`` in which we start up the ``Master`` actor and wait for it to finish:: + + object Pi extends App { + + calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000) + + ... 
// actors and messages + + def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) { + + // this latch is only plumbing to know when the calculation is completed + val latch = new CountDownLatch(1) + + // create the master + val master = actorOf( + new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start() + + // start the calculation + master ! Calculate + + // wait for master to shut down + latch.await() + } + } + +That's it. Now we are done. + +Run it from Eclipse +------------------- + +Eclipse builds your project on every save when ``Project/Build Automatically`` is set. If not, bring your project up to date by clicking ``Project/Build Project``. If there are no compilation errors, you can right-click in the editor where ``Pi`` is defined, and choose ``Run as.. /Scala application``. If everything works fine, you should see:: + + AKKA_HOME is defined as [/Users/jboner/tools/akka-modules-1.1-M1/] + loading config from [/Users/jboner/tools/akka-modules-1.1-M1/config/akka.conf]. + + Pi estimate: 3.1435501812459323 + Calculation time: 858 millis + +If you have not defined the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Can’t load akka.conf`` warning. This is ok since it will then just use the defaults. + +You can also define a new Run configuration, by going to ``Run/Run Configurations``. Create a new ``Scala application`` and choose the tutorial project and the main class to be ``akkatutorial.Pi``. You can pass additional command line arguments to the JVM on the ``Arguments`` page, for instance to define where ``akka.conf`` is: + +.. image:: run-config.png + +Once you have finished your run configuration, click ``Run``. You should see the same output in the ``Console`` window. You can use the same configuration for debugging the application, by choosing ``Run/Debug History`` or just ``Debug As``. 
+ +Conclusion +---------- + +We have learned how to create our first Akka project using Akka's actors to speed up a computation-intensive problem by scaling out on multi-core processors (also known as scaling up). We have also learned to compile and run an Akka project using Eclipse. + +If you have a multi-core machine then I encourage you to try out different numbers of workers (number of working actors) by tweaking the ``nrOfWorkers`` variable to, for example, 2, 4, 6, 8 etc. to see performance improvement by scaling up. + +Now we are ready to take on more advanced problems. In the next tutorial we will build on this one, refactor it into more idiomatic Akka and Scala code, and introduce a few new concepts and abstractions. Whenever you feel ready, join me in the `Getting Started Tutorial: Second Chapter `_. + +Happy hakking. diff --git a/akka-docs/manual/getting-started-first.rst b/akka-docs/intro/getting-started-first-scala.rst similarity index 59% rename from akka-docs/manual/getting-started-first.rst rename to akka-docs/intro/getting-started-first-scala.rst index e0f6be1954..17c4d265c4 100644 --- a/akka-docs/manual/getting-started-first.rst +++ b/akka-docs/intro/getting-started-first-scala.rst @@ -1,17 +1,17 @@ -Getting Started Tutorial: First Chapter -======================================= +Getting Started Tutorial (Scala): First Chapter +=============================================== Introduction ------------ -Welcome to the first tutorial on how to get started with Akka and Scala. We assume that you already know what Akka and Scala is and will now focus on the steps necessary to start your first project. +Welcome to the first tutorial on how to get started with Akka and Scala. We assume that you already know what Akka and Scala are and will now focus on the steps necessary to start your first project. 
There are two variations of this first tutorial: - creating a standalone project and run it from the command line - creating a SBT (Simple Build Tool) project and running it from within SBT -Since they are so similar we will present them both in this tutorial. +Since they are so similar we will present them both. The sample application that we will create is using actors to calculate the value of Pi. Calculating Pi is a CPU intensive operation and we will utilize Akka Actors to write a concurrent solution that scales out to multi-core processors. This sample will be extended in future tutorials to use Akka Remote Actors to scale out on multiple machines in a cluster. @@ -21,7 +21,7 @@ Here is the formula for the algorithm we will use: .. image:: pi-formula.png -In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed, when each worker has processed its chunk it sends a result back to the master which aggregates to total result. +In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed. When each worker has processed its chunk it sends a result back to the master which aggregates the total result. Tutorial source code -------------------- @@ -31,23 +31,35 @@ If you want don't want to type in the code and/or set up an SBT project then you Prerequisites ------------- -This tutorial assumes that you have Jave 1.6 or later installed on you machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and a decent text editor or IDE to type in the Scala code in. +This tutorial assumes that you have Java 1.6 or later installed on your machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and a decent text editor or IDE to type in the Scala code. 
+ +You need to make sure that ``$JAVA_HOME`` environment variable is set to the root of the Java distribution. You also need to make sure that the ``$JAVA_HOME/bin`` is on your ``PATH``:: + + $ export JAVA_HOME=..root of java distribution.. + $ export PATH=$PATH:$JAVA_HOME/bin + +You can test your installation by invoking ``java``:: + + $ java -version + java version "1.6.0_24" + Java(TM) SE Runtime Environment (build 1.6.0_24-b07-334-10M3326) + Java HotSpot(TM) 64-Bit Server VM (build 19.1-b02-334, mixed mode) Downloading and installing Akka ------------------------------- -If you want to be able to build and run the tutorial sample from the command line then you have to download Akka. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. +To build and run the tutorial sample from the command line, you have to download Akka. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. Let's get the ``akka-1.1`` distribution of Akka core (not Akka Modules) from `http://akka.io/downloads `_. Once you have downloaded the distribution unzip it in the folder you would like to have Akka installed in, in my case I choose to install it in ``/Users/jboner/tools/``, simply by unzipping it to this directory. -You need to do one more thing in order to install Akka properly and that is to set the ``AKKA_HOME`` environment variable to the root of the distribution. In my case I'm opening up a shell and navigating down to the distribution and setting the ``AKKA_HOME`` variable:: +You need to do one more thing in order to install Akka properly: set the ``AKKA_HOME`` environment variable to the root of the distribution. 
In my case I'm opening up a shell, navigating down to the distribution, and setting the ``AKKA_HOME`` variable:: $ cd /Users/jboner/tools/akka-1.1 $ export AKKA_HOME=`pwd` $ echo $AKKA_HOME /Users/jboner/tools/akka-1.1 -If we now take a look at what we have in this distribution, looks like this:: +The distribution looks like this:: $ ls -l total 16944 @@ -59,11 +71,11 @@ If we now take a look at what we have in this distribution, looks like this:: -rwxr-xr-x 1 jboner staff 8674105 Apr 6 11:15 scala-library.jar drwxr-xr-x 4 jboner staff 136 Apr 6 11:16 scripts -- In the ``dist`` directory we have all the Akka JARs, including sources and docs. -- In the ``lib_managed/compile`` directory we have all the Akka's dependency JARs. -- In the ``deploy`` directory we have all the sample JARs. +- In the ``dist`` directory we have the Akka JARs, including sources and docs. +- In the ``lib_managed/compile`` directory we have Akka's dependency JARs. +- In the ``deploy`` directory we have the sample JARs. - In the ``scripts`` directory we have scripts for running Akka. -- Finallly the ``scala-library.jar`` is the JAR for the latest Scala distribution that Akka depends on. +- Finally ``scala-library.jar`` is the JAR for the latest Scala distribution that Akka depends on. The only JAR we will need for this tutorial (apart from the ``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``dist`` directory. This is a self-contained JAR with zero dependencies and contains everything we need to write a system using Actors. @@ -77,7 +89,7 @@ Akka is very modular and has many JARs for containing different features. The co - ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener - ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors -We also have Akka Modules containing add-on modules for the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. 
We will not be needing any modules there today but for your information the module JARs are these: +We also have Akka Modules containing add-on modules outside the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. We will not be needing any modules there today, but for your information the module JARs are these: - ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.) - ``akka-amqp-1.1.jar`` -- AMQP integration @@ -90,33 +102,33 @@ We also have Akka Modules containing add-on modules for the core of Akka. You ca Downloading and installing Scala -------------------------------- -If you want to be able to build and run the tutorial sample from the command line then you have to install the Scala distribution. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. +To build and run the tutorial sample from the command line, you have to install the Scala distribution. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. -Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0 final release. If you pick the ``tgz`` or ``zip`` distributions then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. +Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0.RC1 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. 
-You also need to make sure that the ``scala-2.9.0-final/bin`` (if that is the directory where you installed Scala) is on your ``PATH``:: +You also need to make sure that the ``scala-2.9.0.RC1/bin`` (if that is the directory where you installed Scala) is on your ``PATH``:: - $ export PATH=$PATH:scala-2.9.0-final/bin + $ export PATH=$PATH:scala-2.9.0.RC1/bin -Now you can test you installation by invoking and see the printout:: +You can test your installation by invoking scala:: $ scala -version - Scala code runner version 2.9.0.final -- Copyright 2002-2011, LAMP/EPFL + Scala code runner version 2.9.0.RC1 -- Copyright 2002-2011, LAMP/EPFL Looks like we are all good. Finally let's create a source file ``Pi.scala`` for the tutorial and put it in the root of the Akka distribution in the ``tutorial`` directory (you have to create it first). -Some tools requires you to set the ``SCALA_HOME`` environment variable to the root of the Scala distribution, however Akka does not require that. +Some tools require you to set the ``SCALA_HOME`` environment variable to the root of the Scala distribution, however Akka does not require that. Downloading and installing SBT ------------------------------ -SBT, short for 'Simple Build Tool' is an excellent build system written in Scala. You are using Scala to write the build scripts which gives you a lot of power. It has a plugin architecture with many plugins available, something that we will take advantage of soon. SBT is the preferred way of building software in Scala. If you want to use SBT for this tutorial then follow the following instructions, if not you can skip this section and the next. +SBT, short for 'Simple Build Tool' is an excellent build system written in Scala. It uses Scala to write the build scripts which gives you a lot of power. It has a plugin architecture with many plugins available, something that we will take advantage of soon. 
SBT is the preferred way of building software in Scala and is probably the easiest way of getting through this tutorial. If you want to use SBT for this tutorial then follow the following instructions, if not you can skip this section and the next. -To install SBT and create a project for this tutorial it is easiest to follow the instructions on `this page `_. The preferred SBT version to install is ``0.7.6``. +First browse to `http://code.google.com/p/simple-build-tool/downloads/list `_ and download the ``0.7.6.RC0`` distribution. -If you have created an SBT project then step into the newly created SBT project, create a source file ``Pi.scala`` for the tutorial sample and put it in the ``src/main/scala`` directory. +To install SBT and create a project for this tutorial it is easiest to follow the instructions on `http://code.google.com/p/simple-build-tool/wiki/Setup `_. -So far we only have a standard Scala project but now we need to make our project an Akka project. You could add the dependencies manually to the build script, but the easiest way is to use Akka's SBT Plugin, covered in the next section. +Now we need to create our first Akka project. You could add the dependencies manually to the build script, but the easiest way is to use Akka's SBT Plugin, covered in the next section. Creating an Akka SBT project ---------------------------- @@ -128,10 +140,10 @@ If you have not already done so, now is the time to create an SBT project for ou Name: Tutorial 1 Organization: Hakkers Inc Version [1.0]: - Scala version [2.9.0]: - sbt version [0.7.6]: + Scala version [2.9.0.RC1]: + sbt version [0.7.6.RC0]: -Now we have the basis for an SBT project. Akka has an SBT Plugin that makes it very easy to use Akka is an SBT-based project so let's use that. +Now we have the basis for an SBT project. Akka has an SBT Plugin making it very easy to use Akka is an SBT-based project so let's use that. 
To use the plugin, first add a plugin definition to your SBT project by creating a ``Plugins.scala`` file in the ``project/plugins`` directory containing:: @@ -142,7 +154,7 @@ To use the plugin, first add a plugin definition to your SBT project by creating val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.1" } -Now we need to create a project definition using our Akka SBT plugin. We do that by creating a ``Project.scala`` file in the ``build`` directory containing:: +Now we need to create a project definition using our Akka SBT plugin. We do that by creating a ``project/build/Project.scala`` file containing:: import sbt._ @@ -152,14 +164,14 @@ Now we need to create a project definition using our Akka SBT plugin. We do that The magic is in mixing in the ``AkkaProject`` trait. -Not needed in this tutorial, but if you would like to use additional Akka modules than ``akka-actor`` then you can add these as "module configurations" in the project file. Here is an example adding ``akka-remote`` and ``akka-stm``:: +Not needed in this tutorial, but if you would like to use additional Akka modules beyond ``akka-actor``, you can add these as "module configurations" in the project file. Here is an example adding ``akka-remote`` and ``akka-stm``:: class AkkaSampleProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject { val akkaSTM = akkaModule("stm") val akkaRemote = akkaModule("remote") } -So, now we are all set. Just one final thing to do; make SBT download all dependencies it needs. That is done by invoking:: +So, now we are all set. Just one final thing to do; make SBT download the dependencies it needs. That is done by invoking:: > update @@ -168,13 +180,13 @@ SBT itself needs a whole bunch of dependencies but our project will only need on Start writing the code ---------------------- -Now it's about time that we start hacking. +Now it's about time to start hacking. 
-We start by creating a ``Pi.scala`` file and add these import statements at the top of the file:: +We start by creating a ``Pi.scala`` file and adding these import statements at the top of the file:: - package akka.tutorial.scala.first + package akka.tutorial.first.scala - import akka.actor.{Actor, ActorRef, PoisonPill} + import akka.actor.{Actor, PoisonPill} import Actor._ import akka.routing.{Routing, CyclicIterator} import Routing._ @@ -184,12 +196,12 @@ We start by creating a ``Pi.scala`` file and add these import statements at the If you are using SBT in this tutorial then create the file in the ``src/main/scala`` directory. -If you are using the command line tools then just create the file wherever you want. I will create it in a directory called ``tutorial`` at the root of the Akka distribution, e.g. in ``$AKKA_HOME/tutorial/Pi.scala``. +If you are using the command line tools then create the file wherever you want. I will create it in a directory called ``tutorial`` at the root of the Akka distribution, e.g. in ``$AKKA_HOME/tutorial/Pi.scala``. Creating the messages --------------------- -The design we are aiming for is to have one ``Master`` actor initiating the computation, creating a set of ``Worker`` actors. Then it splits up the work into discrete chunks, sends out these work chunks to the different workers in a round-robin fashion. The master then waits until all the workers have completed all the work and sent back the result for aggregation. When computation is completed the master prints out the result, shuts down all workers an then himself. +The design we are aiming for is to have one ``Master`` actor initiating the computation, creating a set of ``Worker`` actors. Then it splits up the work into discrete chunks, and sends these chunks to the different workers in a round-robin fashion. The master waits until all the workers have completed their work and sent back results for aggregation. 
When computation is completed the master prints out the result, shuts down all workers and then itself. With this in mind, let's now create the messages that we want to have flowing in the system. We need three different messages: @@ -219,14 +231,14 @@ Now we can create the worker actor. This is done by mixing in the ``Actor`` tra } } -As you can see we have now created an ``Actor`` with a ``receive`` method as a handler for the ``Work`` message. In this handler we invoke the ``calculatePiFor(..)`` method, wrap the result in a ``Result`` message and send it back to the original sender using ``self.reply``. In Akka the sender reference is implicitly passed along with the message so that the receiver can always reply or store away the sender reference use. +As you can see we have now created an ``Actor`` with a ``receive`` method as a handler for the ``Work`` message. In this handler we invoke the ``calculatePiFor(..)`` method, wrap the result in a ``Result`` message and send it back to the original sender using ``self.reply``. In Akka the sender reference is implicitly passed along with the message so that the receiver can always reply or store away the sender reference for future use. -The only thing missing in our ``Worker`` actor is the implementation on the ``calculatePiFor(..)`` method. There are many ways we can implement this algorithm in Scala, in this introductory tutorial we have chosen an imperative style using a for comprehension and an accumulator:: +The only thing missing in our ``Worker`` actor is the implementation on the ``calculatePiFor(..)`` method. 
While there are many ways we can implement this algorithm in Scala, in this introductory tutorial we have chosen an imperative style using a for comprehension and an accumulator:: def calculatePiFor(start: Int, nrOfElements: Int): Double = { var acc = 0.0 for (i <- start until (start + nrOfElements)) - acc += 4 * math.pow(-1, i) / (2 * i + 1) + acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1) acc } @@ -243,17 +255,28 @@ The master actor is a little bit more involved. In its constructor we need to cr As you can see we are using the ``actorOf`` factory method to create actors, this method returns as an ``ActorRef`` which is a reference to our newly created actor. This method is available in the ``Actor`` object but is usually imported:: - import akka.actor.Actor._ + import akka.actor.Actor.actorOf -Now we have a router that is representing all our workers in a single abstraction. If you paid attention to the code above to see that we were using the ``nrOfWorkers`` variable. This variable and others we have to pass to the ``Master`` actor in its constructor. So now let's create the master actor. We had to pass in three integer variables needed: +There are two versions of ``actorOf``; one of them taking a actor type and the other one an instance of an actor. The former one (``actorOf[MyActor]``) is used when the actor class has a no-argument constructor while the second one (``actorOf(new MyActor(..))``) is used when the actor class has a constructor that takes arguments. This is the only way to create an instance of an Actor and the ``actorOf`` method ensures this. The latter version is using call-by-name and lazily creates the actor within the scope of the ``actorOf`` method. The ``actorOf`` method instantiates the actor and returns, not an instance to the actor, but an instance to an ``ActorRef``. This reference is the handle through which you communicate with the actor. 
It is immutable, serializable and location-aware meaning that it "remembers" its original actor even if it is sent to other nodes across the network and can be seen as the equivalent to the Erlang actor's PID. + +The actor's life-cycle is: + +- Created -- ``Actor.actorOf[MyActor]`` -- can **not** receive messages +- Started -- ``actorRef.start()`` -- can receive messages +- Stopped -- ``actorRef.stop()`` -- can **not** receive messages + +Once the actor has been stopped it is dead and can not be started again. + +Now we have a router that is representing all our workers in a single abstraction. If you paid attention to the code above, you saw that we were using the ``nrOfWorkers`` variable. This variable and others we have to pass to the ``Master`` actor in its constructor. So now let's create the master actor. We have to pass in three integer variables: - ``nrOfWorkers`` -- defining how many workers we should start up -- ``nrOfMessages`` -- defining how many number chunks should send out to the workers +- ``nrOfMessages`` -- defining how many number chunks to send out to the workers - ``nrOfElements`` -- defining how big the number chunks sent to each worker should be -Let's now write the master actor:: +Here is the master actor:: - class Master(nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch) + class Master( + nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch) extends Actor { var pi: Double = _ @@ -269,19 +292,21 @@ Let's now write the master actor:: def receive = { ... } override def preStart { - start = now + start = System.currentTimeMillis } override def postStop { // tell the world that the calculation is complete - println("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis".format(pi, (now - start))) + println( + "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis" + .format(pi, (System.currentTimeMillis - start))) latch.countDown() } } -Couple of things are worth explaining further. 
+A couple of things are worth explaining further. -First, we are passing in a ``java.util.concurrent.CountDownLatch`` to the ``Master`` actor. This latch is only used for doing plumbing (in this specific tutorial), to have a simple way of letting the outside world knowing when the master can deliver the result and shut down. In more idiomatic Akka code, as we will see in part two of this tutorial series, we would not use a latch but other abstractions and functions like ``Channel``, ``Future`` and ``!!!`` to achive the same thing in a non-blocking way. But for simplicity let's stick to a ``CountDownLatch`` for now. +First, we are passing in a ``java.util.concurrent.CountDownLatch`` to the ``Master`` actor. This latch is only used for plumbing (in this specific tutorial), to have a simple way of letting the outside world knowing when the master can deliver the result and shut down. In more idiomatic Akka code, as we will see in part two of this tutorial series, we would not use a latch but other abstractions and functions like ``Channel``, ``Future`` and ``!!!`` to achieve the same thing in a non-blocking way. But for simplicity let's stick to a ``CountDownLatch`` for now. Second, we are adding a couple of life-cycle callback methods; ``preStart`` and ``postStop``. In the ``preStart`` callback we are recording the time when the actor is started and in the ``postStop`` callback we are printing out the result (the approximation of Pi) and the time it took to calculate it. In this call we also invoke ``latch.countDown`` to tell the outside world that we are done. @@ -290,11 +315,11 @@ But we are not done yet. 
We are missing the message handler for the ``Master`` a - ``Calculate`` -- which should start the calculation - ``Result`` -- which should aggregate the different results -The ``Calculate`` handler is sending out work to all the ``Worker`` actors and after doing that it also sends a ``Broadcast(PoisonPill)`` message to the router, which will send out the ``PoisonPill`` message to all the actors it is representing (in our case all the ``Worker`` actors). The ``PoisonPill`` is a special kind of message that tells the receiver to shut himself down using the normal shutdown; ``self.stop()``. Then we also send a ``PoisonPill`` to the router itself (since it's also an actor that we want to shut down). +The ``Calculate`` handler is sending out work to all the ``Worker`` actors and after doing that it also sends a ``Broadcast(PoisonPill)`` message to the router, which will send out the ``PoisonPill`` message to all the actors it is representing (in our case all the ``Worker`` actors). ``PoisonPill`` is a special kind of message that tells the receiver to shut itself down using the normal shutdown method; ``self.stop``. We also send a ``PoisonPill`` to the router itself (since it's also an actor that we want to shut down). -The ``Result`` handler is simpler, here we just get the value from the ``Result`` message and aggregate it to our ``pi`` member variable. We also keep track of how many results we have received back and if it matches the number of tasks sent out the ``Master`` actor considers itself done and shuts himself down. +The ``Result`` handler is simpler, here we get the value from the ``Result`` message and aggregate it to our ``pi`` member variable. We also keep track of how many results we have received back, and if that matches the number of tasks sent out, the ``Master`` actor considers itself done and shuts down. 
-Now, let's capture this in code:: +Let's capture this in code:: // message handler def receive = { @@ -318,7 +343,9 @@ Now, let's capture this in code:: Bootstrap the calculation ------------------------- -Now the only thing that is left to implement is the runner that should bootstrap and run his calculation for us. We do that by creating an object that we call ``Pi``, here we can extend the ``App`` trait in Scala which means that we will be able to run this as an application directly from the command line. The ``Pi`` object is a perfect container module for our actors and messages, so let's put them all there. We also create a method ``calculate`` in which we start up the ``Master`` actor and waits for it to finish:: +Now the only thing that is left to implement is the runner that should bootstrap and run the calculation for us. We do that by creating an object that we call ``Pi``, here we can extend the ``App`` trait in Scala, which means that we will be able to run this as an application directly from the command line. + +The ``Pi`` object is a perfect container module for our actors and messages, so let's put them all there. We also create a method ``calculate`` in which we start up the ``Master`` actor and wait for it to finish:: object Pi extends App { @@ -332,7 +359,8 @@ Now the only thing that is left to implement is the runner that should bootstrap val latch = new CountDownLatch(1) // create the master - val master = actorOf(new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start() + val master = actorOf( + new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start() // start the calculation master ! Calculate @@ -346,14 +374,13 @@ That's it. Now we are done. 
But before we package it up and run it, let's take a look at the full code now, with package declaration, imports and all:: - package akka.tutorial.scala.first + package akka.tutorial.first.scala import akka.actor.{Actor, PoisonPill} import Actor._ import akka.routing.{Routing, CyclicIterator} import Routing._ - import System.{currentTimeMillis => now} import java.util.concurrent.CountDownLatch object Pi extends App { @@ -377,7 +404,7 @@ But before we package it up and run it, let's take a look at the full code now, def calculatePiFor(start: Int, nrOfElements: Int): Double = { var acc = 0.0 for (i <- start until (start + nrOfElements)) - acc += 4 * math.pow(-1, i) / (2 * i + 1) + acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1) acc } @@ -390,7 +417,8 @@ But before we package it up and run it, let's take a look at the full code now, // ================== // ===== Master ===== // ================== - class Master(nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch) + class Master( + nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch) extends Actor { var pi: Double = _ @@ -407,7 +435,7 @@ But before we package it up and run it, let's take a look at the full code now, def receive = { case Calculate => // schedule work - //for (arg <- 0 until nrOfMessages) router ! Work(arg, nrOfElements) + //for (start <- 0 until nrOfMessages) router ! Work(start, nrOfElements) for (i <- 0 until nrOfMessages) router ! 
Work(i * nrOfElements, nrOfElements) // send a PoisonPill to all workers telling them to shut down themselves @@ -424,12 +452,14 @@ But before we package it up and run it, let's take a look at the full code now, } override def preStart { - start = now + start = System.currentTimeMillis } override def postStop { // tell the world that the calculation is complete - println("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis".format(pi, (now - start))) + println( + "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis" + .format(pi, (System.currentTimeMillis - start))) latch.countDown() } } @@ -443,7 +473,8 @@ But before we package it up and run it, let's take a look at the full code now, val latch = new CountDownLatch(1) // create the master - val master = actorOf(new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start() + val master = actorOf( + new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start() // start the calculation master ! Calculate @@ -456,27 +487,31 @@ But before we package it up and run it, let's take a look at the full code now, Run it as a command line application ------------------------------------ -If you have not typed (or copied) in the code for the tutorial in the ``$AKKA_HOME/tutorial/Pi.scala`` then now is the time. When that is done open up a shell and step in to the Akka distribution (``cd $AKKA_HOME``). +If you have not typed in (or copied) the code for the tutorial as ``$AKKA_HOME/tutorial/Pi.scala`` then now is the time. When that's done open up a shell and step in to the Akka distribution (``cd $AKKA_HOME``). First we need to compile the source file. That is done with Scala's compiler ``scalac``. Our application depends on the ``akka-actor-1.1.jar`` JAR file, so let's add that to the compiler classpath when we compile the source:: $ scalac -cp dist/akka-actor-1.1.jar tutorial/Pi.scala -When we have compiled the source file we are ready to run the application. 
This is done with ``java`` but yet again we need to add the ``akka-actor-1.1.jar`` JAR file to the classpath, this time we also need to add the Scala runtime library ``scala-library.jar`` and the classes we compiled ourselves to the classpath:: +When we have compiled the source file we are ready to run the application. This is done with ``java`` but yet again we need to add the ``akka-actor-1.1.jar`` JAR file to the classpath, and this time we also need to add the Scala runtime library ``scala-library.jar`` and the classes we compiled ourselves:: - $ java -cp dist/akka-actor-1.1.jar:scala-library.jar:tutorial akka.tutorial.scala.first.Pi - AKKA_HOME is defined as [/Users/jboner/src/akka-stuff/akka-core], loading config from \ - [/Users/jboner/src/akka-stuff/akka-core/config/akka.conf]. + $ java \ + -cp dist/akka-actor-1.1.jar:scala-library.jar:tutorial \ + akka.tutorial.first.scala.Pi + AKKA_HOME is defined as [/Users/jboner/src/akka-stuff/akka-core] + loading config from [/Users/jboner/src/akka-stuff/akka-core/config/akka.conf]. Pi estimate: 3.1435501812459323 Calculation time: 858 millis Yippee! It is working. +If you have not defined the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Can’t load akka.conf`` warning. This is ok since it will then just use the defaults. + Run it inside SBT ----------------- -If you have based the tutorial on SBT then you can run the application directly inside SBT. First you need to compile the project:: +If you used SBT, then you can run the application directly inside SBT. First you need to compile the project:: $ sbt > update @@ -493,11 +528,15 @@ When this in done we can run our application directly inside SBT:: Yippee! It is working. +If you have not defined an the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Can’t load akka.conf`` warning. 
This is ok since it will then just use the defaults. + Conclusion ---------- -Now we have learned how to create our first Akka project utilizing Akka's actors to speed up a computation intensive problem by scaling out on multi-core processors (also known as scaling up). We have also learned how to compile and run an Akka project utilizing either the tools on the command line or the SBT build system. +We have learned how to create our first Akka project using Akka's actors to speed up a computation-intensive problem by scaling out on multi-core processors (also known as scaling up). We have also learned to compile and run an Akka project using either the tools on the command line or the SBT build system. -Now we are ready to take on more advanced problems. In the next tutorial we will build upon this one, refactor it into more idiomatic Akka and Scala code and introduce a few new concepts and abstractions. Whenever you feel ready, join me in the `Getting Started Tutorial: Second Chapter `_. +If you have a multi-core machine then I encourage you to try out different number of workers (number of working actors) by tweaking the ``nrOfWorkers`` variable to for example; 2, 4, 6, 8 etc. to see performance improvement by scaling up. + +Now we are ready to take on more advanced problems. In the next tutorial we will build on this one, refactor it into more idiomatic Akka and Scala code, and introduce a few new concepts and abstractions. Whenever you feel ready, join me in the `Getting Started Tutorial: Second Chapter `_. Happy hakking. diff --git a/akka-docs/intro/getting-started-first.rst b/akka-docs/intro/getting-started-first.rst new file mode 100644 index 0000000000..aaa466c1dc --- /dev/null +++ b/akka-docs/intro/getting-started-first.rst @@ -0,0 +1,335 @@ +Getting Started Tutorial: First Chapter +======================================= + +Introduction +------------ + +Welcome to the first tutorial on how to get started with Akka and Scala. 
We assume that you already know what Akka and Scala are and will now focus on the steps necessary to start your first project. + +There are two variations of this first tutorial: + +- creating a standalone project and running it from the command line +- creating an SBT (Simple Build Tool) project and running it from within SBT + +Since they are so similar we will present them both. + +The sample application that we will create is using actors to calculate the value of Pi. Calculating Pi is a CPU intensive operation and we will utilize Akka Actors to write a concurrent solution that scales out to multi-core processors. This sample will be extended in future tutorials to use Akka Remote Actors to scale out on multiple machines in a cluster. + +We will be using an algorithm that is called "embarrassingly parallel" which just means that each job is completely isolated and not coupled with any other job. Since this algorithm is so parallelizable it suits the actor model very well. + +Here is the formula for the algorithm we will use: + +.. image:: pi-formula.png + +In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed. When each worker has processed its chunk it sends a result back to the master which aggregates the total result. + +Tutorial source code +-------------------- + +If you don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here `_, with the actual source code `here `_. + +Prerequisites +------------- + +This tutorial assumes that you have Java 1.6 or later installed on your machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and have a decent text editor or IDE to type in the Scala code. 
+ +Downloading and installing Akka +------------------------------- + +To build and run the tutorial sample from the command line, you have to download Akka. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. + +Let's get the ``akka-1.1`` distribution of Akka core (not Akka Modules) from `http://akka.io/downloads `_. Once you have downloaded the distribution unzip it in the folder you would like to have Akka installed in, in my case I choose to install it in ``/Users/jboner/tools/``, simply by unzipping it to this directory. + +You need to do one more thing in order to install Akka properly: set the ``AKKA_HOME`` environment variable to the root of the distribution. In my case I'm opening up a shell, navigating down to the distribution, and setting the ``AKKA_HOME`` variable:: + + $ cd /Users/jboner/tools/akka-1.1 + $ export AKKA_HOME=`pwd` + $ echo $AKKA_HOME + /Users/jboner/tools/akka-1.1 + +The distribution looks like this:: + + $ ls -l + total 16944 + drwxr-xr-x 7 jboner staff 238 Apr 6 11:15 . + drwxr-xr-x 28 jboner staff 952 Apr 6 11:16 .. + drwxr-xr-x 17 jboner staff 578 Apr 6 11:16 deploy + drwxr-xr-x 26 jboner staff 884 Apr 6 11:16 dist + drwxr-xr-x 3 jboner staff 102 Apr 6 11:15 lib_managed + -rwxr-xr-x 1 jboner staff 8674105 Apr 6 11:15 scala-library.jar + drwxr-xr-x 4 jboner staff 136 Apr 6 11:16 scripts + +- In the ``dist`` directory we have the Akka JARs, including sources and docs. +- In the ``lib_managed/compile`` directory we have Akka's dependency JARs. +- In the ``deploy`` directory we have the sample JARs. +- In the ``scripts`` directory we have scripts for running Akka. +- Finally ``scala-library.jar`` is the JAR for the latest Scala distribution that Akka depends on. + +The only JAR we will need for this tutorial (apart from the ``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``dist`` directory. 
This is a self-contained JAR with zero dependencies and contains everything we need to write a system using Actors. + +Akka is very modular and has many JARs for containing different features. The core distribution has seven modules: + +- ``akka-actor-1.1.jar`` -- Standard Actors +- ``akka-typed-actor-1.1.jar`` -- Typed Actors +- ``akka-remote-1.1.jar`` -- Remote Actors +- ``akka-stm-1.1.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures +- ``akka-http-1.1.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration +- ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener +- ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors + +We also have Akka Modules containing add-on modules outside the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. We will not be needing any modules there today, but for your information the module JARs are these: + +- ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.) +- ``akka-amqp-1.1.jar`` -- AMQP integration +- ``akka-camel-1.1.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world) +- ``akka-camel-typed-1.1.jar`` -- Apache Camel Typed Actors integration +- ``akka-scalaz-1.1.jar`` -- Support for the Scalaz library +- ``akka-spring-1.1.jar`` -- Spring framework integration +- ``akka-osgi-dependencies-bundle-1.1.jar`` -- OSGi support + +Downloading and installing Scala +-------------------------------- + +To build and run the tutorial sample from the command line, you have to install the Scala distribution. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. + +Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0.RC1 release. 
If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. + +You also need to make sure that the ``scala-2.9.0.RC1/bin`` (if that is the directory where you installed Scala) is on your ``PATH``:: + + $ export PATH=$PATH:scala-2.9.0.RC1/bin + +You can test your installation by invoking scala:: + + $ scala -version + Scala code runner version 2.9.0.RC1 -- Copyright 2002-2011, LAMP/EPFL + +Looks like we are all good. Finally let's create a source file ``Pi.scala`` for the tutorial and put it in the root of the Akka distribution in the ``tutorial`` directory (you have to create it first). + +Some tools require you to set the ``SCALA_HOME`` environment variable to the root of the Scala distribution, however Akka does not require that. + +Downloading and installing SBT +------------------------------ + +SBT, short for 'Simple Build Tool' is an excellent build system written in Scala. It uses Scala to write the build scripts which gives you a lot of power. It has a plugin architecture with many plugins available, something that we will take advantage of soon. SBT is the preferred way of building software in Scala and is probably the easiest way of getting through this tutorial. If you want to use SBT for this tutorial then follow the following instructions, if not you can skip this section and the next. + +First browse to the `SBT download page `_ and download the ``0.7.6.RC0`` distribution. + +To install SBT and create a project for this tutorial it is easiest to follow the instructions on `this page `_. + +If you have created an SBT project then step into the newly created SBT project, create a source file ``Pi.scala`` for the tutorial sample and put it in the ``src/main/scala`` directory. + +So far we only have a standard Scala project but now we need to make our project an Akka project. 
You could add the dependencies manually to the build script, but the easiest way is to use Akka's SBT Plugin, covered in the next section. + +Creating an Akka SBT project +---------------------------- + +If you have not already done so, now is the time to create an SBT project for our tutorial. You do that by stepping into the directory you want to create your project in and invoking the ``sbt`` command answering the questions for setting up your project (just pressing ENTER will choose the default in square brackets):: + + $ sbt + Project does not exist, create new project? (y/N/s) y + Name: Tutorial 1 + Organization: Hakkers Inc + Version [1.0]: + Scala version [2.9.0.RC1]: + sbt version [0.7.6.RC0]: + +Now we have the basis for an SBT project. Akka has an SBT Plugin making it very easy to use Akka in an SBT-based project so let's use that. + +To use the plugin, first add a plugin definition to your SBT project by creating a ``Plugins.scala`` file in the ``project/plugins`` directory containing:: + + import sbt._ + + class Plugins(info: ProjectInfo) extends PluginDefinition(info) { + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.1" + } + +Now we need to create a project definition using our Akka SBT plugin. We do that by creating a ``project/build/Project.scala`` file containing:: + + import sbt._ + + class TutorialOneProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject { + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + } + +The magic is in mixing in the ``AkkaProject`` trait. + +Not needed in this tutorial, but if you would like to use additional Akka modules beyond ``akka-actor``, you can add these as "module configurations" in the project file. 
Here is an example adding ``akka-remote`` and ``akka-stm``:: + + class AkkaSampleProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject { + val akkaSTM = akkaModule("stm") + val akkaRemote = akkaModule("remote") + } + +So, now we are all set. Just one final thing to do; make SBT download the dependencies it needs. That is done by invoking:: + + > update + +SBT itself needs a whole bunch of dependencies but our project will only need one; ``akka-actor-1.1.jar``. SBT downloads that as well. + +Start writing the code +---------------------- + +Now it's about time to start hacking. + +We start by creating a ``Pi.scala`` file and adding these import statements at the top of the file: + +.. includecode:: examples/Pi.scala#imports + +If you are using SBT in this tutorial then create the file in the ``src/main/scala`` directory. + +If you are using the command line tools then create the file wherever you want. I will create it in a directory called ``tutorial`` at the root of the Akka distribution, e.g. in ``$AKKA_HOME/tutorial/Pi.scala``. + +Creating the messages +--------------------- + +The design we are aiming for is to have one ``Master`` actor initiating the computation, creating a set of ``Worker`` actors. Then it splits up the work into discrete chunks, and sends these chunks to the different workers in a round-robin fashion. The master waits until all the workers have completed their work and sent back results for aggregation. When computation is completed the master prints out the result, shuts down all workers and then itself. + +With this in mind, let's now create the messages that we want to have flowing in the system. 
We need three different messages: + +- ``Calculate`` -- sent to the ``Master`` actor to start the calculation +- ``Work`` -- sent from the ``Master`` actor to the ``Worker`` actors containing the work assignment +- ``Result`` -- sent from the ``Worker`` actors to the ``Master`` actor containing the result from the worker's calculation + +Messages sent to actors should always be immutable to avoid sharing mutable state. In Scala we have 'case classes' which make excellent messages. So let's start by creating three messages as case classes. We also create a common base trait for our messages (that we define as being ``sealed`` in order to prevent creating messages outside our control): + +.. includecode:: examples/Pi.scala#messages + +Creating the worker +------------------- + +Now we can create the worker actor. This is done by mixing in the ``Actor`` trait and defining the ``receive`` method. The ``receive`` method defines our message handler. We expect it to be able to handle the ``Work`` message so we need to add a handler for this message: + +.. includecode:: examples/Pi.scala#worker + :exclude: calculate-pi + +As you can see we have now created an ``Actor`` with a ``receive`` method as a handler for the ``Work`` message. In this handler we invoke the ``calculatePiFor(..)`` method, wrap the result in a ``Result`` message and send it back to the original sender using ``self.reply``. In Akka the sender reference is implicitly passed along with the message so that the receiver can always reply or store away the sender reference for future use. + +The only thing missing in our ``Worker`` actor is the implementation of the ``calculatePiFor(..)`` method. While there are many ways we can implement this algorithm in Scala, in this introductory tutorial we have chosen an imperative style using a for comprehension and an accumulator: + +.. 
includecode:: examples/Pi.scala#calculate-pi + +Creating the master +------------------- + +The master actor is a little bit more involved. In its constructor we need to create the workers (the ``Worker`` actors) and start them. We will also wrap them in a load-balancing router to make it easier to spread out the work evenly between the workers. Let's do that first: + +.. includecode:: examples/Pi.scala#create-workers + +As you can see we are using the ``actorOf`` factory method to create actors, this method returns an ``ActorRef`` which is a reference to our newly created actor. This method is available in the ``Actor`` object but is usually imported:: + + import akka.actor.Actor._ + +Now we have a router that is representing all our workers in a single abstraction. If you paid attention to the code above, you saw that we were using the ``nrOfWorkers`` variable. This variable and others we have to pass to the ``Master`` actor in its constructor. So now let's create the master actor. We have to pass in three integer variables: + +- ``nrOfWorkers`` -- defining how many workers we should start up +- ``nrOfMessages`` -- defining how many number chunks to send out to the workers +- ``nrOfElements`` -- defining how big the number chunks sent to each worker should be + +Here is the master actor: + +.. includecode:: examples/Pi.scala#master + :exclude: message-handling + +A couple of things are worth explaining further. + +First, we are passing in a ``java.util.concurrent.CountDownLatch`` to the ``Master`` actor. This latch is only used for plumbing (in this specific tutorial), to have a simple way of letting the outside world know when the master can deliver the result and shut down. In more idiomatic Akka code, as we will see in part two of this tutorial series, we would not use a latch but other abstractions and functions like ``Channel``, ``Future`` and ``!!!`` to achieve the same thing in a non-blocking way. 
But for simplicity let's stick to a ``CountDownLatch`` for now. + +Second, we are adding a couple of life-cycle callback methods; ``preStart`` and ``postStop``. In the ``preStart`` callback we are recording the time when the actor is started and in the ``postStop`` callback we are printing out the result (the approximation of Pi) and the time it took to calculate it. In this call we also invoke ``latch.countDown`` to tell the outside world that we are done. + +But we are not done yet. We are missing the message handler for the ``Master`` actor. This message handler needs to be able to react to two different messages: + +- ``Calculate`` -- which should start the calculation +- ``Result`` -- which should aggregate the different results + +The ``Calculate`` handler is sending out work to all the ``Worker`` actors and after doing that it also sends a ``Broadcast(PoisonPill)`` message to the router, which will send out the ``PoisonPill`` message to all the actors it is representing (in our case all the ``Worker`` actors). ``PoisonPill`` is a special kind of message that tells the receiver to shut itself down using the normal shutdown method; ``self.stop``. We also send a ``PoisonPill`` to the router itself (since it's also an actor that we want to shut down). + +The ``Result`` handler is simpler, here we get the value from the ``Result`` message and aggregate it to our ``pi`` member variable. We also keep track of how many results we have received back, and if that matches the number of tasks sent out, the ``Master`` actor considers itself done and shuts down. + +Let's capture this in code: + +.. includecode:: examples/Pi.scala#master-receive + +Bootstrap the calculation +------------------------- + +Now the only thing that is left to implement is the runner that should bootstrap and run the calculation for us. 
We do that by creating an object that we call ``Pi``, here we can extend the ``App`` trait in Scala, which means that we will be able to run this as an application directly from the command line. + +The ``Pi`` object is a perfect container module for our actors and messages, so let's put them all there. We also create a method ``calculate`` in which we start up the ``Master`` actor and wait for it to finish: + +.. includecode:: examples/Pi.scala#app + :exclude: actors-and-messages + +That's it. Now we are done. + +But before we package it up and run it, let's take a look at the full code now, with package declaration, imports and all: + +.. includecode:: examples/Pi.scala + + + +Run it as a command line application +------------------------------------ + +If you have not typed in (or copied) the code for the tutorial as ``$AKKA_HOME/tutorial/Pi.scala`` then now is the time. When that's done open up a shell and step in to the Akka distribution (``cd $AKKA_HOME``). + +First we need to compile the source file. That is done with Scala's compiler ``scalac``. Our application depends on the ``akka-actor-1.1.jar`` JAR file, so let's add that to the compiler classpath when we compile the source:: + + $ scalac -cp dist/akka-actor-1.1.jar tutorial/Pi.scala + +When we have compiled the source file we are ready to run the application. This is done with ``java`` but yet again we need to add the ``akka-actor-1.1.jar`` JAR file to the classpath, and this time we also need to add the Scala runtime library ``scala-library.jar`` and the classes we compiled ourselves:: + + $ java -cp dist/akka-actor-1.1.jar:scala-library.jar:tutorial akka.tutorial.scala.first.Pi + AKKA_HOME is defined as [/Users/jboner/src/akka-stuff/akka-core], loading config from \ + [/Users/jboner/src/akka-stuff/akka-core/config/akka.conf]. + + Pi estimate: 3.1435501812459323 + Calculation time: 858 millis + +Yippee! It is working. 
+ +If you have not defined the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Can’t load akka.conf`` warning. This is ok since it will then just use the defaults. + +Run it inside SBT +----------------- + +If you used SBT, then you can run the application directly inside SBT. First you need to compile the project:: + + $ sbt + > update + ... + > compile + ... + +When this is done we can run our application directly inside SBT:: + + > run + ... + Pi estimate: 3.1435501812459323 + Calculation time: 942 millis + +Yippee! It is working. + +If you have not defined the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Can’t load akka.conf`` warning. This is ok since it will then just use the defaults. + +The implementation in more detail +--------------------------------- + +To create our actors we used a method called ``actorOf`` in the ``Actor`` object. We used it in two different ways, one of them taking an actor type and the other one an instance of an actor. The former one (``actorOf[Worker]``) is used when the actor class has a no-argument constructor while the second one (``actorOf(new Master(..))``) is used when the actor class has a constructor that takes arguments. This is the only way to create an instance of an Actor and the ``actorOf`` method ensures this. The latter version is using call-by-name and lazily creates the actor within the scope of the ``actorOf`` method. The ``actorOf`` method instantiates the actor and returns, not an instance to the actor, but an instance to an ``ActorRef``. This reference is the handle through which you communicate with the actor. It is immutable, serializable and location-aware meaning that it "remembers" its original actor even if it is sent to other nodes across the network and can be seen as the equivalent to the Erlang actor's PID. 
+ +The actor's life-cycle is: + +- Created -- ``Actor.actorOf[MyActor]`` -- can **not** receive messages +- Started -- ``actorRef.start()`` -- can receive messages +- Stopped -- ``actorRef.stop()`` -- can **not** receive messages + +Once the actor has been stopped it is dead and can not be started again. + +Conclusion +---------- + +We have learned how to create our first Akka project using Akka's actors to speed up a computation-intensive problem by scaling out on multi-core processors (also known as scaling up). We have also learned to compile and run an Akka project using either the tools on the command line or the SBT build system. + +Now we are ready to take on more advanced problems. In the next tutorial we will build on this one, refactor it into more idiomatic Akka and Scala code, and introduce a few new concepts and abstractions. Whenever you feel ready, join me in the `Getting Started Tutorial: Second Chapter `_. + +Happy hakking. diff --git a/akka-docs/intro/import-project.png b/akka-docs/intro/import-project.png new file mode 100644 index 0000000000..5774e9d412 Binary files /dev/null and b/akka-docs/intro/import-project.png differ diff --git a/akka-docs/intro/index.rst b/akka-docs/intro/index.rst new file mode 100644 index 0000000000..8df1a87a5d --- /dev/null +++ b/akka-docs/intro/index.rst @@ -0,0 +1,12 @@ +Introduction +============ + +.. 
toctree:: + :maxdepth: 2 + + why-akka + getting-started-first-scala + getting-started-first-scala-eclipse + getting-started-first-java + building-akka + configuration diff --git a/akka-docs/intro/install-beta2-updatesite.png b/akka-docs/intro/install-beta2-updatesite.png new file mode 100644 index 0000000000..4eb85682ad Binary files /dev/null and b/akka-docs/intro/install-beta2-updatesite.png differ diff --git a/akka-docs/manual/pi-formula.png b/akka-docs/intro/pi-formula.png similarity index 100% rename from akka-docs/manual/pi-formula.png rename to akka-docs/intro/pi-formula.png diff --git a/akka-docs/intro/quickfix.png b/akka-docs/intro/quickfix.png new file mode 100644 index 0000000000..f4f2811e52 Binary files /dev/null and b/akka-docs/intro/quickfix.png differ diff --git a/akka-docs/intro/run-config.png b/akka-docs/intro/run-config.png new file mode 100644 index 0000000000..912f958223 Binary files /dev/null and b/akka-docs/intro/run-config.png differ diff --git a/akka-docs/intro/why-akka.rst b/akka-docs/intro/why-akka.rst new file mode 100644 index 0000000000..512a669b2f --- /dev/null +++ b/akka-docs/intro/why-akka.rst @@ -0,0 +1,68 @@ +Why Akka? +========= + +What features can the Akka platform offer, over the competition? +---------------------------------------------------------------- + +Akka is an unified runtime and programming model for: + +- Scale up (Concurrency) +- Scale out (Remoting) +- Fault tolerance + +One thing to learn and admin, with high cohesion and coherent semantics. + +Akka is a very scalable piece of software, not only in the performance sense, +but in the size of applications it is useful for. The core of Akka, akka-actor, +is very small and easily dropped into an existing project where you need +asynchronicity and lockless concurrency without hassle. 
+ +You can choose to include only the parts of akka you need in your application +and then there's the whole package, the Akka Microkernel, which is a standalone +container to deploy your Akka application in. With CPUs growing more and more +cores every cycle, Akka is the alternative that provides outstanding performance +even if you're only running it on one machine. Akka also supplies a wide array +of concurrency-paradigms, allowing for users to choose the right tool for the +job. + +The integration possibilities for Akka Actors are immense through the Apache +Camel integration. We provide Software Transactional Memory concurrency control +through the excellent Multiverse project, and have integrated that with Actors, +creating Transactors for coordinated concurrent transactions. We have Agents and +Dataflow concurrency as well. + + +What's a good use-case for Akka? +-------------------------------- + +(Web, Cloud, Application) Services - Actors lets you manage service failures +(Supervisors), load management (back-off strategies, timeouts and +processing-isolation), both horizontal and vertical scalability (add more cores +and/or add more machines). Think payment processing, invoicing, order matching, +datacrunching, messaging. Really any highly transactional systems like banking, +betting, games. + +Here's what some of the Akka users have to say about how they are using Akka: +http://stackoverflow.com/questions/4493001/good-use-case-for-akka + + +Cloudy Akka +----------- + +And that's all in the ApacheV2-licensed open source project. On top of that we +have a commercial product called Cloudy Akka which provides the following +features: + +#. Dynamically clustered ActorRegistry with both automatic and manual migration + of actors + +#. Cluster membership and cluster event subscriptions + +#. Durable actor mailboxes of different sizes and shapes - file-backed, + Redis-backed, ZooKeeper-backed, Beanstalkd-backed and with AMQP and JMS-based + in the works + +#. 
Monitoring influenced by Dapper for cross-machine message tracing and + JMX-exposed statistics + +Read more `here `_. diff --git a/akka-docs/manual/more.png b/akka-docs/manual/more.png deleted file mode 100644 index 3eb7b05c84..0000000000 Binary files a/akka-docs/manual/more.png and /dev/null differ diff --git a/akka-docs/pending/Migration-1.0-1.1.rst b/akka-docs/pending/Migration-1.0-1.1.rst deleted file mode 100644 index b9f88bf4fc..0000000000 --- a/akka-docs/pending/Migration-1.0-1.1.rst +++ /dev/null @@ -1,32 +0,0 @@ -Moved to Scala 2.9.x -^^^^^^^^^^^^^^^^^^^^ - -Akka HTTP -========= - -# akka.servlet.Initializer has been moved to akka-kernel to be able to have akka-http not depend on akka-remote, if you don't want to use the class for kernel, just create your own version of akka.servlet.Initializer, it's just a couple of lines of code and there is instructions here: `Akka Http Docs `_ -# akka.http.ListWriter has been removed in full, if you use it and want to keep using it, here's the code: `ListWriter `_ -# Jersey-server is now a "provided" dependency for Akka-http, so you'll need to add the dependency to your project, it's built against Jersey 1.3 - -Akka Actor -========== - -# is now dependency free, with the exception of the dependency on the scala-library.jar -# does not bundle any logging anymore, but you can subscribe to events within Akka by registering an event handler on akka.aevent.EventHandler or by specifying the FQN of an Actor in the akka.conf under akka.event-handlers; there is an akka-slf4j module which still provides the Logging trait and a default SLF4J logger adapter. 
-# If you used HawtDispatcher and want to continue using it, you need to include akka-dispatcher-extras.jar from Akka Modules, in your akka.conf you need to specify: "akka.dispatch.HawtDispatcherConfigurator" instead of "HawtDispatcher" -# FSM: the onTransition method changed from Function1 to PartialFunction; there is an implicit conversion for the precise types in place, but it may be necessary to add an underscore if you are passing an eta-expansion (using a method as function value). - -Akka Typed Actor -================ - -All methods starting with 'get*' are deprecated and will be removed in post 1.1 release. - -Akka Remote -=========== - -# UnparsebleException => CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(exception, classname, message) - -Akka Testkit -============ - -The TestKit moved into the akka-testkit subproject and correspondingly into the akka.testkit package. diff --git a/akka-docs/pending/articles.rst b/akka-docs/pending/articles.rst index 91138b404c..06f01f9a7d 100644 --- a/akka-docs/pending/articles.rst +++ b/akka-docs/pending/articles.rst @@ -16,9 +16,15 @@ Videos `Akka talk at Scala Days - March 2010 `_ +`Devoxx 2010 talk "Akka: Simpler Scalability, Fault-Tolerance, Concurrency" by Viktor Klang `_ + Articles -------- +`Actor-Based Continuations with Akka and Swarm `_ + +`Mimicking Twitter Using an Akka-Based Event-Driven Architecture `_ + `Remote Actor Class Loading with Akka `_ `Akka Producer Actors: New Features and Best Practices `_ diff --git a/akka-docs/pending/cluster-membership.rst b/akka-docs/pending/cluster-membership.rst deleted file mode 100644 index 6aa70e8bce..0000000000 --- a/akka-docs/pending/cluster-membership.rst +++ /dev/null @@ -1,89 +0,0 @@ -Cluster Membership (Scala) -========================== - -Module stability: **IN PROGRESS** - -Akka supports a Cluster Membership through a `JGroups `_ based implementation. 
JGroups is is a `P2P `_ clustering API - -Configuration -------------- - -The cluster is configured in 'akka.conf' by adding the Fully Qualified Name (FQN) of the actor class and serializer: - -.. code-block:: ruby - - remote { - cluster { - service = on - name = "default" # The name of the cluster - serializer = "akka.serialization.Serializer$Java" # FQN of the serializer class - } - } - -How to join the cluster ------------------------ - -The node joins the cluster when the 'RemoteNode' and/or 'RemoteServer' servers are started. - -Cluster API ------------ - -Interaction with the cluster is done through the 'akka.remote.Cluster' object. - -To send a message to all actors of a specific type on other nodes in the cluster use the 'relayMessage' function: - -.. code-block:: scala - - def relayMessage(to: Class[_ <: Actor], msg: AnyRef): Unit - -Here is an example: - -.. code-block:: scala - - Cluster.relayMessage(classOf[ATypeOfActor], message) - -Traversing the remote nodes in the cluster to spawn remote actors: - -Cluster.foreach: - -.. code-block:: scala - - def foreach(f : (RemoteAddress) => Unit) : Unit - -Here's an example: - -.. code-block:: scala - - for(endpoint <- Cluster) spawnRemote[KungFuActor](endpoint.hostname,endpoint.port) - -and: - -.. code-block:: scala - - Cluster.foreach( endpoint => spawnRemote[KungFuActor](endpoint.hostname,endpoint.port) ) - -Cluster.lookup: - -.. code-block:: scala - - def lookup[T](handleRemoteAddress : PartialFunction[RemoteAddress, T]) : Option[T] - -Here is an example: - -.. code-block:: scala - - val myRemoteActor: Option[SomeActorType] = Cluster.lookup({ - case RemoteAddress(hostname, port) => spawnRemote[SomeActorType](hostname, port) - }) - - myRemoteActor.foreach(remoteActor => ...) - -Here is another example: - -.. 
code-block:: scala - Cluster.lookup({ - case remoteAddress @ RemoteAddress(_,_) => remoteAddress - }) match { - case Some(remoteAddress) => spawnAllRemoteActors(remoteAddress) - case None => handleNoRemoteNodeFound - } diff --git a/akka-docs/pending/configuration.rst b/akka-docs/pending/configuration.rst deleted file mode 100644 index 19d4a1a566..0000000000 --- a/akka-docs/pending/configuration.rst +++ /dev/null @@ -1,180 +0,0 @@ -Configuration -============= - -Specifying the configuration file ---------------------------------- - -If you don't specify a configuration file then Akka is using default values. If you want to override these then you should edit the 'akka.conf' file in the 'AKKA_HOME/config' directory. This config inherits from the 'akka-reference.conf' file that you see below, use your 'akka.conf' to override any property in the reference config. - -The config can be specified in a various of ways: - -* Define the '-Dakka.config=...' system property option. -* Put the 'akka.conf' file on the classpath. -* Define 'AKKA_HOME' environment variable pointing to the root of the Akka distribution, in which the config is taken from the 'AKKA_HOME/config' directory, you can also point to the AKKA_HOME by specifying the '-Dakka.home=...' system property option. - -Defining the configuration file -------------------------------- - -``_ -#################### -# Akka Config File # -#################### - -# This file has all the default settings, so all these could be removed with no visible effect. -# Modify as needed. - -akka { - version = "1.1-SNAPSHOT" # Akka version, checked against the runtime version of Akka. - - enabled-modules = [] # Comma separated list of the enabled modules. 
Options: ["remote", "camel", "http"] - - time-unit = "seconds" # Time unit for all timeout properties throughout the config - - event-handlers = ["akka.event.EventHandler$DefaultListener"] # event handlers to register at boot time (EventHandler$DefaultListener logs to STDOUT) - event-handler-level = "DEBUG" # Options: ERROR, WARNING, INFO, DEBUG - - # These boot classes are loaded (and created) automatically when the Akka Microkernel boots up - # Can be used to bootstrap your application(s) - # Should be the FQN (Fully Qualified Name) of the boot class which needs to have a default constructor - # boot = ["sample.camel.Boot", - # "sample.rest.java.Boot", - # "sample.rest.scala.Boot", - # "sample.security.Boot"] - boot = [] - - actor { - timeout = 5 # Default timeout for Future based invocations - # - Actor: !! && !!! - # - UntypedActor: sendRequestReply && sendRequestReplyFuture - # - TypedActor: methods with non-void return type - serialize-messages = off # Does a deep clone of (non-primitive) messages to ensure immutability - throughput = 5 # Default throughput for all ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness - throughput-deadline-time = -1 # Default throughput deadline for all ExecutorBasedEventDrivenDispatcher, set to 0 or negative for no deadline - dispatcher-shutdown-timeout = 1 # Using the akka.time-unit, how long dispatchers by default will wait for new actors until they shut down - - default-dispatcher { - type = "GlobalExecutorBasedEventDriven" # Must be one of the following, all "Global*" are non-configurable - # - ExecutorBasedEventDriven - # - ExecutorBasedEventDrivenWorkStealing - # - GlobalExecutorBasedEventDriven - keep-alive-time = 60 # Keep alive time for threads - core-pool-size-factor = 1.0 # No of core threads ... ceil(available processors * factor) - max-pool-size-factor = 4.0 # Max no of threads ... 
ceil(available processors * factor) - executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded - allow-core-timeout = on # Allow core threads to time out - rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard - throughput = 5 # Throughput for ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness - throughput-deadline-time = -1 # Throughput deadline for ExecutorBasedEventDrivenDispatcher, set to 0 or negative for no deadline - mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) - # If positive then a bounded mailbox is used and the capacity is set using the property - # NOTE: setting a mailbox to 'blocking' can be a bit dangerous, - # could lead to deadlock, use with care - # - # The following are only used for ExecutorBasedEventDriven - # and only if mailbox-capacity > 0 - mailbox-push-timeout-time = 10 # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout - # (in unit defined by the time-unit property) - } - } - - stm { - fair = on # Should global transactions be fair or non-fair (non fair yield better performance) - max-retries = 1000 - timeout = 5 # Default timeout for blocking transactions and transaction set (in unit defined by - # the time-unit property) - write-skew = true - blocking-allowed = false - interruptible = false - speculative = true - quick-release = true - propagation = "requires" - trace-level = "none" - } - - jta { - provider = "from-jndi" # Options: - "from-jndi" (means that Akka will try to detect a TransactionManager in the JNDI) - # - "atomikos" (means that Akka will use the Atomikos based JTA impl in 'akka-jta', - # e.g. you need the akka-jta JARs on classpath). 
- timeout = 60 - } - - http { - hostname = "localhost" - port = 9998 - - #If you are using akka.http.AkkaRestServlet - filters = ["se.scalablesolutions.akka.security.AkkaSecurityFilterFactory"] # List with all jersey filters to use - # resource-packages = ["sample.rest.scala", - # "sample.rest.java", - # "sample.security"] # List with all resource packages for your Jersey services - resource-packages = [] - - # The authentication service to use. Need to be overridden (sample now) - # authenticator = "sample.security.BasicAuthenticationService" - authenticator = "N/A" - - # Uncomment if you are using the KerberosAuthenticationActor - # kerberos { - # servicePrincipal = "HTTP/localhost@EXAMPLE.COM" - # keyTabLocation = "URL to keytab" - # kerberosDebug = "true" - # realm = "EXAMPLE.COM" - # } - kerberos { - servicePrincipal = "N/A" - keyTabLocation = "N/A" - kerberosDebug = "N/A" - realm = "" - } - - #If you are using akka.http.AkkaMistServlet - mist-dispatcher { - #type = "GlobalExecutorBasedEventDriven" # Uncomment if you want to use a different dispatcher than the default one for Comet - } - connection-close = true # toggles the addition of the "Connection" response header with a "close" value - root-actor-id = "_httproot" # the id of the actor to use as the root endpoint - root-actor-builtin = true # toggles the use of the built-in root endpoint base class - timeout = 1000 # the default timeout for all async requests (in ms) - expired-header-name = "Async-Timeout" # the name of the response header to use when an async request expires - expired-header-value = "expired" # the value of the response header to use when an async request expires - } - - remote { - - # secure-cookie = "050E0A0D0D06010A00000900040D060F0C09060B" # generate your own with '$AKKA_HOME/scripts/generate_secure_cookie.sh' or using 'Crypt.generateSecureCookie' - secure-cookie = "" - - compression-scheme = "zlib" # Options: "zlib" (lzf to come), leave out for no compression - 
zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6 - - layer = "akka.remote.netty.NettyRemoteSupport" - - server { - hostname = "localhost" # The hostname or IP that clients should connect to - port = 2552 # The port clients should connect to. Default is 2552 (AKKA) - message-frame-size = 1048576 # Increase this if you want to be able to send messages with large payloads - connection-timeout = 1 - require-cookie = off # Should the remote server require that it peers share the same secure-cookie (defined in the 'remote' section)? - untrusted-mode = off # Enable untrusted mode for full security of server managed actors, allows untrusted clients to connect. - backlog = 4096 # Sets the size of the connection backlog - execution-pool-keepalive = 60# Length in akka.time-unit how long core threads will be kept alive if idling - execution-pool-size = 16# Size of the core pool of the remote execution unit - max-channel-memory-size = 0 # Maximum channel size, 0 for off - max-total-memory-size = 0 # Maximum total size of all channels, 0 for off - } - - client { - buffering { - retry-message-send-on-failure = on - capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) - # If positive then a bounded mailbox is used and the capacity is set using the property - } - reconnect-delay = 5 - read-timeout = 10 - message-frame-size = 1048576 - reap-futures-delay = 5 - reconnection-time-window = 600 # Maximum time window that a client should try to reconnect for - } - } -} -``_ diff --git a/akka-docs/pending/dispatchers-java.rst b/akka-docs/pending/dispatchers-java.rst index 3aa1a34f13..7889db30fc 100644 --- a/akka-docs/pending/dispatchers-java.rst +++ b/akka-docs/pending/dispatchers-java.rst @@ -12,11 +12,7 @@ The event-based Actors currently consume ~600 bytes per Actor which means that y Default dispatcher ------------------ -For most scenarios the default settings are the best. 
Here we have one single event-based dispatcher for all Actors created. The dispatcher used is this one: - -.. code-block:: java - - Dispatchers.globalExecutorBasedEventDrivenDispatcher(); +For most scenarios the default settings are the best. Here we have one single event-based dispatcher for all Actors created. The dispatcher used is globalExecutorBasedEventDrivenDispatcher in akka.dispatch.Dispatchers. But if you feel that you are starting to contend on the single dispatcher (the 'Executor' and its queue) or want to group a specific set of Actors for a dedicated dispatcher for better flexibility and configurability then you can override the defaults and define your own dispatcher. See below for details on which ones are available and how they can be configured. @@ -49,7 +45,6 @@ There are six different types of message dispatchers: * Event-based * Priority event-based * Work-stealing event-based -* HawtDispatch-based event-driven Factory methods for all of these, including global versions of some of them, are in the 'akka.dispatch.Dispatchers' object. @@ -68,7 +63,7 @@ It would normally by used from within the actor like this: .. code-block:: java - class MyActor extends Actor { + class MyActor extends UntypedActor { public MyActor() { getContext().setDispatcher(Dispatchers.newThreadBasedDispatcher(getContext())); } @@ -102,14 +97,18 @@ Here is an example: .. code-block:: java + import akka.actor.Actor; + import akka.dispatch.Dispatchers; + import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy; + class MyActor extends UntypedActor { public MyActor() { getContext().setDispatcher(Dispatchers.newExecutorBasedEventDrivenDispatcher(name) - .withNewThreadPoolWithBoundedBlockingQueue(100) + .withNewThreadPoolWithLinkedBlockingQueueWithCapacity(100) .setCorePoolSize(16) .setMaxPoolSize(128) .setKeepAliveTimeInMillis(60000) - .setRejectionPolicy(new CallerRunsPolicy) + .setRejectionPolicy(new CallerRunsPolicy()) .build()); } ... 
@@ -134,7 +133,7 @@ Priority event-based Sometimes it's useful to be able to specify priority order of messages, that is done by using PriorityExecutorBasedEventDrivenDispatcher and supply a java.util.Comparator[MessageInvocation] or use a akka.dispatch.PriorityGenerator (recommended): -Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator in Java: +Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator: .. code-block:: java @@ -155,8 +154,8 @@ Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator in // Create a new PriorityGenerator, lower prio means more important PriorityGenerator gen = new PriorityGenerator() { public int gen(Object message) { - if (message == "highpriority") return 0; // "highpriority" messages should be treated first if possible - else if (message == "lowpriority") return 100; // "lowpriority" messages should be treated last if possible + if (message.equals("highpriority")) return 0; // "highpriority" messages should be treated first if possible + else if (message.equals("lowpriority")) return 100; // "lowpriority" messages should be treated last if possible else return 50; // We default to 50 } }; @@ -193,12 +192,12 @@ Work-stealing event-based The 'ExecutorBasedEventDrivenWorkStealingDispatcher' is a variation of the 'ExecutorBasedEventDrivenDispatcher' in which Actors of the same type can be set up to share this dispatcher and during execution time the different actors will steal messages from other actors if they have less messages to process. This can be a great way to improve throughput at the cost of a little higher latency. -Normally the way you use it is to create an Actor companion object to hold the dispatcher and then set in in the Actor explicitly. +Normally the way you use it is to define a static field to hold the dispatcher and then set in in the Actor explicitly. .. 
code-block:: java class MyActor extends UntypedActor { - public static Dispatcher dispatcher = Dispatchers.newExecutorEventBasedWorkStealingDispatcher(name); + public static MessageDispatcher dispatcher = Dispatchers.newExecutorBasedEventDrivenWorkStealingDispatcher(name).build(); public MyActor() { getContext().setDispatcher(dispatcher); @@ -209,79 +208,6 @@ Normally the way you use it is to create an Actor companion object to hold the d Here is an article with some more information: `Load Balancing Actors with Work Stealing Techniques `_ Here is another article discussing this particular dispatcher: `Flexible load balancing with Akka in Scala `_ -HawtDispatch-based event-driven -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The 'HawtDispatcher' uses the `HawtDispatch threading library `_ which is a Java clone of libdispatch. All actors with this type of dispatcher are executed on a single system wide fixed sized thread pool. The number of of threads will match the number of cores available on your system. The dispatcher delivers messages to the actors in the order that they were producer at the sender. - -A 'HawtDispatcher' instance can be shared by many actors. Normally the way you use it is to create an Actor companion object to hold the dispatcher and then set in in the Actor explicitly. - -.. code-block:: java - - import akka.actor.dispatch.HawtDispatcher; - - class MyActor extends Actor { - public static Dispatcher dispatcher = new HawtDispatcher(); - - public MyActor() { - getContext().setDispatcher(dispatcher); - } - ... - } - -Since a fixed thread pool is being used, an actor using a 'HawtDispatcher' is restricted to executing non blocking operations. For example, the actor is NOT alllowed to: -* synchronously call another actor -* call 3rd party libraries that can block -* use sockets that are in blocking mode - -HawtDispatch supports integrating non-blocking Socket IO events with your actors. 
Every thread in the HawtDispatch thread pool is parked in an IO event loop when it is not executing an actors. The IO events can be configured to be get delivered to the actor in either the reactor or proactor style. For an example, see `HawtDispacherEchoServer.scala `_. - -A `HawtDispatcher` will aggregate cross actor messages by default. This means that if Actor *A* is executing and sends actor *B* 10 messages, those messages will not be delivered to actor *B* until *A*'s execution ends. HawtDispatch will aggregate the 10 messages into 1 single enqueue operation on to actor *B*'s inbox. This an significantly reduce mailbox contention when actors are very chatty. If you want to avoid this aggregation behavior, then create the `HawtDispatcher` like this: - -.. code-block:: java - - Dispatcher dispatcher = new HawtDispatcher(false); - -The `HawtDispatcher` provides a companion object that lets you use more advanced HawtDispatch features. For example to pin an actor so that it always executed on the same thread in the thread poool you would: - -.. code-block:: java - - ActorRef a = ... - HawtDispatcher.pin(a); - -If you have an Actor *b* which will be sending many messages to an Actor *a*, then you may want to consider setting *b*'s dispatch target to be *a*'s dispatch queue. When this is the case, messages sent from *b* to a will avoid cross thread mailbox contention. A side-effect of this is that the *a* and *b* actors will execute as if they shared a single mailbox. - -.. code-block:: java - - ActorRef a = ... - ActorRef b = ... - HawtDispatcher.target(b, HawtDispatcher.queue(a)); - -**Java API** - -.. code-block:: java - - MessageDispatcher dispatcher = Dispatchers.newExecutorEventBasedThreadPoolDispatcher(name); - -The dispatcher for an Typed Actor can be defined in the declarative configuration: - -.. code-block:: java - - ... 
// part of configuration - new Component( - MyTypedActor.class, - MyTypedActorImpl.class, - new LifeCycle(new Permanent()), - dispatcher, // <<== set it here - 1000); - ... - -It can also be set when creating a new Typed Actor programmatically. - -.. code-block:: java - - MyPOJO pojo = (MyPOJO) TypedActor.newInstance(MyPOJO.class, MyPOJOImpl.class, 1000, dispatcher); - Making the Actor mailbox bounded -------------------------------- @@ -295,7 +221,7 @@ You can make the Actor mailbox bounded by a capacity in two ways. Either you def actor { default-dispatcher { mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) - # If positive then a bounded mailbox is used and the capacity is set to the number specificed + # If positive then a bounded mailbox is used and the capacity is set to the number specified } } @@ -310,7 +236,12 @@ For the 'ExecutorBasedEventDrivenDispatcher' and the 'ExecutorBasedWorkStealingD class MyActor extends UntypedActor { public MyActor() { - getContext().setDispatcher(Dispatchers.newExecutorBasedEventDrivenDispatcher(name, throughput, mailboxCapacity)); + int capacity = 100; + Duration pushTimeout = new FiniteDuration(10, TimeUnit.SECONDS); + MailboxType mailboxCapacity = new BoundedMailbox(false, capacity, pushTimeout); + MessageDispatcher dispatcher = + Dispatchers.newExecutorBasedEventDrivenDispatcher(name, throughput, mailboxCapacity).build(); + getContext().setDispatcher(dispatcher); } ... } @@ -321,7 +252,9 @@ Making it bounded (by specifying a capacity) is optional, but if you do, you nee ``_ class MyActor extends UntypedActor { public MyActor() { - getContext().setDispatcher(Dispatchers.newThreadBasedDispatcher(getContext(), mailboxCapacity, pushTimeout, pushTimeUnit)); + int mailboxCapacity = 100; + Duration pushTimeout = new FiniteDuration(10, TimeUnit.SECONDS); + getContext().setDispatcher(Dispatchers.newThreadBasedDispatcher(getContext(), mailboxCapacity, pushTimeout)); } ... 
} diff --git a/akka-docs/pending/dispatchers-scala.rst b/akka-docs/pending/dispatchers-scala.rst index 9b67b7b58e..35df55724f 100644 --- a/akka-docs/pending/dispatchers-scala.rst +++ b/akka-docs/pending/dispatchers-scala.rst @@ -5,7 +5,7 @@ Module stability: **SOLID** The Dispatcher is an important piece that allows you to configure the right semantics and parameters for optimal performance, throughput and scalability. Different Actors have different needs. -Akka supports dispatchers for both event-driven lightweight threads, allowing creation of millions threads on a single workstation, and thread-based Actors, where each dispatcher is bound to a dedicated OS thread. +Akka supports dispatchers for both event-driven lightweight threads, allowing creation of millions of threads on a single workstation, and thread-based Actors, where each dispatcher is bound to a dedicated OS thread. The event-based Actors currently consume ~600 bytes per Actor which means that you can create more than 6.5 million Actors on 4 G RAM. @@ -47,12 +47,27 @@ There are six different types of message dispatchers: * Event-based * Priority event-based * Work-stealing -* HawtDispatch-based event-driven Factory methods for all of these, including global versions of some of them, are in the 'akka.dispatch.Dispatchers' object. Let's now walk through the different dispatchers in more detail. +Thread-based +^^^^^^^^^^^^ + +The 'ThreadBasedDispatcher' binds a dedicated OS thread to each specific Actor. The messages are posted to a 'LinkedBlockingQueue' which feeds the messages to the dispatcher one by one. A 'ThreadBasedDispatcher' cannot be shared between actors. This dispatcher has worse performance and scalability than the event-based dispatcher but works great for creating "daemon" Actors that consumes a low frequency of messages and are allowed to go off and do their own thing for a longer period of time. Another advantage with this dispatcher is that Actors do not block threads for each other. 
+ +It would normally be used from within the actor like this: + +.. code-block:: java + + class MyActor extends Actor { + public MyActor() { + self.dispatcher = Dispatchers.newThreadBasedDispatcher(self) + } + ... + } + Event-based ^^^^^^^^^^^ @@ -80,9 +95,13 @@ Here is an example: .. code-block:: scala + import akka.actor.Actor + import akka.dispatch.Dispatchers + import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy + class MyActor extends Actor { self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(name) - .withNewThreadPoolWithBoundedBlockingQueue(100) + .withNewThreadPoolWithLinkedBlockingQueueWithCapacity(100) .setCorePoolSize(16) .setMaxPoolSize(128) .setKeepAliveTimeInMillis(60000) @@ -110,12 +129,11 @@ Priority event-based Sometimes it's useful to be able to specify priority order of messages, that is done by using PriorityExecutorBasedEventDrivenDispatcher and supply a java.util.Comparator[MessageInvocation] or use a akka.dispatch.PriorityGenerator (recommended): -Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator in Java: +Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator: .. code-block:: scala import akka.dispatch._ - import akka.actor._ val gen = PriorityGenerator { // Create a new PriorityGenerator, lower prio means more important @@ -138,17 +156,11 @@ Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator in a.dispatcher.suspend(a) // Suspening the actor so it doesn't start to treat the messages before we have enqueued all of them :-) a ! 'lowpriority - a ! 'lowpriority - a ! 'highpriority - a ! 'pigdog - a ! 'pigdog2 - a ! 'pigdog3 - a ! 'highpriority a.dispatcher.resume(a) // Resuming the actor so it will start treating its messages @@ -173,7 +185,7 @@ Normally the way you use it is to create an Actor companion object to hold the d ..
code-block:: scala object MyActor { - val dispatcher = Dispatchers.newExecutorEventBasedWorkStealingDispatcher(name) + val dispatcher = Dispatchers.newExecutorBasedEventDrivenWorkStealingDispatcher(name).build } class MyActor extends Actor { @@ -184,54 +196,6 @@ Normally the way you use it is to create an Actor companion object to hold the d Here is an article with some more information: `Load Balancing Actors with Work Stealing Techniques `_ Here is another article discussing this particular dispatcher: `Flexible load balancing with Akka in Scala `_ -HawtDispatch-based event-driven -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The 'HawtDispatcher' uses the `HawtDispatch threading library `_ which is a Java clone of libdispatch. All actors with this type of dispatcher are executed on a single system wide fixed sized thread pool. The number of of threads will match the number of cores available on your system. The dispatcher delivers messages to the actors in the order that they were producer at the sender. - -A 'HawtDispatcher' instance can be shared by many actors. Normally the way you use it is to create an Actor companion object to hold the dispatcher and then set in in the Actor explicitly. - -.. code-block:: scala - - import akka.dispatch.HawtDispatcher - - object MyActor { - val dispatcher = new HawtDispatcher - } - - class MyActor extends Actor { - self.dispatcher = MyActor.dispatcher - ... - } - -Since a fixed thread pool is being used, an actor using a 'HawtDispatcher' is restricted to executing non blocking operations. For example, the actor is NOT alllowed to: -* synchronously call another actor -* call 3rd party libraries that can block -* use sockets that are in blocking mode - -HawtDispatch supports integrating non-blocking Socket IO events with your actors. Every thread in the HawtDispatch thread pool is parked in an IO event loop when it is not executing an actors. 
The IO events can be configured to be get delivered to the actor in either the reactor or proactor style. For an example, see `HawtDispacherEchoServer.scala `_. - -A `HawtDispatcher` will aggregate cross actor messages by default. This means that if Actor *A* is executing and sends actor *B* 10 messages, those messages will not be delivered to actor *B* until *A*'s execution ends. HawtDispatch will aggregate the 10 messages into 1 single enqueue operation on to actor *B*'s inbox. This an significantly reduce mailbox contention when actors are very chatty. If you want to avoid this aggregation behavior, then create the `HawtDispatcher` like this: - -.. code-block:: scala - - val dispatcher = new HawtDispatcher(false) - -The `HawtDispatcher` provides a companion object that lets you use more advanced HawtDispatch features. For example to pin an actor so that it always executed on the same thread in the thread poool you would: - -.. code-block:: scala - - val a: ActorRef = ... - HawtDispatcher.pin(a) - -If you have an Actor *b* which will be sending many messages to an Actor *a*, then you may want to consider setting *b*'s dispatch target to be *a*'s dispatch queue. When this is the case, messages sent from *b* to a will avoid cross thread mailbox contention. A side-effect of this is that the *a* and *b* actors will execute as if they shared a single mailbox. - -.. code-block:: scala - - val a: ActorRef = ... - val b: ActorRef = ... - HawtDispatcher.target(b, HawtDispatcher.queue(a)) - Making the Actor mailbox bounded -------------------------------- @@ -245,7 +209,7 @@ You can make the Actor mailbox bounded by a capacity in two ways. 
Either you def actor { default-dispatcher { mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) - # If positive then a bounded mailbox is used and the capacity is set to the number specificed + # If positive then a bounded mailbox is used and the capacity is set to the number specified } } @@ -259,7 +223,8 @@ For the 'ExecutorBasedEventDrivenDispatcher' and the 'ExecutorBasedWorkStealingD .. code-block:: scala class MyActor extends Actor { - self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(name, throughput, mailboxCapacity) + val mailboxCapacity = BoundedMailbox(capacity = 100) + self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(name, throughput, mailboxCapacity).build ... } @@ -268,7 +233,9 @@ Making it bounded (by specifying a capacity) is optional, but if you do, you nee ``_ class MyActor extends Actor { - self.dispatcher = Dispatchers.newThreadBasedDispatcher(self, mailboxCapacity, pushTimeout, pushTimeoutUnit) + import akka.util.duration._ + self.dispatcher = Dispatchers.newThreadBasedDispatcher(self, mailboxCapacity = 100, + pushTimeOut = 10 seconds) ... } ``_ diff --git a/akka-docs/pending/fault-tolerance-java.rst b/akka-docs/pending/fault-tolerance-java.rst index 5c6510bcbd..18cbb63e9e 100644 --- a/akka-docs/pending/fault-tolerance-java.rst +++ b/akka-docs/pending/fault-tolerance-java.rst @@ -125,6 +125,36 @@ The Actor’s supervision can be declaratively defined by creating a ‘Supervis Supervisors created like this are implicitly instantiated and started. +To configure a handler function for when the actor underlying the supervisor receives a MaximumNumberOfRestartsWithinTimeRangeReached message, you can specify + a Procedure2 when creating the SupervisorConfig. This handler will be called with the ActorRef of the supervisor and the +MaximumNumberOfRestartsWithinTimeRangeReached message. + +..
code-block:: java + + import static akka.config.Supervision.*; + import static akka.actor.Actors.*; + import akka.event.JavaEventHandler; + + Procedure2 handler = new Procedure2() { + public void apply(ActorRef ref, MaximumNumberOfRestartsWithinTimeRangeReached max) { + JavaEventHandler.error(ref, max); + } + }; + + Supervisor supervisor = new Supervisor( + new SupervisorConfig( + new AllForOneStrategy(new Class[]{Exception.class}, 3, 5000), + new Supervise[] { + new Supervise( + actorOf(MyActor1.class), + permanent()), + new Supervise( + actorOf(MyActor2.class), + permanent()) + },handler)); + + + You can link and unlink actors from a declaratively defined supervisor using the 'link' and 'unlink' methods: .. code-block:: java diff --git a/akka-docs/pending/fault-tolerance-scala.rst b/akka-docs/pending/fault-tolerance-scala.rst index 279e69b849..5e02cf232a 100644 --- a/akka-docs/pending/fault-tolerance-scala.rst +++ b/akka-docs/pending/fault-tolerance-scala.rst @@ -121,6 +121,29 @@ The Actor's supervision can be declaratively defined by creating a "Supervisor' Supervisors created like this are implicitly instantiated and started. +To configure a handler function for when the actor underlying the supervisor receives a MaximumNumberOfRestartsWithinTimeRangeReached message, you can specify a function of type +(ActorRef, MaximumNumberOfRestartsWithinTimeRangeReached) => Unit when creating the SupervisorConfig. This handler will be called with the ActorRef of the supervisor and the +MaximumNumberOfRestartsWithinTimeRangeReached message. + + +..
code-block:: scala + + val handler = { + (supervisor:ActorRef,max:MaximumNumberOfRestartsWithinTimeRangeReached) => EventHandler.notify(supervisor,max) + } + + val supervisor = Supervisor( + SupervisorConfig( + AllForOneStrategy(List(classOf[Exception]), 3, 1000), + Supervise( + actorOf[MyActor1], + Permanent) :: + Supervise( + actorOf[MyActor2], + Permanent) :: + Nil), handler) + + You can link and unlink actors from a declaratively defined supervisor using the 'link' and 'unlink' methods: .. code-block:: scala diff --git a/akka-docs/pending/fsm-scala.rst b/akka-docs/pending/fsm-scala.rst deleted file mode 100644 index 9471b39165..0000000000 --- a/akka-docs/pending/fsm-scala.rst +++ /dev/null @@ -1,218 +0,0 @@ -FSM -=== - -Module stability: **STABLE** - -The FSM (Finite State Machine) is available as a mixin for the akka Actor and is best described in the `Erlang design principals <@http://www.erlang.org/documentation/doc-4.8.2/doc/design_principles/fsm.html>`_ - -A FSM can be described as a set of relations of the form: -> **State(S) x Event(E) -> Actions (A), State(S')** - -These relations are interpreted as meaning: -> *If we are in state S and the event E occurs, we should perform the actions A and make a transition to the state S'.* - -State Definitions ------------------ - -To demonstrate the usage of states we start with a simple state only FSM without state data. The state can be of any type so for this example we create the states A, B and C. - -.. code-block:: scala - - sealed trait ExampleState - case object A extends ExampleState - case object B extends ExampleState - case object C extends ExampleState - -Now lets create an object to influence the FSM and define the states and their behaviour. - -.. 
code-block:: scala - - import akka.actor.{Actor, FSM} - import FSM._ - import akka.util.duration._ - - case object Move - - class ABC extends Actor with FSM[ExampleState,Unit] { - - startWith(A, Unit) - - when(A) { - case Event(Move, _) => - log.info("Go to B and move on after 5 seconds") - goto(B) forMax (5 seconds) - } - - when(B) { - case Event(StateTimeout, _) => - log.info("Moving to C") - goto(C) - } - - when(C) { - case Event(Move, _) => - log.info("Stopping") - stop - } - - initialize // this checks validity of the initial state and sets up timeout if needed - } - -So we use 'when' to specify a state and define what needs to happen when we receive an event. We use 'goto' to go to another state. We use 'forMax' to tell for how long we maximum want to stay in that state before we receive a timeout notification. We use 'stop' to stop the FSM. And we use 'startWith' to specify which state to start with. The call to 'initialize' should be the last action done in the actor constructor. - -If we want to stay in the current state we can use (I'm hoping you can guess this by now) 'stay'. That can also be combined with the 'forMax' - -.. code-block:: scala - - when(C) { - case Event(unknown, _) => - stay forMax (2 seconds) - } - -The timeout can also be associated with the state itself, the choice depends on whether most of the transitions to the state require the same value for the timeout: - -.. code-block:: scala - - when(A) { - case Ev(Start(msg)) => // convenience extractor when state data not needed - goto(Timer) using msg - } - - when(B, stateTimeout = 12 seconds) { - case Event(StateTimeout, msg) => - target ! msg - case Ev(DifferentPause(dur : Duration)) => - stay forMax dur // overrides default state timeout for this single transition - } - -Unhandled Events ----------------- - -If a state doesn't handle a received event a warning is logged. If you want to do something with this events you can specify that with 'whenUnhandled' - -.. 
code-block:: scala - - whenUnhandled { - case Event(x, _) => log.info("Received unhandled event: " + x) - } - -Termination ------------ - -You can use 'onTermination' to specify custom code that is executed when the FSM is stopped. A reason is passed to tell how the FSM was stopped. - -.. code-block:: scala - - onTermination { - case Normal => log.info("Stopped normal") - case Shutdown => log.info("Stopped because of shutdown") - case Failure(cause) => log.error("Stopped because of failure: " + cause) - } - -State Transitions ------------------ - -When state transitions to another state we might want to know about this and take action. To specify this we can use 'onTransition' to capture the transitions. - -.. code-block:: scala - - onTransition { - case A -> B => log.info("Moving from A to B") - case _ -> C => log.info("Moving from something to C") - } - -Multiple onTransition blocks may be given and all will be execution while processing a transition. This enables you to associate your Actions either with the initial state of a processing step, or with the transition into the final state of a processing step. - -Transitions occur "between states" conceptually, which means after any actions you have put into the event handling block; this is obvious since the next state is only defined by the value returned by the event handling logic. You do not need to worry about the exact order with respect to setting the internal state variable, as everything within the FSM actor is running single-threaded anyway. - -It is also possible to pass a function object accepting two states to onTransition, in case your state handling logic is implemented as a method: - -.. code-block:: scala - - onTransition(handler _) - - private def handler(from: State, to: State) { - ... - } - -State Data ----------- - -The FSM can also hold state data that is attached to every event. 
The state data can be of any type but to demonstrate let's look at a lock with a String as state data holding the entered unlock code. -First we need two states for the lock: - -.. code-block:: scala - - sealed trait LockState - case object Locked extends LockState - case object Open extends LockState - -Now we can create a lock FSM that takes LockState as a state and a String as state data: - -.. code-block:: scala - - import akka.actor.{FSM, Actor} - import FSM._ - import akka.util.duration._ - - class Lock(code: String) extends Actor with FSM[LockState, String] { - - val emptyCode = "" - - when(Locked) { - // receive a digit and the code that we have so far - case Event(digit: Char, soFar) => { - // add the digit to what we have - soFar + digit match { - // not enough digits yet so stay using the incomplete code as the new state data - case incomplete if incomplete.length < code.length => - stay using incomplete - // code matched the one from the lock so go to Open state and reset the state data - case `code` => - log.info("Unlocked") - goto(Open) using emptyCode forMax (1 seconds) - // wrong code, stay Locked and reset the state data - case wrong => - log.error("Wrong code " + wrong) - stay using emptyCode - } - } - } - - when(Open) { - // after the timeout, go back to Locked state - case Event(StateTimeout, _) => { - log.info("Locked") - goto(Locked) - } - } - - startWith(Locked, emptyCode) - } - -To use the Lock you can run a small program like this: - -.. code-block:: scala - - object Lock { - - def main(args: Array[String]) { - - val lock = Actor.actorOf(new Lock("1234")).start() - - lock ! '1' - lock ! '2' - lock ! '3' - lock ! '4' - - Actor.registry.shutdownAll() - exit - } - } - -Dining Hakkers --------------- - -A bigger FSM example can be found in the sources. 
-`Dining Hakkers using FSM <@https://github.com/jboner/akka/blob/master/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala#L1>`_ -`Dining Hakkers using become <@https://github.com/jboner/akka/blob/master/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala#L1>`_ diff --git a/akka-docs/pending/futures-scala.rst b/akka-docs/pending/futures-scala.rst index 5cbfc08cea..3426ce0ff6 100644 --- a/akka-docs/pending/futures-scala.rst +++ b/akka-docs/pending/futures-scala.rst @@ -158,24 +158,24 @@ This is fine when dealing with a known amount of Actors, but can grow unwieldly val listOfFutures: List[Future[Int]] = List.fill(100)(oddActor !!! GetNext) // now we have a Future[List[Int]] - val futureList = Futures.sequence(listOfFutures) + val futureList = Future.sequence(listOfFutures) // Find the sum of the odd numbers val oddSum = futureList.map(_.sum).apply -To better explain what happened in the example, Futures.sequence is taking the List[Future[Int]] and turning it into a Future[List[Int]]. We can then use 'map' to work with the List[Int] directly, and we find the sum of the List. +To better explain what happened in the example, Future.sequence is taking the List[Future[Int]] and turning it into a Future[List[Int]]. We can then use 'map' to work with the List[Int] directly, and we find the sum of the List. The 'traverse' method is similar to 'sequence', but it takes a Traversable[A] and a Function T => Future[B] to return a Future[Traversable[B]]. For example, to use 'traverse' to sum the first 100 odd numbers: .. code-block:: scala - val oddSum = Futures.traverse((1 to 100).toList)(x => Future(x * 2 - 1)).map(_.sum).apply + val oddSum = Future.traverse((1 to 100).toList)(x => Future(x * 2 - 1)).map(_.sum).apply This is the same result as this example: .. 
code-block:: scala - val oddSum = Futures.sequence((1 to 100).toList.map(x => Future(x * 2 - 1))).map(_.sum).apply + val oddSum = Future.sequence((1 to 100).toList.map(x => Future(x * 2 - 1))).map(_.sum).apply But it may be faster to use 'traverse' as it doesn't have to create an intermediate List[Future[Int]]. diff --git a/akka-docs/pending/actors-scala.rst b/akka-docs/scala/actors.rst similarity index 99% rename from akka-docs/pending/actors-scala.rst rename to akka-docs/scala/actors.rst index fc456ba71e..70f9e0cfcc 100644 --- a/akka-docs/pending/actors-scala.rst +++ b/akka-docs/scala/actors.rst @@ -1,5 +1,5 @@ -Actors (Scala) -============== +Actors +====== Module stability: **SOLID** diff --git a/akka-docs/scala/fsm.rst b/akka-docs/scala/fsm.rst new file mode 100644 index 0000000000..a23fd9edf4 --- /dev/null +++ b/akka-docs/scala/fsm.rst @@ -0,0 +1,484 @@ +FSM +=== + +.. sidebar:: Contents + + .. contents:: :local: + +.. module:: FSM + :platform: Scala + :synopsis: Finite State Machine DSL on top of Actors +.. moduleauthor:: Irmo Manie, Roland Kuhn +.. versionadded:: 1.0 + +Module stability: **STABLE** + +Overview +++++++++ + +The FSM (Finite State Machine) is available as a mixin for the akka Actor and +is best described in the `Erlang design principles +`_ + +A FSM can be described as a set of relations of the form: + + **State(S) x Event(E) -> Actions (A), State(S')** + +These relations are interpreted as meaning: + + *If we are in state S and the event E occurs, we should perform the actions A and make a transition to the state S'.* + +A Simple Example +++++++++++++++++ + +To demonstrate the usage of states we start with a simple FSM without state +data. The state can be of any type so for this example we create the states A, +B and C. + +.. 
code-block:: scala + + sealed trait ExampleState + case object A extends ExampleState + case object B extends ExampleState + case object C extends ExampleState + +Now lets create an object representing the FSM and defining the behaviour. + +.. code-block:: scala + + import akka.actor.{Actor, FSM} + import akka.event.EventHandler + import FSM._ + import akka.util.duration._ + + case object Move + + class ABC extends Actor with FSM[ExampleState, Unit] { + + startWith(A, Unit) + + when(A) { + case Ev(Move) => + EventHandler.info(this, "Go to B and move on after 5 seconds") + goto(B) forMax (5 seconds) + } + + when(B) { + case Ev(StateTimeout) => + EventHandler.info(this, "Moving to C") + goto(C) + } + + when(C) { + case Ev(Move) => + EventHandler.info(this, "Stopping") + stop + } + + initialize // this checks validity of the initial state and sets up timeout if needed + } + +Each state is described by one or more :func:`when(state)` blocks; if more than +one is given for the same state, they are tried in the order given until the +first is found which matches the incoming event. Events are matched using +either :func:`Ev(msg)` (if no state data are to be extracted) or +:func:`Event(msg, data)`, see below. The statements for each case are the +actions to be taken, where the final expression must describe the transition +into the next state. This can either be :func:`stay` when no transition is +needed or :func:`goto(target)` for changing into the target state. The +transition may be annotated with additional properties, where this example +includes a state timeout of 5 seconds after the transition into state B: +:func:`forMax(duration)` arranges for a :obj:`StateTimeout` message to be +scheduled, unless some other message is received first. The construction of the +FSM is finished by calling the :func:`initialize` method as last part of the +ABC constructor. 
+ +State Data +++++++++++ + +The FSM can also hold state data associated with the internal state of the +state machine. The state data can be of any type but to demonstrate let's look +at a lock with a :class:`String` as state data holding the entered unlock code. +First we need two states for the lock: + +.. code-block:: scala + + sealed trait LockState + case object Locked extends LockState + case object Open extends LockState + +Now we can create a lock FSM that takes :class:`LockState` as a state and a +:class:`String` as state data: + +.. code-block:: scala + + class Lock(code: String) extends Actor with FSM[LockState, String] { + + val emptyCode = "" + + startWith(Locked, emptyCode) + + when(Locked) { + // receive a digit and the code that we have so far + case Event(digit: Char, soFar) => { + // add the digit to what we have + soFar + digit match { + case incomplete if incomplete.length < code.length => + // not enough digits yet so stay using the incomplete code as the new state data + stay using incomplete + case `code` => + // code matched the one from the lock so go to Open state and reset the state data + goto(Open) using emptyCode forMax (1 seconds) + case wrong => + // wrong code, stay Locked and reset the state data + stay using emptyCode + } + } + } + + when(Open) { + case Ev(StateTimeout) => { + // after the timeout, go back to Locked state + goto(Locked) + } + } + + initialize + } + +This very simple example shows how the complete state of the FSM is encoded in +the :obj:`(State, Data)` pair and only explicitly updated during transitions. +This encapsulation is what makes state machines a powerful abstraction, e.g. +for handling socket states in a network server application. + +Reference ++++++++++ + +This section describes the DSL in a more formal way, refer to `Examples`_ for more sample material. + +The FSM Trait and Object +------------------------ + +The :class:`FSM` trait may only be mixed into an :class:`Actor`. 
Instead of +extending :class:`Actor`, the self type approach was chosen in order to make it +obvious that an actor is actually created. Importing all members of the +:obj:`FSM` object is recommended to receive useful implicits and directly +access the symbols like :obj:`StateTimeout`. This import is usually placed +inside the state machine definition: + +.. code-block:: scala + + class MyFSM extends Actor with FSM[State, Data] { + import FSM._ + + ... + + } + +The :class:`FSM` trait takes two type parameters: + + #. the supertype of all state names, usually a sealed trait with case objects + extending it, + #. the type of the state data which are tracked by the :class:`FSM` module + itself. + +.. _fsm-philosophy: + +.. note:: + + The state data together with the state name describe the internal state of + the state machine; if you stick to this scheme and do not add mutable fields + to the FSM class you have the advantage of making all changes of the + internal state explicit in a few well-known places. + +Defining Timeouts +----------------- + +The :class:`FSM` module uses :class:`akka.util.Duration` for all timing +configuration, which includes a mini-DSL: + +.. code-block:: scala + + import akka.util.duration._ // notice the small d + + val fivesec = 5.seconds + val threemillis = 3.millis + val diff = fivesec - threemillis + +.. note:: + + You may leave out the dot if the expression is clearly delimited (e.g. + within parentheses or in an argument list), but it is recommended to use it + if the time unit is the last token on a line, otherwise semi-colon inference + might go wrong, depending on what starts the next line. + +Several methods, like :func:`when()` and :func:`startWith()` take a +:class:`FSM.Timeout`, which is an alias for :class:`Option[Duration]`. There is +an implicit conversion available in the :obj:`FSM` object which makes this +transparent, just import it into your FSM body. 
+ +Defining States +--------------- + +A state is defined by one or more invocations of the method + + :func:`when([, stateTimeout = ])(stateFunction)`. + +The given name must be an object which is type-compatible with the first type +parameter given to the :class:`FSM` trait. This object is used as a hash key, +so you must ensure that it properly implements :meth:`equals` and +:meth:`hashCode`; in particular it must not be mutable. The easiest fit for +these requirements are case objects. + +If the :meth:`stateTimeout` parameter is given, then all transitions into this +state, including staying, receive this timeout by default. Initiating the +transition with an explicit timeout may be used to override this default, see +`Initiating Transitions`_ for more information. The state timeout of any state +may be changed during action processing with :func:`setStateTimeout(state, +duration)`. This enables runtime configuration e.g. via external message. + +The :meth:`stateFunction` argument is a :class:`PartialFunction[Event, State]`, +which is conveniently given using the partial function literal syntax as +demonstrated below: + +.. code-block:: scala + + when(Idle) { + case Ev(Start(msg)) => // convenience extractor when state data not needed + goto(Timer) using (msg, self.channel) + } + + when(Timer, stateTimeout = 12 seconds) { + case Event(StateTimeout, (msg, channel)) => + channel ! msg + goto(Idle) + } + +The :class:`Event(msg, data)` case class may be used directly in the pattern as +shown in state Idle, or you may use the extractor :obj:`Ev(msg)` when the state +data are not needed. + +Defining the Initial State +-------------------------- + +Each FSM needs a starting point, which is declared using + + :func:`startWith(state, data[, timeout])` + +The optionally given timeout argument overrides any specification given for the +desired initial state. If you want to cancel a default timeout, use +:obj:`Duration.Inf`. 
+ +Unhandled Events +---------------- + +If a state doesn't handle a received event a warning is logged. If you want to +do something else in this case you can specify that with +:func:`whenUnhandled(stateFunction)`: + +.. code-block:: scala + + whenUnhandled { + case Event(x : X, data) => + EventHandler.info(this, "Received unhandled event: " + x) + stay + case Ev(msg) => + EventHandler.warn(this, "Received unknown event: " + msg) + goto(Error) + } + +**IMPORTANT**: This handler is not stacked, meaning that each invocation of +:func:`whenUnhandled` replaces the previously installed handler. + +Initiating Transitions +---------------------- + +The result of any :obj:`stateFunction` must be a definition of the next state +unless terminating the FSM, which is described in `Termination`_. The state +definition can either be the current state, as described by the :func:`stay` +directive, or it is a different state as given by :func:`goto(state)`. The +resulting object allows further qualification by way of the modifiers described +in the following: + +:meth:`forMax(duration)` + This modifier sets a state timeout on the next state. This means that a timer + is started which upon expiry sends a :obj:`StateTimeout` message to the FSM. + This timer is canceled upon reception of any other message in the meantime; + you can rely on the fact that the :obj:`StateTimeout` message will not be + processed after an intervening message. + + This modifier can also be used to override any default timeout which is + specified for the target state. If you want to cancel the default timeout, + use :obj:`Duration.Inf`. + +:meth:`using(data)` + This modifier replaces the old state data with the new data given. If you + follow the advice :ref:`above <fsm-philosophy>`, this is the only place where + internal state data are ever modified. + +:meth:`replying(msg)` + This modifier sends a reply to the currently processed message and otherwise + does not modify the state transition. 
+ +All modifiers can be chained to achieve a nice and concise description: + +.. code-block:: scala + + when(State) { + case Ev(msg) => + goto(Processing) using (msg) forMax (5 seconds) replying (WillDo) + } + +The parentheses are not actually needed in all cases, but they visually +distinguish between modifiers and their arguments and therefore make the code +even more pleasant to read for foreigners. + +Monitoring Transitions +---------------------- + +Transitions occur "between states" conceptually, which means after any actions +you have put into the event handling block; this is obvious since the next +state is only defined by the value returned by the event handling logic. You do +not need to worry about the exact order with respect to setting the internal +state variable, as everything within the FSM actor is running single-threaded +anyway. + +Internal Monitoring +******************* + +Up to this point, the FSM DSL has been centered on states and events. The dual +view is to describe it as a series of transitions. This is enabled by the +method + + :func:`onTransition(handler)` + +which associates actions with a transition instead of with a state and event. +The handler is a partial function which takes a pair of states as input; no +resulting state is needed as it is not possible to modify the transition in +progress. + +.. code-block:: scala + + onTransition { + case Idle -> Active => setTimer("timeout") + case Active -> _ => cancelTimer("timeout") + case x -> Idle => EventHandler.info("entering Idle from "+x) + } + +The convenience extractor :obj:`->` enables decomposition of the pair of states +with a clear visual reminder of the transition's direction. As usual in pattern +matches, an underscore may be used for irrelevant parts; alternatively you +could bind the unconstrained state to a variable, e.g. for logging as shown in +the last case. 
+ +It is also possible to pass a function object accepting two states to +:func:`onTransition`, in case your transition handling logic is implemented as +a method: + +.. code-block:: scala + + onTransition(handler _) + + private def handler(from: State, to: State) { + ... + } + +The handlers registered with this method are stacked, so you can intersperse +:func:`onTransition` blocks with :func:`when` blocks as suits your design. It +should be noted, however, that *all handlers will be invoked for each +transition*, not only the first matching one. This is designed specifically so +you can put all transition handling for a certain aspect into one place without +having to worry about earlier declarations shadowing later ones; the actions +are still executed in declaration order, though. + +.. note:: + + This kind of internal monitoring may be used to structure your FSM according + to transitions, so that for example the cancellation of a timer upon leaving + a certain state cannot be forgotten when adding new target states. + +External Monitoring +******************* + +External actors may be registered to be notified of state transitions by +sending a message :class:`SubscribeTransitionCallBack(actorRef)`. The named +actor will be sent a :class:`CurrentState(self, stateName)` message immediately +and will receive :class:`Transition(actorRef, oldState, newState)` messages +whenever a new state is reached. External monitors may be unregistered by +sending :class:`UnsubscribeTransitionCallBack(actorRef)` to the FSM actor. + +Registering a not-running listener generates a warning and fails gracefully. +Stopping a listener without unregistering will remove the listener from the +subscription list upon the next transition. + +Timers +------ + +Besides state timeouts, FSM manages timers identified by :class:`String` names. 
+You may set a timer using + + :func:`setTimer(name, msg, interval, repeat)` + +where :obj:`msg` is the message object which will be sent after the duration +:obj:`interval` has elapsed. If :obj:`repeat` is :obj:`true`, then the timer is +scheduled at fixed rate given by the :obj:`interval` parameter. Timers may be +canceled using + + :func:`cancelTimer(name)` + +which is guaranteed to work immediately, meaning that the scheduled message +will not be processed after this call even if the timer already fired and +queued it. The status of any timer may be inquired with + + :func:`timerActive_?(name)` + +These named timers complement state timeouts because they are not affected by +intervening reception of other messages. + +Termination +----------- + +The FSM is stopped by specifying the result state as + + :func:`stop([reason[, data]])` + +The reason must be one of :obj:`Normal` (which is the default), :obj:`Shutdown` +or :obj:`Failure(reason)`, and the second argument may be given to change the +state data which is available during termination handling. + +.. note:: + + It should be noted that :func:`stop` does not abort the actions and stop the + FSM immediately. The stop action must be returned from the event handler in + the same way as a state transition. + +.. code-block:: scala + + when(A) { + case Ev(Stop) => + doCleanup() + stop() + } + +You can use :func:`onTermination(handler)` to specify custom code that is +executed when the FSM is stopped. The handler is a partial function which takes +a :class:`StopEvent(reason, stateName, stateData)` as argument: + +.. code-block:: scala + + onTermination { + case StopEvent(Normal, s, d) => ... + case StopEvent(Shutdown, _, _) => ... + case StopEvent(Failure(cause), s, d) => ... + } + +As for the :func:`whenUnhandled` case, this handler is not stacked, so each +invocation of :func:`onTermination` replaces the previously installed handler. 
+ +Examples +++++++++ + +A bigger FSM example can be found in the sources: + + * `Dining Hakkers using FSM `_ + * `Dining Hakkers using become `_ diff --git a/akka-docs/scala/index.rst b/akka-docs/scala/index.rst new file mode 100644 index 0000000000..645efccf41 --- /dev/null +++ b/akka-docs/scala/index.rst @@ -0,0 +1,8 @@ +Scala API +========= + +.. toctree:: + :maxdepth: 2 + + actors + fsm diff --git a/akka-docs/pending/migration-guide-0.7.x-0.8.x.rst b/akka-docs/scala/migration-guide-0.7.x-0.8.x.rst similarity index 100% rename from akka-docs/pending/migration-guide-0.7.x-0.8.x.rst rename to akka-docs/scala/migration-guide-0.7.x-0.8.x.rst diff --git a/akka-docs/pending/migration-guide-0.8.x-0.9.x.rst b/akka-docs/scala/migration-guide-0.8.x-0.9.x.rst similarity index 97% rename from akka-docs/pending/migration-guide-0.8.x-0.9.x.rst rename to akka-docs/scala/migration-guide-0.8.x-0.9.x.rst index 81866e1993..359cb01602 100644 --- a/akka-docs/pending/migration-guide-0.8.x-0.9.x.rst +++ b/akka-docs/scala/migration-guide-0.8.x-0.9.x.rst @@ -133,7 +133,7 @@ If you are also using Protobuf then you can use the methods that work with Proto val actorRef2 = ActorRef.fromProtocol(protobufMessage) - Camel +Camel ====== Some methods of the se.scalablesolutions.akka.camel.Message class have been deprecated in 0.9. These are @@ -163,7 +163,8 @@ They will be removed in 1.0. Instead use } Usage example: -``_ -val m = Message(1.4) -val b = m.bodyAs[String] -``_ +.. 
code-block:: scala + + val m = Message(1.4) + val b = m.bodyAs[String] + + diff --git a/akka-docs/pending/migration-guide-0.9.x-0.10.x.rst b/akka-docs/scala/migration-guide-0.9.x-0.10.x.rst similarity index 100% rename from akka-docs/pending/migration-guide-0.9.x-0.10.x.rst rename to akka-docs/scala/migration-guide-0.9.x-0.10.x.rst diff --git a/akka-docs/scala/migration-guide-1.0.x-1.1.x.rst b/akka-docs/scala/migration-guide-1.0.x-1.1.x.rst new file mode 100644 index 0000000000..c32b2545ac --- /dev/null +++ b/akka-docs/scala/migration-guide-1.0.x-1.1.x.rst @@ -0,0 +1,37 @@ +Akka has now moved to Scala 2.9.x +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Akka HTTP +========= + +# akka.servlet.Initializer has been moved to ``akka-kernel`` to be able to have ``akka-http`` not depend on ``akka-remote``, if you don't want to use the class for kernel, just create your own version of ``akka.servlet.Initializer``, it's just a couple of lines of code and there are instructions here: `Akka Http Docs `_ +# akka.http.ListWriter has been removed in full, if you use it and want to keep using it, here's the code: `ListWriter `_ +# Jersey-server is now a "provided" dependency for ``akka-http``, so you'll need to add the dependency to your project, it's built against Jersey 1.3 + +Akka Actor +========== + +# is now dependency free, with the exception of the dependency on the ``scala-library.jar`` +# does not bundle any logging anymore, but you can subscribe to events within Akka by registering an event handler on akka.event.EventHandler or by specifying the ``FQN`` of an Actor in the akka.conf under akka.event-handlers; there is an ``akka-slf4j`` module which still provides the Logging trait and a default ``SLF4J`` logger adapter. +Don't forget to add a SLF4J backend though, we recommend: + +.. 
code-block:: scala + + lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28" + +# If you used HawtDispatcher and want to continue using it, you need to include akka-dispatcher-extras.jar from Akka Modules, in your akka.conf you need to specify: ``akka.dispatch.HawtDispatcherConfigurator`` instead of ``HawtDispatcher`` +# FSM: the onTransition method changed from Function1 to PartialFunction; there is an implicit conversion for the precise types in place, but it may be necessary to add an underscore if you are passing an eta-expansion (using a method as function value). + +Akka Typed Actor +================ + +All methods starting with 'get*' are deprecated and will be removed in post 1.1 release. + +Akka Remote +=========== + +# ``UnparsebleException`` has been renamed to ``CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(exception, classname, message)`` + +Akka Testkit +============ + +The TestKit moved into the akka-testkit subproject and correspondingly into the ``akka.testkit`` package. 
diff --git a/akka-docs/pending/migration-guides.rst b/akka-docs/scala/migration-guides.rst similarity index 85% rename from akka-docs/pending/migration-guides.rst rename to akka-docs/scala/migration-guides.rst index 4c44977d2f..361f8e3c7a 100644 --- a/akka-docs/pending/migration-guides.rst +++ b/akka-docs/scala/migration-guides.rst @@ -5,4 +5,4 @@ Here are migration guides for the latest releases * `Migrate 0.8.x -> 0.9.x `_ * `Migrate 0.9.x -> 0.10.x `_ * `Migrate 0.10.x -> 1.0.x `_ -* `Migrate 1.0.x -> 1.1.x `_ +* `Migrate 1.0.x -> 1.1.x `_ diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 64e26cfdf5..8781c72ecd 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -1299,4 +1299,4 @@ class DefaultDisposableChannelGroup(name: String) extends DefaultChannelGroup(na throw new IllegalStateException("ChannelGroup already closed, cannot add new channel") } } -} +} \ No newline at end of file diff --git a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala index b8f4eb2748..a47c895027 100644 --- a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala +++ b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala @@ -189,7 +189,7 @@ class ServerInitiatedRemoteActorSpec extends AkkaRemoteTest { while(!testDone()) { if (latch.await(200, TimeUnit.MILLISECONDS)) - error("Test didn't complete within 100 cycles") + sys.error("Test didn't complete within 100 cycles") else latch.countDown() } diff --git a/akka-samples/akka-sample-osgi/src/main/scala/OsgiExample.scala b/akka-samples/akka-sample-osgi/src/main/scala/OsgiExample.scala new file mode 100644 index 0000000000..77134e6e3c --- /dev/null +++ 
b/akka-samples/akka-sample-osgi/src/main/scala/OsgiExample.scala @@ -0,0 +1,31 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ +package sample.osgi + +import akka.actor.{ Actor, ActorRegistry } +import Actor._ + +import org.osgi.framework.{ BundleActivator, BundleContext } + +class Activator extends BundleActivator { + + def start(context: BundleContext) { + println("Starting the OSGi example ...") + val echo = actorOf[EchoActor].start() + val answer = (echo !! "OSGi example") + println(answer getOrElse "No answer!") + } + + def stop(context: BundleContext) { + Actor.registry.shutdownAll() + println("Stopped the OSGi example.") + } +} + +class EchoActor extends Actor { + + override def receive = { + case x => self.reply(x) + } +} diff --git a/akka-tutorials/akka-tutorial-first/pom.xml b/akka-tutorials/akka-tutorial-first/pom.xml new file mode 100644 index 0000000000..f3d9589815 --- /dev/null +++ b/akka-tutorials/akka-tutorial-first/pom.xml @@ -0,0 +1,43 @@ + + + 4.0.0 + + akka-tutorial-first-java + akka.tutorial.first.java + akka-tutorial-first-java + jar + 1.0-SNAPSHOT + http://akka.io + + + + se.scalablesolutions.akka + akka-actor + 1.1-SNAPSHOT + + + + + + Akka + Akka Maven2 Repository + http://www.scalablesolutions.se/akka/repository/ + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 2.3.2 + + 1.6 + 1.6 + + + + + diff --git a/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/java/first/Pi.java b/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java similarity index 81% rename from akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/java/first/Pi.java rename to akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java index 1b2dd5e941..8c0085fb97 100644 --- a/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/java/first/Pi.java +++ b/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java @@ -2,7 +2,7 @@ * Copyright (C) 
2009-2011 Scalable Solutions AB */ -package akka.tutorial.java.first; +package akka.tutorial.first.java; import static akka.actor.Actors.actorOf; import static akka.actor.Actors.poisonPill; @@ -27,8 +27,8 @@ import java.util.concurrent.CountDownLatch; *
  *   $ cd akka-1.1
  *   $ export AKKA_HOME=`pwd`
- *   $ javac -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar akka/tutorial/java/first/Pi.java
- *   $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.java.first.Pi
+ *   $ javac -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar akka/tutorial/first/java/Pi.java
+ *   $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.first.java.Pi
  *   $ ...
  * 
*

@@ -36,7 +36,7 @@ import java.util.concurrent.CountDownLatch; *

  *   $ mvn
  *   > scala:console
- *   > val pi = new akka.tutorial.java.first.Pi
+ *   > val pi = new akka.tutorial.first.java.Pi
  *   > pi.calculate(4, 10000, 10000)
  *   > ...
  * 
@@ -56,15 +56,15 @@ public class Pi { static class Calculate {} static class Work { - private final int arg; + private final int start; private final int nrOfElements; - public Work(int arg, int nrOfElements) { - this.arg = arg; + public Work(int start, int nrOfElements) { + this.start = start; this.nrOfElements = nrOfElements; } - public int getArg() { return arg; } + public int getStart() { return start; } public int getNrOfElements() { return nrOfElements; } } @@ -84,10 +84,10 @@ public class Pi { static class Worker extends UntypedActor { // define the work - private double calculatePiFor(int arg, int nrOfElements) { + private double calculatePiFor(int start, int nrOfElements) { double acc = 0.0; - for (int i = arg * nrOfElements; i <= ((arg + 1) * nrOfElements - 1); i++) { - acc += 4 * Math.pow(-1, i) / (2 * i + 1); + for (int i = start * nrOfElements; i <= ((start + 1) * nrOfElements - 1); i++) { + acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1); } return acc; } @@ -96,7 +96,13 @@ public class Pi { public void onReceive(Object message) { if (message instanceof Work) { Work work = (Work) message; - getContext().replyUnsafe(new Result(calculatePiFor(work.getArg(), work.getNrOfElements()))); // perform the work + + // perform the work + double result = calculatePiFor(work.getStart(), work.getNrOfElements()); + + // reply with the result + getContext().replyUnsafe(new Result(result)); + } else throw new IllegalArgumentException("Unknown message [" + message + "]"); } } @@ -151,8 +157,8 @@ public class Pi { if (message instanceof Calculate) { // schedule work - for (int arg = 0; arg < nrOfMessages; arg++) { - router.sendOneWay(new Work(arg, nrOfElements), getContext()); + for (int start = 0; start < nrOfMessages; start++) { + router.sendOneWay(new Work(start, nrOfElements), getContext()); } // send a PoisonPill to all workers telling them to shut down themselves @@ -180,7 +186,9 @@ public class Pi { @Override public void postStop() { // tell the world that the 
calculation is complete - System.out.println(String.format("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis", pi, (System.currentTimeMillis() - start))); + System.out.println(String.format( + "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis", + pi, (System.currentTimeMillis() - start))); latch.countDown(); } } @@ -188,9 +196,10 @@ public class Pi { // ================== // ===== Run it ===== // ================== - public void calculate(final int nrOfWorkers, final int nrOfElements, final int nrOfMessages) throws Exception { + public void calculate(final int nrOfWorkers, final int nrOfElements, final int nrOfMessages) + throws Exception { - // this latch is only plumbing to kSystem.currentTimeMillis(); when the calculation is completed + // this latch is only plumbing to know when the calculation is completed final CountDownLatch latch = new CountDownLatch(1); // create the master diff --git a/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala b/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala index c31f8ee2f6..c16c53f995 100644 --- a/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala +++ b/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala @@ -2,7 +2,7 @@ * Copyright (C) 2009-2011 Scalable Solutions AB */ -package akka.tutorial.scala.first +package akka.tutorial.first.scala import akka.actor.{Actor, PoisonPill} import Actor._ @@ -22,7 +22,7 @@ import java.util.concurrent.CountDownLatch * $ cd akka-1.1 * $ export AKKA_HOME=`pwd` * $ scalac -cp dist/akka-actor-1.1-SNAPSHOT.jar Pi.scala - * $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.scala.first.Pi + * $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.first.scala.Pi * $ ... * *

@@ -31,7 +31,7 @@ import java.util.concurrent.CountDownLatch * $ sbt * > update * > console - * > akka.tutorial.scala.first.Pi.calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000) + * > akka.tutorial.first.scala.Pi.calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000) * > ... * > :quit * @@ -59,7 +59,7 @@ object Pi extends App { def calculatePiFor(start: Int, nrOfElements: Int): Double = { var acc = 0.0 for (i <- start until (start + nrOfElements)) - acc += 4 * math.pow(-1, i) / (2 * i + 1) + acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1) acc } @@ -89,7 +89,6 @@ object Pi extends App { def receive = { case Calculate => // schedule work - //for (arg <- 0 until nrOfMessages) router ! Work(arg, nrOfElements) for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements) // send a PoisonPill to all workers telling them to shut down themselves @@ -106,12 +105,14 @@ object Pi extends App { } override def preStart { - start = now + start = System.currentTimeMillis } override def postStop { // tell the world that the calculation is complete - println("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis".format(pi, (now - start))) + println( + "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis" + .format(pi, (System.currentTimeMillis - start))) latch.countDown() } } diff --git a/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala index 800385545b..7103c3fd5b 100644 --- a/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala @@ -996,14 +996,20 @@ private[akka] abstract class ActorAspect { None) //TODO: REVISIT: Use another classloader? 
if (isOneWay) null // for void methods + else if (future.isEmpty) throw new IllegalActorStateException("No future returned from call to [" + joinPoint + "]") else if (TypedActor.returnsFuture_?(methodRtti)) future.get + else if (TypedActor.returnsOption_?(methodRtti)) { + import akka.japi.{Option => JOption} + future.get.await.resultOrException.as[JOption[AnyRef]] match { + case None => JOption.none[AnyRef] + case Some(x) if ((x eq null) || x.isEmpty) => JOption.some[AnyRef](null) + case Some(x) => x + } + } else { - if (future.isDefined) { - future.get.await - val result = future.get.resultOrException - if (result.isDefined) result.get - else throw new IllegalActorStateException("No result returned from call to [" + joinPoint + "]") - } else throw new IllegalActorStateException("No future returned from call to [" + joinPoint + "]") + val result = future.get.await.resultOrException + if(result.isDefined) result.get + else throw new IllegalActorStateException("No result returned from call to [" + joinPoint + "]") } } diff --git a/project/build/AkkaProject.scala b/project/build/AkkaProject.scala index 7666cc7543..8a93603b0d 100644 --- a/project/build/AkkaProject.scala +++ b/project/build/AkkaProject.scala @@ -154,13 +154,15 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { lazy val netty = "org.jboss.netty" % "netty" % "3.2.3.Final" % "compile" //ApacheV2 + lazy val osgi_core = "org.osgi" % "org.osgi.core" % "4.2.0" //ApacheV2 + lazy val protobuf = "com.google.protobuf" % "protobuf-java" % "2.3.0" % "compile" //New BSD lazy val sjson = "net.debasishg" % "sjson_2.9.0.RC1" % "0.11" % "compile" //ApacheV2 lazy val sjson_test = "net.debasishg" % "sjson_2.9.0.RC1" % "0.11" % "test" //ApacheV2 - lazy val slf4j = "org.slf4j" % "slf4j-api" % "1.6.0" - lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.24" + lazy val slf4j = "org.slf4j" % "slf4j-api" % SLF4J_VERSION + lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28" % 
"runtime" // Test @@ -271,7 +273,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { // akka-actor subproject // ------------------------------------------------------------------------------------------------------------------- - class AkkaActorProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) { + class AkkaActorProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) with OsgiProject { override def bndExportPackage = super.bndExportPackage ++ Seq("com.eaio.*;version=3.2") } @@ -329,8 +331,6 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { if (!networkTestsEnabled.value) Seq(TestFilter(test => !test.endsWith("NetworkTest"))) else Seq.empty } - - override def bndImportPackage = "javax.transaction;version=1.1" :: super.bndImportPackage.toList } // ------------------------------------------------------------------------------------------------------------------- @@ -377,6 +377,12 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { class AkkaSampleFSMProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath) + class AkkaSampleOsgiProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath) with BNDPlugin { + val osgiCore = Dependencies.osgi_core + override protected def bndPrivatePackage = List("sample.osgi.*") + override protected def bndBundleActivator = Some("sample.osgi.Activator") + } + class AkkaSamplesParentProject(info: ProjectInfo) extends ParentProject(info) { override def disableCrossPaths = true @@ -388,6 +394,8 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { new AkkaSampleRemoteProject(_), akka_remote) lazy val akka_sample_chat = project("akka-sample-chat", "akka-sample-chat", new AkkaSampleChatProject(_), akka_remote) + lazy val akka_sample_osgi = project("akka-sample-osgi", "akka-sample-osgi", + new AkkaSampleOsgiProject(_), akka_actor) lazy val publishRelease = { val releaseConfiguration = 
new DefaultPublishConfiguration(localReleaseRepository, "release", false) @@ -442,7 +450,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { // ------------------------------------------------------------------------------------------------------------------- class AkkaSlf4jProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) { - val sjson = Dependencies.slf4j + val slf4j = Dependencies.slf4j } // ------------------------------------------------------------------------------------------------------------------- @@ -475,8 +483,8 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { def akkaArtifacts = descendents(info.projectPath / "dist", "*-" + version + ".jar") // ------------------------------------------------------------ - class AkkaDefaultProject(info: ProjectInfo, val deployPath: Path) extends DefaultProject(info) - with DeployProject with OSGiProject with McPom { + class AkkaDefaultProject(info: ProjectInfo, val deployPath: Path) extends DefaultProject(info) with DeployProject with McPom { + override def disableCrossPaths = true override def compileOptions = super.compileOptions ++ scalaCompileSettings.map(CompileOption) @@ -528,7 +536,7 @@ trait DeployProject { self: BasicScalaProject => } } -trait OSGiProject extends BNDPlugin { self: DefaultProject => +trait OsgiProject extends BNDPlugin { self: DefaultProject => override def bndExportPackage = Seq("akka.*;version=%s".format(projectVersion.value)) } diff --git a/project/plugins/Plugins.scala b/project/plugins/Plugins.scala index ce3e609964..a0c81fb26d 100644 --- a/project/plugins/Plugins.scala +++ b/project/plugins/Plugins.scala @@ -6,7 +6,6 @@ class Plugins(info: ProjectInfo) extends PluginDefinition(info) { // All repositories *must* go here! See ModuleConigurations below. 
// ------------------------------------------------------------------------------------------------------------------- object Repositories { - lazy val AquteRepo = "aQute Maven Repository" at "http://www.aqute.biz/repo" lazy val DatabinderRepo = "Databinder Repository" at "http://databinder.net/repo" } @@ -17,12 +16,11 @@ class Plugins(info: ProjectInfo) extends PluginDefinition(info) { // Therefore, if repositories are defined, this must happen as def, not as val. // ------------------------------------------------------------------------------------------------------------------- import Repositories._ - lazy val aquteModuleConfig = ModuleConfiguration("biz.aQute", AquteRepo) - lazy val spdeModuleConfig = ModuleConfiguration("us.technically.spde", DatabinderRepo) + lazy val spdeModuleConfig = ModuleConfiguration("us.technically.spde", DatabinderRepo) // ------------------------------------------------------------------------------------------------------------------- // Dependencies // ------------------------------------------------------------------------------------------------------------------- - lazy val bnd4sbt = "com.weiglewilczek.bnd4sbt" % "bnd4sbt" % "1.0.1" - lazy val spdeSbt = "us.technically.spde" % "spde-sbt-plugin" % "0.4.2" + lazy val bnd4sbt = "com.weiglewilczek.bnd4sbt" % "bnd4sbt" % "1.0.2" + lazy val spdeSbt = "us.technically.spde" % "spde-sbt-plugin" % "0.4.2" }